1 //===-- ARMExpandPseudoInsts.cpp - Expand pseudo instructions -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a pass that expands pseudo instructions into target
10 // instructions to allow proper scheduling, if-conversion, and other late
11 // optimizations. This pass should be run after register allocation but before
12 // the post-regalloc scheduling pass.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "ARM.h"
17 #include "ARMBaseInstrInfo.h"
18 #include "ARMBaseRegisterInfo.h"
19 #include "ARMConstantPoolValue.h"
20 #include "ARMMachineFunctionInfo.h"
21 #include "ARMSubtarget.h"
22 #include "MCTargetDesc/ARMAddressingModes.h"
23 #include "llvm/CodeGen/LivePhysRegs.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunctionPass.h"
26 #include "llvm/Support/Debug.h"
27 
28 using namespace llvm;
29 
30 #define DEBUG_TYPE "arm-pseudo"
31 
32 static cl::opt<bool>
33 VerifyARMPseudo("verify-arm-pseudo-expand", cl::Hidden,
34                 cl::desc("Verify machine code after expanding ARM pseudos"));
35 
36 #define ARM_EXPAND_PSEUDO_NAME "ARM pseudo instruction expansion pass"
37 
38 namespace {
39   class ARMExpandPseudo : public MachineFunctionPass {
40   public:
41     static char ID;
42     ARMExpandPseudo() : MachineFunctionPass(ID) {}
43 
44     const ARMBaseInstrInfo *TII;
45     const TargetRegisterInfo *TRI;
46     const ARMSubtarget *STI;
47     ARMFunctionInfo *AFI;
48 
49     bool runOnMachineFunction(MachineFunction &Fn) override;
50 
51     MachineFunctionProperties getRequiredProperties() const override {
52       return MachineFunctionProperties().set(
53           MachineFunctionProperties::Property::NoVRegs);
54     }
55 
56     StringRef getPassName() const override {
57       return ARM_EXPAND_PSEUDO_NAME;
58     }
59 
60   private:
61     void TransferImpOps(MachineInstr &OldMI,
62                         MachineInstrBuilder &UseMI, MachineInstrBuilder &DefMI);
63     bool ExpandMI(MachineBasicBlock &MBB,
64                   MachineBasicBlock::iterator MBBI,
65                   MachineBasicBlock::iterator &NextMBBI);
66     bool ExpandMBB(MachineBasicBlock &MBB);
67     void ExpandVLD(MachineBasicBlock::iterator &MBBI);
68     void ExpandVST(MachineBasicBlock::iterator &MBBI);
69     void ExpandLaneOp(MachineBasicBlock::iterator &MBBI);
70     void ExpandVTBL(MachineBasicBlock::iterator &MBBI,
71                     unsigned Opc, bool IsExt);
72     void ExpandMOV32BitImm(MachineBasicBlock &MBB,
73                            MachineBasicBlock::iterator &MBBI);
74     void CMSEClearGPRegs(MachineBasicBlock &MBB,
75                          MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
76                          const SmallVectorImpl<unsigned> &ClearRegs,
77                          unsigned ClobberReg);
78     MachineBasicBlock &CMSEClearFPRegs(MachineBasicBlock &MBB,
79                                        MachineBasicBlock::iterator MBBI);
80     MachineBasicBlock &CMSEClearFPRegsV8(MachineBasicBlock &MBB,
81                                          MachineBasicBlock::iterator MBBI,
82                                          const BitVector &ClearRegs);
83     MachineBasicBlock &CMSEClearFPRegsV81(MachineBasicBlock &MBB,
84                                           MachineBasicBlock::iterator MBBI,
85                                           const BitVector &ClearRegs);
86     void CMSESaveClearFPRegs(MachineBasicBlock &MBB,
87                              MachineBasicBlock::iterator MBBI, DebugLoc &DL,
88                              const LivePhysRegs &LiveRegs,
89                              SmallVectorImpl<unsigned> &AvailableRegs);
90     void CMSESaveClearFPRegsV8(MachineBasicBlock &MBB,
91                                MachineBasicBlock::iterator MBBI, DebugLoc &DL,
92                                const LivePhysRegs &LiveRegs,
93                                SmallVectorImpl<unsigned> &ScratchRegs);
94     void CMSESaveClearFPRegsV81(MachineBasicBlock &MBB,
95                                 MachineBasicBlock::iterator MBBI, DebugLoc &DL,
96                                 const LivePhysRegs &LiveRegs);
97     void CMSERestoreFPRegs(MachineBasicBlock &MBB,
98                            MachineBasicBlock::iterator MBBI, DebugLoc &DL,
99                            SmallVectorImpl<unsigned> &AvailableRegs);
100     void CMSERestoreFPRegsV8(MachineBasicBlock &MBB,
101                              MachineBasicBlock::iterator MBBI, DebugLoc &DL,
102                              SmallVectorImpl<unsigned> &AvailableRegs);
103     void CMSERestoreFPRegsV81(MachineBasicBlock &MBB,
104                               MachineBasicBlock::iterator MBBI, DebugLoc &DL,
105                               SmallVectorImpl<unsigned> &AvailableRegs);
106     bool ExpandCMP_SWAP(MachineBasicBlock &MBB,
107                         MachineBasicBlock::iterator MBBI, unsigned LdrexOp,
108                         unsigned StrexOp, unsigned UxtOp,
109                         MachineBasicBlock::iterator &NextMBBI);
110 
111     bool ExpandCMP_SWAP_64(MachineBasicBlock &MBB,
112                            MachineBasicBlock::iterator MBBI,
113                            MachineBasicBlock::iterator &NextMBBI);
114   };
115   char ARMExpandPseudo::ID = 0;
116 }
117 
118 INITIALIZE_PASS(ARMExpandPseudo, DEBUG_TYPE, ARM_EXPAND_PSEUDO_NAME, false,
119                 false)
120 
121 /// TransferImpOps - Transfer implicit operands on the pseudo instruction to
122 /// the instructions created from the expansion.
123 void ARMExpandPseudo::TransferImpOps(MachineInstr &OldMI,
124                                      MachineInstrBuilder &UseMI,
125                                      MachineInstrBuilder &DefMI) {
126   const MCInstrDesc &Desc = OldMI.getDesc();
127   for (unsigned i = Desc.getNumOperands(), e = OldMI.getNumOperands();
128        i != e; ++i) {
129     const MachineOperand &MO = OldMI.getOperand(i);
130     assert(MO.isReg() && MO.getReg());
131     if (MO.isUse())
132       UseMI.add(MO);
133     else
134       DefMI.add(MO);
135   }
136 }
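// Illustrative note (register names here are hypothetical, not from the
// original source): if the pseudo carried "implicit killed $r1,
// implicit-def dead $r2" beyond its declared operands, the loop above
// appends $r1 to UseMI and $r2 to DefMI.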
137 
138 namespace {
139   // Constants for register spacing in NEON load/store instructions.
140   // For quad-register load-lane and store-lane pseudo instructions, the
141   // spacing is initially assumed to be EvenDblSpc, and that is changed to
142   // OddDblSpc depending on the lane number operand.
143   enum NEONRegSpacing {
144     SingleSpc,
145     SingleLowSpc,   // Single spacing, low registers, three and four vectors.
146     SingleHighQSpc, // Single spacing, high registers, four vectors.
147     SingleHighTSpc, // Single spacing, high registers, three vectors.
148     EvenDblSpc,
149     OddDblSpc
150   };
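  // For illustration: with double spacing, the QQQQ register covering D0-D7
  // expands to D0,D2,D4,D6 (EvenDblSpc) or D1,D3,D5,D7 (OddDblSpc); see
  // GetDSubRegs below for the authoritative sub-register mapping.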
151 
152   // Entries for the NEON load/store information table.  The table is sorted by
153   // PseudoOpc for fast binary-search lookups.
154   struct NEONLdStTableEntry {
155     uint16_t PseudoOpc;
156     uint16_t RealOpc;
157     bool IsLoad;
158     bool isUpdating;
159     bool hasWritebackOperand;
160     uint8_t RegSpacing; // One of type NEONRegSpacing
161     uint8_t NumRegs; // D registers loaded or stored
162     uint8_t RegElts; // elements per D register; used for lane ops
163     // FIXME: Temporary flag to denote whether the real instruction takes
164     // a single register (like the encoding) or all of the registers in
165     // the list (like the asm syntax and the isel DAG). When all definitions
166     // are converted to take only the single encoded register, this will
167     // go away.
168     bool copyAllListRegs;
169 
170     // Comparison methods for binary search of the table.
171     bool operator<(const NEONLdStTableEntry &TE) const {
172       return PseudoOpc < TE.PseudoOpc;
173     }
174     friend bool operator<(const NEONLdStTableEntry &TE, unsigned PseudoOpc) {
175       return TE.PseudoOpc < PseudoOpc;
176     }
177     friend bool LLVM_ATTRIBUTE_UNUSED operator<(unsigned PseudoOpc,
178                                                 const NEONLdStTableEntry &TE) {
179       return PseudoOpc < TE.PseudoOpc;
180     }
181   };
182 }
183 
184 static const NEONLdStTableEntry NEONLdStTable[] = {
185 { ARM::VLD1LNq16Pseudo,     ARM::VLD1LNd16,     true, false, false, EvenDblSpc, 1, 4 ,true},
186 { ARM::VLD1LNq16Pseudo_UPD, ARM::VLD1LNd16_UPD, true, true, true,  EvenDblSpc, 1, 4 ,true},
187 { ARM::VLD1LNq32Pseudo,     ARM::VLD1LNd32,     true, false, false, EvenDblSpc, 1, 2 ,true},
188 { ARM::VLD1LNq32Pseudo_UPD, ARM::VLD1LNd32_UPD, true, true, true,  EvenDblSpc, 1, 2 ,true},
189 { ARM::VLD1LNq8Pseudo,      ARM::VLD1LNd8,      true, false, false, EvenDblSpc, 1, 8 ,true},
190 { ARM::VLD1LNq8Pseudo_UPD,  ARM::VLD1LNd8_UPD, true, true, true,  EvenDblSpc, 1, 8 ,true},
191 
192 { ARM::VLD1d16QPseudo,      ARM::VLD1d16Q,     true,  false, false, SingleSpc,  4, 4 ,false},
193 { ARM::VLD1d16TPseudo,      ARM::VLD1d16T,     true,  false, false, SingleSpc,  3, 4 ,false},
194 { ARM::VLD1d32QPseudo,      ARM::VLD1d32Q,     true,  false, false, SingleSpc,  4, 2 ,false},
195 { ARM::VLD1d32TPseudo,      ARM::VLD1d32T,     true,  false, false, SingleSpc,  3, 2 ,false},
196 { ARM::VLD1d64QPseudo,      ARM::VLD1d64Q,     true,  false, false, SingleSpc,  4, 1 ,false},
197 { ARM::VLD1d64QPseudoWB_fixed,  ARM::VLD1d64Qwb_fixed,   true,  true, false, SingleSpc,  4, 1 ,false},
198 { ARM::VLD1d64QPseudoWB_register,  ARM::VLD1d64Qwb_register,   true,  true, true, SingleSpc,  4, 1 ,false},
199 { ARM::VLD1d64TPseudo,      ARM::VLD1d64T,     true,  false, false, SingleSpc,  3, 1 ,false},
200 { ARM::VLD1d64TPseudoWB_fixed,  ARM::VLD1d64Twb_fixed,   true,  true, false, SingleSpc,  3, 1 ,false},
201 { ARM::VLD1d64TPseudoWB_register,  ARM::VLD1d64Twb_register, true, true, true,  SingleSpc,  3, 1 ,false},
202 { ARM::VLD1d8QPseudo,       ARM::VLD1d8Q,      true,  false, false, SingleSpc,  4, 8 ,false},
203 { ARM::VLD1d8TPseudo,       ARM::VLD1d8T,      true,  false, false, SingleSpc,  3, 8 ,false},
204 { ARM::VLD1q16HighQPseudo,  ARM::VLD1d16Q,     true,  false, false, SingleHighQSpc,  4, 4 ,false},
205 { ARM::VLD1q16HighTPseudo,  ARM::VLD1d16T,     true,  false, false, SingleHighTSpc,  3, 4 ,false},
206 { ARM::VLD1q16LowQPseudo_UPD,  ARM::VLD1d16Qwb_fixed,   true,  true, true, SingleLowSpc,  4, 4 ,false},
207 { ARM::VLD1q16LowTPseudo_UPD,  ARM::VLD1d16Twb_fixed,   true,  true, true, SingleLowSpc,  3, 4 ,false},
208 { ARM::VLD1q32HighQPseudo,  ARM::VLD1d32Q,     true,  false, false, SingleHighQSpc,  4, 2 ,false},
209 { ARM::VLD1q32HighTPseudo,  ARM::VLD1d32T,     true,  false, false, SingleHighTSpc,  3, 2 ,false},
210 { ARM::VLD1q32LowQPseudo_UPD,  ARM::VLD1d32Qwb_fixed,   true,  true, true, SingleLowSpc,  4, 2 ,false},
211 { ARM::VLD1q32LowTPseudo_UPD,  ARM::VLD1d32Twb_fixed,   true,  true, true, SingleLowSpc,  3, 2 ,false},
212 { ARM::VLD1q64HighQPseudo,  ARM::VLD1d64Q,     true,  false, false, SingleHighQSpc,  4, 1 ,false},
213 { ARM::VLD1q64HighTPseudo,  ARM::VLD1d64T,     true,  false, false, SingleHighTSpc,  3, 1 ,false},
214 { ARM::VLD1q64LowQPseudo_UPD,  ARM::VLD1d64Qwb_fixed,   true,  true, true, SingleLowSpc,  4, 1 ,false},
215 { ARM::VLD1q64LowTPseudo_UPD,  ARM::VLD1d64Twb_fixed,   true,  true, true, SingleLowSpc,  3, 1 ,false},
216 { ARM::VLD1q8HighQPseudo,   ARM::VLD1d8Q,     true,  false, false, SingleHighQSpc,  4, 8 ,false},
217 { ARM::VLD1q8HighTPseudo,   ARM::VLD1d8T,     true,  false, false, SingleHighTSpc,  3, 8 ,false},
218 { ARM::VLD1q8LowQPseudo_UPD,  ARM::VLD1d8Qwb_fixed,   true,  true, true, SingleLowSpc,  4, 8 ,false},
219 { ARM::VLD1q8LowTPseudo_UPD,  ARM::VLD1d8Twb_fixed,   true,  true, true, SingleLowSpc,  3, 8 ,false},
220 
221 { ARM::VLD2DUPq16EvenPseudo,  ARM::VLD2DUPd16x2,  true, false, false, EvenDblSpc, 2, 4 ,false},
222 { ARM::VLD2DUPq16OddPseudo,   ARM::VLD2DUPd16x2,  true, false, false, OddDblSpc,  2, 4 ,false},
223 { ARM::VLD2DUPq32EvenPseudo,  ARM::VLD2DUPd32x2,  true, false, false, EvenDblSpc, 2, 2 ,false},
224 { ARM::VLD2DUPq32OddPseudo,   ARM::VLD2DUPd32x2,  true, false, false, OddDblSpc,  2, 2 ,false},
225 { ARM::VLD2DUPq8EvenPseudo,   ARM::VLD2DUPd8x2,   true, false, false, EvenDblSpc, 2, 8 ,false},
226 { ARM::VLD2DUPq8OddPseudo,    ARM::VLD2DUPd8x2,   true, false, false, OddDblSpc,  2, 8 ,false},
227 
228 { ARM::VLD2LNd16Pseudo,     ARM::VLD2LNd16,     true, false, false, SingleSpc,  2, 4 ,true},
229 { ARM::VLD2LNd16Pseudo_UPD, ARM::VLD2LNd16_UPD, true, true, true,  SingleSpc,  2, 4 ,true},
230 { ARM::VLD2LNd32Pseudo,     ARM::VLD2LNd32,     true, false, false, SingleSpc,  2, 2 ,true},
231 { ARM::VLD2LNd32Pseudo_UPD, ARM::VLD2LNd32_UPD, true, true, true,  SingleSpc,  2, 2 ,true},
232 { ARM::VLD2LNd8Pseudo,      ARM::VLD2LNd8,      true, false, false, SingleSpc,  2, 8 ,true},
233 { ARM::VLD2LNd8Pseudo_UPD,  ARM::VLD2LNd8_UPD, true, true, true,  SingleSpc,  2, 8 ,true},
234 { ARM::VLD2LNq16Pseudo,     ARM::VLD2LNq16,     true, false, false, EvenDblSpc, 2, 4 ,true},
235 { ARM::VLD2LNq16Pseudo_UPD, ARM::VLD2LNq16_UPD, true, true, true,  EvenDblSpc, 2, 4 ,true},
236 { ARM::VLD2LNq32Pseudo,     ARM::VLD2LNq32,     true, false, false, EvenDblSpc, 2, 2 ,true},
237 { ARM::VLD2LNq32Pseudo_UPD, ARM::VLD2LNq32_UPD, true, true, true,  EvenDblSpc, 2, 2 ,true},
238 
239 { ARM::VLD2q16Pseudo,       ARM::VLD2q16,      true,  false, false, SingleSpc,  4, 4 ,false},
240 { ARM::VLD2q16PseudoWB_fixed,   ARM::VLD2q16wb_fixed, true, true, false,  SingleSpc,  4, 4 ,false},
241 { ARM::VLD2q16PseudoWB_register,   ARM::VLD2q16wb_register, true, true, true,  SingleSpc,  4, 4 ,false},
242 { ARM::VLD2q32Pseudo,       ARM::VLD2q32,      true,  false, false, SingleSpc,  4, 2 ,false},
243 { ARM::VLD2q32PseudoWB_fixed,   ARM::VLD2q32wb_fixed, true, true, false,  SingleSpc,  4, 2 ,false},
244 { ARM::VLD2q32PseudoWB_register,   ARM::VLD2q32wb_register, true, true, true,  SingleSpc,  4, 2 ,false},
245 { ARM::VLD2q8Pseudo,        ARM::VLD2q8,       true,  false, false, SingleSpc,  4, 8 ,false},
246 { ARM::VLD2q8PseudoWB_fixed,    ARM::VLD2q8wb_fixed, true, true, false,  SingleSpc,  4, 8 ,false},
247 { ARM::VLD2q8PseudoWB_register,    ARM::VLD2q8wb_register, true, true, true,  SingleSpc,  4, 8 ,false},
248 
249 { ARM::VLD3DUPd16Pseudo,     ARM::VLD3DUPd16,     true, false, false, SingleSpc, 3, 4,true},
250 { ARM::VLD3DUPd16Pseudo_UPD, ARM::VLD3DUPd16_UPD, true, true, true,  SingleSpc, 3, 4,true},
251 { ARM::VLD3DUPd32Pseudo,     ARM::VLD3DUPd32,     true, false, false, SingleSpc, 3, 2,true},
252 { ARM::VLD3DUPd32Pseudo_UPD, ARM::VLD3DUPd32_UPD, true, true, true,  SingleSpc, 3, 2,true},
253 { ARM::VLD3DUPd8Pseudo,      ARM::VLD3DUPd8,      true, false, false, SingleSpc, 3, 8,true},
254 { ARM::VLD3DUPd8Pseudo_UPD,  ARM::VLD3DUPd8_UPD, true, true, true,  SingleSpc, 3, 8,true},
255 { ARM::VLD3DUPq16EvenPseudo, ARM::VLD3DUPq16,     true, false, false, EvenDblSpc, 3, 4 ,true},
256 { ARM::VLD3DUPq16OddPseudo,  ARM::VLD3DUPq16,     true, false, false, OddDblSpc,  3, 4 ,true},
257 { ARM::VLD3DUPq32EvenPseudo, ARM::VLD3DUPq32,     true, false, false, EvenDblSpc, 3, 2 ,true},
258 { ARM::VLD3DUPq32OddPseudo,  ARM::VLD3DUPq32,     true, false, false, OddDblSpc,  3, 2 ,true},
259 { ARM::VLD3DUPq8EvenPseudo,  ARM::VLD3DUPq8,      true, false, false, EvenDblSpc, 3, 8 ,true},
260 { ARM::VLD3DUPq8OddPseudo,   ARM::VLD3DUPq8,      true, false, false, OddDblSpc,  3, 8 ,true},
261 
262 { ARM::VLD3LNd16Pseudo,     ARM::VLD3LNd16,     true, false, false, SingleSpc,  3, 4 ,true},
263 { ARM::VLD3LNd16Pseudo_UPD, ARM::VLD3LNd16_UPD, true, true, true,  SingleSpc,  3, 4 ,true},
264 { ARM::VLD3LNd32Pseudo,     ARM::VLD3LNd32,     true, false, false, SingleSpc,  3, 2 ,true},
265 { ARM::VLD3LNd32Pseudo_UPD, ARM::VLD3LNd32_UPD, true, true, true,  SingleSpc,  3, 2 ,true},
266 { ARM::VLD3LNd8Pseudo,      ARM::VLD3LNd8,      true, false, false, SingleSpc,  3, 8 ,true},
267 { ARM::VLD3LNd8Pseudo_UPD,  ARM::VLD3LNd8_UPD, true, true, true,  SingleSpc,  3, 8 ,true},
268 { ARM::VLD3LNq16Pseudo,     ARM::VLD3LNq16,     true, false, false, EvenDblSpc, 3, 4 ,true},
269 { ARM::VLD3LNq16Pseudo_UPD, ARM::VLD3LNq16_UPD, true, true, true,  EvenDblSpc, 3, 4 ,true},
270 { ARM::VLD3LNq32Pseudo,     ARM::VLD3LNq32,     true, false, false, EvenDblSpc, 3, 2 ,true},
271 { ARM::VLD3LNq32Pseudo_UPD, ARM::VLD3LNq32_UPD, true, true, true,  EvenDblSpc, 3, 2 ,true},
272 
273 { ARM::VLD3d16Pseudo,       ARM::VLD3d16,      true,  false, false, SingleSpc,  3, 4 ,true},
274 { ARM::VLD3d16Pseudo_UPD,   ARM::VLD3d16_UPD, true, true, true,  SingleSpc,  3, 4 ,true},
275 { ARM::VLD3d32Pseudo,       ARM::VLD3d32,      true,  false, false, SingleSpc,  3, 2 ,true},
276 { ARM::VLD3d32Pseudo_UPD,   ARM::VLD3d32_UPD, true, true, true,  SingleSpc,  3, 2 ,true},
277 { ARM::VLD3d8Pseudo,        ARM::VLD3d8,       true,  false, false, SingleSpc,  3, 8 ,true},
278 { ARM::VLD3d8Pseudo_UPD,    ARM::VLD3d8_UPD, true, true, true,  SingleSpc,  3, 8 ,true},
279 
280 { ARM::VLD3q16Pseudo_UPD,    ARM::VLD3q16_UPD, true, true, true,  EvenDblSpc, 3, 4 ,true},
281 { ARM::VLD3q16oddPseudo,     ARM::VLD3q16,     true,  false, false, OddDblSpc,  3, 4 ,true},
282 { ARM::VLD3q16oddPseudo_UPD, ARM::VLD3q16_UPD, true, true, true,  OddDblSpc,  3, 4 ,true},
283 { ARM::VLD3q32Pseudo_UPD,    ARM::VLD3q32_UPD, true, true, true,  EvenDblSpc, 3, 2 ,true},
284 { ARM::VLD3q32oddPseudo,     ARM::VLD3q32,     true,  false, false, OddDblSpc,  3, 2 ,true},
285 { ARM::VLD3q32oddPseudo_UPD, ARM::VLD3q32_UPD, true, true, true,  OddDblSpc,  3, 2 ,true},
286 { ARM::VLD3q8Pseudo_UPD,     ARM::VLD3q8_UPD, true, true, true,  EvenDblSpc, 3, 8 ,true},
287 { ARM::VLD3q8oddPseudo,      ARM::VLD3q8,      true,  false, false, OddDblSpc,  3, 8 ,true},
288 { ARM::VLD3q8oddPseudo_UPD,  ARM::VLD3q8_UPD, true, true, true,  OddDblSpc,  3, 8 ,true},
289 
290 { ARM::VLD4DUPd16Pseudo,     ARM::VLD4DUPd16,     true, false, false, SingleSpc, 4, 4,true},
291 { ARM::VLD4DUPd16Pseudo_UPD, ARM::VLD4DUPd16_UPD, true, true, true,  SingleSpc, 4, 4,true},
292 { ARM::VLD4DUPd32Pseudo,     ARM::VLD4DUPd32,     true, false, false, SingleSpc, 4, 2,true},
293 { ARM::VLD4DUPd32Pseudo_UPD, ARM::VLD4DUPd32_UPD, true, true, true,  SingleSpc, 4, 2,true},
294 { ARM::VLD4DUPd8Pseudo,      ARM::VLD4DUPd8,      true, false, false, SingleSpc, 4, 8,true},
295 { ARM::VLD4DUPd8Pseudo_UPD,  ARM::VLD4DUPd8_UPD, true, true, true,  SingleSpc, 4, 8,true},
296 { ARM::VLD4DUPq16EvenPseudo, ARM::VLD4DUPq16,     true, false, false, EvenDblSpc, 4, 4 ,true},
297 { ARM::VLD4DUPq16OddPseudo,  ARM::VLD4DUPq16,     true, false, false, OddDblSpc,  4, 4 ,true},
298 { ARM::VLD4DUPq32EvenPseudo, ARM::VLD4DUPq32,     true, false, false, EvenDblSpc, 4, 2 ,true},
299 { ARM::VLD4DUPq32OddPseudo,  ARM::VLD4DUPq32,     true, false, false, OddDblSpc,  4, 2 ,true},
300 { ARM::VLD4DUPq8EvenPseudo,  ARM::VLD4DUPq8,      true, false, false, EvenDblSpc, 4, 8 ,true},
301 { ARM::VLD4DUPq8OddPseudo,   ARM::VLD4DUPq8,      true, false, false, OddDblSpc,  4, 8 ,true},
302 
303 { ARM::VLD4LNd16Pseudo,     ARM::VLD4LNd16,     true, false, false, SingleSpc,  4, 4 ,true},
304 { ARM::VLD4LNd16Pseudo_UPD, ARM::VLD4LNd16_UPD, true, true, true,  SingleSpc,  4, 4 ,true},
305 { ARM::VLD4LNd32Pseudo,     ARM::VLD4LNd32,     true, false, false, SingleSpc,  4, 2 ,true},
306 { ARM::VLD4LNd32Pseudo_UPD, ARM::VLD4LNd32_UPD, true, true, true,  SingleSpc,  4, 2 ,true},
307 { ARM::VLD4LNd8Pseudo,      ARM::VLD4LNd8,      true, false, false, SingleSpc,  4, 8 ,true},
308 { ARM::VLD4LNd8Pseudo_UPD,  ARM::VLD4LNd8_UPD, true, true, true,  SingleSpc,  4, 8 ,true},
309 { ARM::VLD4LNq16Pseudo,     ARM::VLD4LNq16,     true, false, false, EvenDblSpc, 4, 4 ,true},
310 { ARM::VLD4LNq16Pseudo_UPD, ARM::VLD4LNq16_UPD, true, true, true,  EvenDblSpc, 4, 4 ,true},
311 { ARM::VLD4LNq32Pseudo,     ARM::VLD4LNq32,     true, false, false, EvenDblSpc, 4, 2 ,true},
312 { ARM::VLD4LNq32Pseudo_UPD, ARM::VLD4LNq32_UPD, true, true, true,  EvenDblSpc, 4, 2 ,true},
313 
314 { ARM::VLD4d16Pseudo,       ARM::VLD4d16,      true,  false, false, SingleSpc,  4, 4 ,true},
315 { ARM::VLD4d16Pseudo_UPD,   ARM::VLD4d16_UPD, true, true, true,  SingleSpc,  4, 4 ,true},
316 { ARM::VLD4d32Pseudo,       ARM::VLD4d32,      true,  false, false, SingleSpc,  4, 2 ,true},
317 { ARM::VLD4d32Pseudo_UPD,   ARM::VLD4d32_UPD, true, true, true,  SingleSpc,  4, 2 ,true},
318 { ARM::VLD4d8Pseudo,        ARM::VLD4d8,       true,  false, false, SingleSpc,  4, 8 ,true},
319 { ARM::VLD4d8Pseudo_UPD,    ARM::VLD4d8_UPD, true, true, true,  SingleSpc,  4, 8 ,true},
320 
321 { ARM::VLD4q16Pseudo_UPD,    ARM::VLD4q16_UPD, true, true, true,  EvenDblSpc, 4, 4 ,true},
322 { ARM::VLD4q16oddPseudo,     ARM::VLD4q16,     true,  false, false, OddDblSpc,  4, 4 ,true},
323 { ARM::VLD4q16oddPseudo_UPD, ARM::VLD4q16_UPD, true, true, true,  OddDblSpc,  4, 4 ,true},
324 { ARM::VLD4q32Pseudo_UPD,    ARM::VLD4q32_UPD, true, true, true,  EvenDblSpc, 4, 2 ,true},
325 { ARM::VLD4q32oddPseudo,     ARM::VLD4q32,     true,  false, false, OddDblSpc,  4, 2 ,true},
326 { ARM::VLD4q32oddPseudo_UPD, ARM::VLD4q32_UPD, true, true, true,  OddDblSpc,  4, 2 ,true},
327 { ARM::VLD4q8Pseudo_UPD,     ARM::VLD4q8_UPD, true, true, true,  EvenDblSpc, 4, 8 ,true},
328 { ARM::VLD4q8oddPseudo,      ARM::VLD4q8,      true,  false, false, OddDblSpc,  4, 8 ,true},
329 { ARM::VLD4q8oddPseudo_UPD,  ARM::VLD4q8_UPD, true, true, true,  OddDblSpc,  4, 8 ,true},
330 
331 { ARM::VST1LNq16Pseudo,     ARM::VST1LNd16,    false, false, false, EvenDblSpc, 1, 4 ,true},
332 { ARM::VST1LNq16Pseudo_UPD, ARM::VST1LNd16_UPD, false, true, true,  EvenDblSpc, 1, 4 ,true},
333 { ARM::VST1LNq32Pseudo,     ARM::VST1LNd32,    false, false, false, EvenDblSpc, 1, 2 ,true},
334 { ARM::VST1LNq32Pseudo_UPD, ARM::VST1LNd32_UPD, false, true, true,  EvenDblSpc, 1, 2 ,true},
335 { ARM::VST1LNq8Pseudo,      ARM::VST1LNd8,     false, false, false, EvenDblSpc, 1, 8 ,true},
336 { ARM::VST1LNq8Pseudo_UPD,  ARM::VST1LNd8_UPD, false, true, true,  EvenDblSpc, 1, 8 ,true},
337 
338 { ARM::VST1d16QPseudo,      ARM::VST1d16Q,     false, false, false, SingleSpc,  4, 4 ,false},
339 { ARM::VST1d16TPseudo,      ARM::VST1d16T,     false, false, false, SingleSpc,  3, 4 ,false},
340 { ARM::VST1d32QPseudo,      ARM::VST1d32Q,     false, false, false, SingleSpc,  4, 2 ,false},
341 { ARM::VST1d32TPseudo,      ARM::VST1d32T,     false, false, false, SingleSpc,  3, 2 ,false},
342 { ARM::VST1d64QPseudo,      ARM::VST1d64Q,     false, false, false, SingleSpc,  4, 1 ,false},
343 { ARM::VST1d64QPseudoWB_fixed,  ARM::VST1d64Qwb_fixed, false, true, false,  SingleSpc,  4, 1 ,false},
344 { ARM::VST1d64QPseudoWB_register, ARM::VST1d64Qwb_register, false, true, true,  SingleSpc,  4, 1 ,false},
345 { ARM::VST1d64TPseudo,      ARM::VST1d64T,     false, false, false, SingleSpc,  3, 1 ,false},
346 { ARM::VST1d64TPseudoWB_fixed,  ARM::VST1d64Twb_fixed, false, true, false,  SingleSpc,  3, 1 ,false},
347 { ARM::VST1d64TPseudoWB_register,  ARM::VST1d64Twb_register, false, true, true,  SingleSpc,  3, 1 ,false},
348 { ARM::VST1d8QPseudo,       ARM::VST1d8Q,      false, false, false, SingleSpc,  4, 8 ,false},
349 { ARM::VST1d8TPseudo,       ARM::VST1d8T,      false, false, false, SingleSpc,  3, 8 ,false},
350 { ARM::VST1q16HighQPseudo,  ARM::VST1d16Q,      false, false, false, SingleHighQSpc,   4, 4 ,false},
351 { ARM::VST1q16HighTPseudo,  ARM::VST1d16T,      false, false, false, SingleHighTSpc,   3, 4 ,false},
352 { ARM::VST1q16LowQPseudo_UPD,   ARM::VST1d16Qwb_fixed,  false, true, true, SingleLowSpc,   4, 4 ,false},
353 { ARM::VST1q16LowTPseudo_UPD,   ARM::VST1d16Twb_fixed,  false, true, true, SingleLowSpc,   3, 4 ,false},
354 { ARM::VST1q32HighQPseudo,  ARM::VST1d32Q,      false, false, false, SingleHighQSpc,   4, 2 ,false},
355 { ARM::VST1q32HighTPseudo,  ARM::VST1d32T,      false, false, false, SingleHighTSpc,   3, 2 ,false},
356 { ARM::VST1q32LowQPseudo_UPD,   ARM::VST1d32Qwb_fixed,  false, true, true, SingleLowSpc,   4, 2 ,false},
357 { ARM::VST1q32LowTPseudo_UPD,   ARM::VST1d32Twb_fixed,  false, true, true, SingleLowSpc,   3, 2 ,false},
358 { ARM::VST1q64HighQPseudo,  ARM::VST1d64Q,      false, false, false, SingleHighQSpc,   4, 1 ,false},
359 { ARM::VST1q64HighTPseudo,  ARM::VST1d64T,      false, false, false, SingleHighTSpc,   3, 1 ,false},
360 { ARM::VST1q64LowQPseudo_UPD,   ARM::VST1d64Qwb_fixed,  false, true, true, SingleLowSpc,   4, 1 ,false},
361 { ARM::VST1q64LowTPseudo_UPD,   ARM::VST1d64Twb_fixed,  false, true, true, SingleLowSpc,   3, 1 ,false},
362 { ARM::VST1q8HighQPseudo,   ARM::VST1d8Q,      false, false, false, SingleHighQSpc,   4, 8 ,false},
363 { ARM::VST1q8HighTPseudo,   ARM::VST1d8T,      false, false, false, SingleHighTSpc,   3, 8 ,false},
364 { ARM::VST1q8LowQPseudo_UPD,   ARM::VST1d8Qwb_fixed,  false, true, true, SingleLowSpc,   4, 8 ,false},
365 { ARM::VST1q8LowTPseudo_UPD,   ARM::VST1d8Twb_fixed,  false, true, true, SingleLowSpc,   3, 8 ,false},
366 
367 { ARM::VST2LNd16Pseudo,     ARM::VST2LNd16,     false, false, false, SingleSpc, 2, 4 ,true},
368 { ARM::VST2LNd16Pseudo_UPD, ARM::VST2LNd16_UPD, false, true, true,  SingleSpc, 2, 4 ,true},
369 { ARM::VST2LNd32Pseudo,     ARM::VST2LNd32,     false, false, false, SingleSpc, 2, 2 ,true},
370 { ARM::VST2LNd32Pseudo_UPD, ARM::VST2LNd32_UPD, false, true, true,  SingleSpc, 2, 2 ,true},
371 { ARM::VST2LNd8Pseudo,      ARM::VST2LNd8,      false, false, false, SingleSpc, 2, 8 ,true},
372 { ARM::VST2LNd8Pseudo_UPD,  ARM::VST2LNd8_UPD, false, true, true,  SingleSpc, 2, 8 ,true},
373 { ARM::VST2LNq16Pseudo,     ARM::VST2LNq16,     false, false, false, EvenDblSpc, 2, 4,true},
374 { ARM::VST2LNq16Pseudo_UPD, ARM::VST2LNq16_UPD, false, true, true,  EvenDblSpc, 2, 4,true},
375 { ARM::VST2LNq32Pseudo,     ARM::VST2LNq32,     false, false, false, EvenDblSpc, 2, 2,true},
376 { ARM::VST2LNq32Pseudo_UPD, ARM::VST2LNq32_UPD, false, true, true,  EvenDblSpc, 2, 2,true},
377 
378 { ARM::VST2q16Pseudo,       ARM::VST2q16,      false, false, false, SingleSpc,  4, 4 ,false},
379 { ARM::VST2q16PseudoWB_fixed,   ARM::VST2q16wb_fixed, false, true, false,  SingleSpc,  4, 4 ,false},
380 { ARM::VST2q16PseudoWB_register,   ARM::VST2q16wb_register, false, true, true,  SingleSpc,  4, 4 ,false},
381 { ARM::VST2q32Pseudo,       ARM::VST2q32,      false, false, false, SingleSpc,  4, 2 ,false},
382 { ARM::VST2q32PseudoWB_fixed,   ARM::VST2q32wb_fixed, false, true, false,  SingleSpc,  4, 2 ,false},
383 { ARM::VST2q32PseudoWB_register,   ARM::VST2q32wb_register, false, true, true,  SingleSpc,  4, 2 ,false},
384 { ARM::VST2q8Pseudo,        ARM::VST2q8,       false, false, false, SingleSpc,  4, 8 ,false},
385 { ARM::VST2q8PseudoWB_fixed,    ARM::VST2q8wb_fixed, false, true, false,  SingleSpc,  4, 8 ,false},
386 { ARM::VST2q8PseudoWB_register,    ARM::VST2q8wb_register, false, true, true,  SingleSpc,  4, 8 ,false},
387 
388 { ARM::VST3LNd16Pseudo,     ARM::VST3LNd16,     false, false, false, SingleSpc, 3, 4 ,true},
389 { ARM::VST3LNd16Pseudo_UPD, ARM::VST3LNd16_UPD, false, true, true,  SingleSpc, 3, 4 ,true},
390 { ARM::VST3LNd32Pseudo,     ARM::VST3LNd32,     false, false, false, SingleSpc, 3, 2 ,true},
391 { ARM::VST3LNd32Pseudo_UPD, ARM::VST3LNd32_UPD, false, true, true,  SingleSpc, 3, 2 ,true},
392 { ARM::VST3LNd8Pseudo,      ARM::VST3LNd8,      false, false, false, SingleSpc, 3, 8 ,true},
393 { ARM::VST3LNd8Pseudo_UPD,  ARM::VST3LNd8_UPD, false, true, true,  SingleSpc, 3, 8 ,true},
394 { ARM::VST3LNq16Pseudo,     ARM::VST3LNq16,     false, false, false, EvenDblSpc, 3, 4,true},
395 { ARM::VST3LNq16Pseudo_UPD, ARM::VST3LNq16_UPD, false, true, true,  EvenDblSpc, 3, 4,true},
396 { ARM::VST3LNq32Pseudo,     ARM::VST3LNq32,     false, false, false, EvenDblSpc, 3, 2,true},
397 { ARM::VST3LNq32Pseudo_UPD, ARM::VST3LNq32_UPD, false, true, true,  EvenDblSpc, 3, 2,true},
398 
399 { ARM::VST3d16Pseudo,       ARM::VST3d16,      false, false, false, SingleSpc,  3, 4 ,true},
400 { ARM::VST3d16Pseudo_UPD,   ARM::VST3d16_UPD, false, true, true,  SingleSpc,  3, 4 ,true},
401 { ARM::VST3d32Pseudo,       ARM::VST3d32,      false, false, false, SingleSpc,  3, 2 ,true},
402 { ARM::VST3d32Pseudo_UPD,   ARM::VST3d32_UPD, false, true, true,  SingleSpc,  3, 2 ,true},
403 { ARM::VST3d8Pseudo,        ARM::VST3d8,       false, false, false, SingleSpc,  3, 8 ,true},
404 { ARM::VST3d8Pseudo_UPD,    ARM::VST3d8_UPD, false, true, true,  SingleSpc,  3, 8 ,true},
405 
406 { ARM::VST3q16Pseudo_UPD,    ARM::VST3q16_UPD, false, true, true,  EvenDblSpc, 3, 4 ,true},
407 { ARM::VST3q16oddPseudo,     ARM::VST3q16,     false, false, false, OddDblSpc,  3, 4 ,true},
408 { ARM::VST3q16oddPseudo_UPD, ARM::VST3q16_UPD, false, true, true,  OddDblSpc,  3, 4 ,true},
409 { ARM::VST3q32Pseudo_UPD,    ARM::VST3q32_UPD, false, true, true,  EvenDblSpc, 3, 2 ,true},
410 { ARM::VST3q32oddPseudo,     ARM::VST3q32,     false, false, false, OddDblSpc,  3, 2 ,true},
411 { ARM::VST3q32oddPseudo_UPD, ARM::VST3q32_UPD, false, true, true,  OddDblSpc,  3, 2 ,true},
412 { ARM::VST3q8Pseudo_UPD,     ARM::VST3q8_UPD, false, true, true,  EvenDblSpc, 3, 8 ,true},
413 { ARM::VST3q8oddPseudo,      ARM::VST3q8,      false, false, false, OddDblSpc,  3, 8 ,true},
414 { ARM::VST3q8oddPseudo_UPD,  ARM::VST3q8_UPD, false, true, true,  OddDblSpc,  3, 8 ,true},
415 
416 { ARM::VST4LNd16Pseudo,     ARM::VST4LNd16,     false, false, false, SingleSpc, 4, 4 ,true},
417 { ARM::VST4LNd16Pseudo_UPD, ARM::VST4LNd16_UPD, false, true, true,  SingleSpc, 4, 4 ,true},
418 { ARM::VST4LNd32Pseudo,     ARM::VST4LNd32,     false, false, false, SingleSpc, 4, 2 ,true},
419 { ARM::VST4LNd32Pseudo_UPD, ARM::VST4LNd32_UPD, false, true, true,  SingleSpc, 4, 2 ,true},
420 { ARM::VST4LNd8Pseudo,      ARM::VST4LNd8,      false, false, false, SingleSpc, 4, 8 ,true},
421 { ARM::VST4LNd8Pseudo_UPD,  ARM::VST4LNd8_UPD, false, true, true,  SingleSpc, 4, 8 ,true},
422 { ARM::VST4LNq16Pseudo,     ARM::VST4LNq16,     false, false, false, EvenDblSpc, 4, 4,true},
423 { ARM::VST4LNq16Pseudo_UPD, ARM::VST4LNq16_UPD, false, true, true,  EvenDblSpc, 4, 4,true},
424 { ARM::VST4LNq32Pseudo,     ARM::VST4LNq32,     false, false, false, EvenDblSpc, 4, 2,true},
425 { ARM::VST4LNq32Pseudo_UPD, ARM::VST4LNq32_UPD, false, true, true,  EvenDblSpc, 4, 2,true},
426 
427 { ARM::VST4d16Pseudo,       ARM::VST4d16,      false, false, false, SingleSpc,  4, 4 ,true},
428 { ARM::VST4d16Pseudo_UPD,   ARM::VST4d16_UPD, false, true, true,  SingleSpc,  4, 4 ,true},
429 { ARM::VST4d32Pseudo,       ARM::VST4d32,      false, false, false, SingleSpc,  4, 2 ,true},
430 { ARM::VST4d32Pseudo_UPD,   ARM::VST4d32_UPD, false, true, true,  SingleSpc,  4, 2 ,true},
431 { ARM::VST4d8Pseudo,        ARM::VST4d8,       false, false, false, SingleSpc,  4, 8 ,true},
432 { ARM::VST4d8Pseudo_UPD,    ARM::VST4d8_UPD, false, true, true,  SingleSpc,  4, 8 ,true},
433 
434 { ARM::VST4q16Pseudo_UPD,    ARM::VST4q16_UPD, false, true, true,  EvenDblSpc, 4, 4 ,true},
435 { ARM::VST4q16oddPseudo,     ARM::VST4q16,     false, false, false, OddDblSpc,  4, 4 ,true},
436 { ARM::VST4q16oddPseudo_UPD, ARM::VST4q16_UPD, false, true, true,  OddDblSpc,  4, 4 ,true},
437 { ARM::VST4q32Pseudo_UPD,    ARM::VST4q32_UPD, false, true, true,  EvenDblSpc, 4, 2 ,true},
438 { ARM::VST4q32oddPseudo,     ARM::VST4q32,     false, false, false, OddDblSpc,  4, 2 ,true},
439 { ARM::VST4q32oddPseudo_UPD, ARM::VST4q32_UPD, false, true, true,  OddDblSpc,  4, 2 ,true},
440 { ARM::VST4q8Pseudo_UPD,     ARM::VST4q8_UPD, false, true, true,  EvenDblSpc, 4, 8 ,true},
441 { ARM::VST4q8oddPseudo,      ARM::VST4q8,      false, false, false, OddDblSpc,  4, 8 ,true},
442 { ARM::VST4q8oddPseudo_UPD,  ARM::VST4q8_UPD, false, true, true,  OddDblSpc,  4, 8 ,true}
443 };
444 
445 /// LookupNEONLdSt - Search the NEONLdStTable for information about a NEON
446 /// load or store pseudo instruction.
447 static const NEONLdStTableEntry *LookupNEONLdSt(unsigned Opcode) {
448 #ifndef NDEBUG
449   // Make sure the table is sorted.
450   static std::atomic<bool> TableChecked(false);
451   if (!TableChecked.load(std::memory_order_relaxed)) {
452     assert(llvm::is_sorted(NEONLdStTable) && "NEONLdStTable is not sorted!");
453     TableChecked.store(true, std::memory_order_relaxed);
454   }
455 #endif
456 
457   auto I = llvm::lower_bound(NEONLdStTable, Opcode);
458   if (I != std::end(NEONLdStTable) && I->PseudoOpc == Opcode)
459     return I;
460   return nullptr;
461 }
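// Typical use (see ExpandVLD/ExpandVST/ExpandLaneOp below): look up the pseudo
// opcode and, if an entry is found, emit RealOpc using the register spacing,
// register count and writeback information recorded in the entry.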
462 
463 /// GetDSubRegs - Get 4 D subregisters of a Q, QQ, or QQQQ register,
464 /// corresponding to the specified register spacing.  Not all of the results
465 /// are necessarily valid, e.g., a Q register only has 2 D subregisters.
466 static void GetDSubRegs(unsigned Reg, NEONRegSpacing RegSpc,
467                         const TargetRegisterInfo *TRI, unsigned &D0,
468                         unsigned &D1, unsigned &D2, unsigned &D3) {
469   if (RegSpc == SingleSpc || RegSpc == SingleLowSpc) {
470     D0 = TRI->getSubReg(Reg, ARM::dsub_0);
471     D1 = TRI->getSubReg(Reg, ARM::dsub_1);
472     D2 = TRI->getSubReg(Reg, ARM::dsub_2);
473     D3 = TRI->getSubReg(Reg, ARM::dsub_3);
474   } else if (RegSpc == SingleHighQSpc) {
475     D0 = TRI->getSubReg(Reg, ARM::dsub_4);
476     D1 = TRI->getSubReg(Reg, ARM::dsub_5);
477     D2 = TRI->getSubReg(Reg, ARM::dsub_6);
478     D3 = TRI->getSubReg(Reg, ARM::dsub_7);
479   } else if (RegSpc == SingleHighTSpc) {
480     D0 = TRI->getSubReg(Reg, ARM::dsub_3);
481     D1 = TRI->getSubReg(Reg, ARM::dsub_4);
482     D2 = TRI->getSubReg(Reg, ARM::dsub_5);
483     D3 = TRI->getSubReg(Reg, ARM::dsub_6);
484   } else if (RegSpc == EvenDblSpc) {
485     D0 = TRI->getSubReg(Reg, ARM::dsub_0);
486     D1 = TRI->getSubReg(Reg, ARM::dsub_2);
487     D2 = TRI->getSubReg(Reg, ARM::dsub_4);
488     D3 = TRI->getSubReg(Reg, ARM::dsub_6);
489   } else {
490     assert(RegSpc == OddDblSpc && "unknown register spacing");
491     D0 = TRI->getSubReg(Reg, ARM::dsub_1);
492     D1 = TRI->getSubReg(Reg, ARM::dsub_3);
493     D2 = TRI->getSubReg(Reg, ARM::dsub_5);
494     D3 = TRI->getSubReg(Reg, ARM::dsub_7);
495   }
496 }
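// Example (illustrative): for a QQ register with SingleSpc this returns its
// dsub_0..dsub_3 subregisters, while EvenDblSpc on a QQQQ register returns
// dsub_0, dsub_2, dsub_4 and dsub_6.  Callers that need fewer than four D
// registers simply ignore the trailing results.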
497 
498 /// ExpandVLD - Translate VLD pseudo instructions with Q, QQ or QQQQ register
499 /// operands to real VLD instructions with D register operands.
500 void ARMExpandPseudo::ExpandVLD(MachineBasicBlock::iterator &MBBI) {
501   MachineInstr &MI = *MBBI;
502   MachineBasicBlock &MBB = *MI.getParent();
503   LLVM_DEBUG(dbgs() << "Expanding: "; MI.dump());
504 
505   const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode());
506   assert(TableEntry && TableEntry->IsLoad && "NEONLdStTable lookup failed");
507   NEONRegSpacing RegSpc = (NEONRegSpacing)TableEntry->RegSpacing;
508   unsigned NumRegs = TableEntry->NumRegs;
509 
510   MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
511                                     TII->get(TableEntry->RealOpc));
512   unsigned OpIdx = 0;
513 
514   bool DstIsDead = MI.getOperand(OpIdx).isDead();
515   Register DstReg = MI.getOperand(OpIdx++).getReg();
516   if(TableEntry->RealOpc == ARM::VLD2DUPd8x2 ||
517      TableEntry->RealOpc == ARM::VLD2DUPd16x2 ||
518      TableEntry->RealOpc == ARM::VLD2DUPd32x2) {
519     unsigned SubRegIndex;
520     if (RegSpc == EvenDblSpc) {
521       SubRegIndex = ARM::dsub_0;
522     } else {
523       assert(RegSpc == OddDblSpc && "Unexpected spacing!");
524       SubRegIndex = ARM::dsub_1;
525     }
526     Register SubReg = TRI->getSubReg(DstReg, SubRegIndex);
527     unsigned DstRegPair = TRI->getMatchingSuperReg(SubReg, ARM::dsub_0,
528                                                    &ARM::DPairSpcRegClass);
529     MIB.addReg(DstRegPair, RegState::Define | getDeadRegState(DstIsDead));
530   } else {
531     unsigned D0, D1, D2, D3;
532     GetDSubRegs(DstReg, RegSpc, TRI, D0, D1, D2, D3);
533     MIB.addReg(D0, RegState::Define | getDeadRegState(DstIsDead));
534     if (NumRegs > 1 && TableEntry->copyAllListRegs)
535       MIB.addReg(D1, RegState::Define | getDeadRegState(DstIsDead));
536     if (NumRegs > 2 && TableEntry->copyAllListRegs)
537       MIB.addReg(D2, RegState::Define | getDeadRegState(DstIsDead));
538     if (NumRegs > 3 && TableEntry->copyAllListRegs)
539       MIB.addReg(D3, RegState::Define | getDeadRegState(DstIsDead));
540   }
541 
542   if (TableEntry->isUpdating)
543     MIB.add(MI.getOperand(OpIdx++));
544 
545   // Copy the addrmode6 operands.
546   MIB.add(MI.getOperand(OpIdx++));
547   MIB.add(MI.getOperand(OpIdx++));
548 
549   // Copy the am6offset operand.
550   if (TableEntry->hasWritebackOperand) {
551     // TODO: The writing-back pseudo instructions we translate here are all
552     // defined to take am6offset nodes capable of representing both fixed
553     // and register forms. Some real instructions, however, do not rely on
554     // am6offset and have separate definitions for such forms. When this is the
555     // case, fixed forms do not take any offset nodes, so here we skip them for
556     // such instructions. Once all real and pseudo writing-back instructions are
557     // rewritten without use of am6offset nodes, this code will go away.
558     const MachineOperand &AM6Offset = MI.getOperand(OpIdx++);
559     if (TableEntry->RealOpc == ARM::VLD1d8Qwb_fixed ||
560         TableEntry->RealOpc == ARM::VLD1d16Qwb_fixed ||
561         TableEntry->RealOpc == ARM::VLD1d32Qwb_fixed ||
562         TableEntry->RealOpc == ARM::VLD1d64Qwb_fixed ||
563         TableEntry->RealOpc == ARM::VLD1d8Twb_fixed ||
564         TableEntry->RealOpc == ARM::VLD1d16Twb_fixed ||
565         TableEntry->RealOpc == ARM::VLD1d32Twb_fixed ||
566         TableEntry->RealOpc == ARM::VLD1d64Twb_fixed) {
567       assert(AM6Offset.getReg() == 0 &&
568              "A fixed writing-back pseudo instruction provides an offset "
569              "register!");
570     } else {
571       MIB.add(AM6Offset);
572     }
573   }
574 
575   // For an instruction writing double-spaced subregs, the pseudo instruction
576   // has an extra operand that is a use of the super-register.  Record the
577   // operand index and skip over it.
578   unsigned SrcOpIdx = 0;
579   if(TableEntry->RealOpc != ARM::VLD2DUPd8x2 &&
580      TableEntry->RealOpc != ARM::VLD2DUPd16x2 &&
581      TableEntry->RealOpc != ARM::VLD2DUPd32x2) {
582     if (RegSpc == EvenDblSpc || RegSpc == OddDblSpc ||
583         RegSpc == SingleLowSpc || RegSpc == SingleHighQSpc ||
584         RegSpc == SingleHighTSpc)
585       SrcOpIdx = OpIdx++;
586   }
587 
588   // Copy the predicate operands.
589   MIB.add(MI.getOperand(OpIdx++));
590   MIB.add(MI.getOperand(OpIdx++));
591 
592   // Copy the super-register source operand used for double-spaced subregs over
593   // to the new instruction as an implicit operand.
594   if (SrcOpIdx != 0) {
595     MachineOperand MO = MI.getOperand(SrcOpIdx);
596     MO.setImplicit(true);
597     MIB.add(MO);
598   }
599   // Add an implicit def for the super-register.
600   MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
601   TransferImpOps(MI, MIB, MIB);
602 
603   // Transfer memoperands.
604   MIB.cloneMemRefs(MI);
605   MI.eraseFromParent();
606   LLVM_DEBUG(dbgs() << "To:        "; MIB.getInstr()->dump(););
607 }
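// Net effect, sketched with hypothetical register names (predicate operands
// shown schematically): a pseudo such as
//   <qq> = VLD4d8Pseudo <addr6 ops>, <pred ops>
// becomes the real
//   <d0>,<d1>,<d2>,<d3> = VLD4d8 <addr6 ops>, <pred ops>, implicit-def <qq>
// with memoperands and implicit operands transferred from the pseudo.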
608 
609 /// ExpandVST - Translate VST pseudo instructions with Q, QQ or QQQQ register
610 /// operands to real VST instructions with D register operands.
611 void ARMExpandPseudo::ExpandVST(MachineBasicBlock::iterator &MBBI) {
612   MachineInstr &MI = *MBBI;
613   MachineBasicBlock &MBB = *MI.getParent();
614   LLVM_DEBUG(dbgs() << "Expanding: "; MI.dump());
615 
616   const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode());
617   assert(TableEntry && !TableEntry->IsLoad && "NEONLdStTable lookup failed");
618   NEONRegSpacing RegSpc = (NEONRegSpacing)TableEntry->RegSpacing;
619   unsigned NumRegs = TableEntry->NumRegs;
620 
621   MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
622                                     TII->get(TableEntry->RealOpc));
623   unsigned OpIdx = 0;
624   if (TableEntry->isUpdating)
625     MIB.add(MI.getOperand(OpIdx++));
626 
627   // Copy the addrmode6 operands.
628   MIB.add(MI.getOperand(OpIdx++));
629   MIB.add(MI.getOperand(OpIdx++));
630 
631   if (TableEntry->hasWritebackOperand) {
632     // TODO: The writing-back pseudo instructions we translate here are all
633     // defined to take am6offset nodes capable of representing both fixed
634     // and register forms. Some real instructions, however, do not rely on
635     // am6offset and have separate definitions for such forms. When this is the
636     // case, fixed forms do not take any offset nodes, so here we skip them for
637     // such instructions. Once all real and pseudo writing-back instructions are
638     // rewritten without use of am6offset nodes, this code will go away.
639     const MachineOperand &AM6Offset = MI.getOperand(OpIdx++);
640     if (TableEntry->RealOpc == ARM::VST1d8Qwb_fixed ||
641         TableEntry->RealOpc == ARM::VST1d16Qwb_fixed ||
642         TableEntry->RealOpc == ARM::VST1d32Qwb_fixed ||
643         TableEntry->RealOpc == ARM::VST1d64Qwb_fixed ||
644         TableEntry->RealOpc == ARM::VST1d8Twb_fixed ||
645         TableEntry->RealOpc == ARM::VST1d16Twb_fixed ||
646         TableEntry->RealOpc == ARM::VST1d32Twb_fixed ||
647         TableEntry->RealOpc == ARM::VST1d64Twb_fixed) {
648       assert(AM6Offset.getReg() == 0 &&
649              "A fixed writing-back pseudo instruction provides an offset "
650              "register!");
651     } else {
652       MIB.add(AM6Offset);
653     }
654   }
655 
656   bool SrcIsKill = MI.getOperand(OpIdx).isKill();
657   bool SrcIsUndef = MI.getOperand(OpIdx).isUndef();
658   Register SrcReg = MI.getOperand(OpIdx++).getReg();
659   unsigned D0, D1, D2, D3;
660   GetDSubRegs(SrcReg, RegSpc, TRI, D0, D1, D2, D3);
661   MIB.addReg(D0, getUndefRegState(SrcIsUndef));
662   if (NumRegs > 1 && TableEntry->copyAllListRegs)
663     MIB.addReg(D1, getUndefRegState(SrcIsUndef));
664   if (NumRegs > 2 && TableEntry->copyAllListRegs)
665     MIB.addReg(D2, getUndefRegState(SrcIsUndef));
666   if (NumRegs > 3 && TableEntry->copyAllListRegs)
667     MIB.addReg(D3, getUndefRegState(SrcIsUndef));
668 
669   // Copy the predicate operands.
670   MIB.add(MI.getOperand(OpIdx++));
671   MIB.add(MI.getOperand(OpIdx++));
672 
673   if (SrcIsKill && !SrcIsUndef) // Add an implicit kill for the super-reg.
674     MIB->addRegisterKilled(SrcReg, TRI, true);
675   else if (!SrcIsUndef)
676     MIB.addReg(SrcReg, RegState::Implicit); // Add implicit uses for src reg.
677   TransferImpOps(MI, MIB, MIB);
678 
679   // Transfer memoperands.
680   MIB.cloneMemRefs(MI);
681   MI.eraseFromParent();
682   LLVM_DEBUG(dbgs() << "To:        "; MIB.getInstr()->dump(););
683 }
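// The store side mirrors ExpandVLD: the D subregisters become explicit
// sources, and the original Q/QQ/QQQQ source stays visible either as an
// implicit kill (if the pseudo killed it) or as an implicit use.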
684 
685 /// ExpandLaneOp - Translate VLD*LN and VST*LN instructions with Q, QQ or QQQQ
686 /// register operands to real instructions with D register operands.
687 void ARMExpandPseudo::ExpandLaneOp(MachineBasicBlock::iterator &MBBI) {
688   MachineInstr &MI = *MBBI;
689   MachineBasicBlock &MBB = *MI.getParent();
690   LLVM_DEBUG(dbgs() << "Expanding: "; MI.dump());
691 
692   const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode());
693   assert(TableEntry && "NEONLdStTable lookup failed");
694   NEONRegSpacing RegSpc = (NEONRegSpacing)TableEntry->RegSpacing;
695   unsigned NumRegs = TableEntry->NumRegs;
696   unsigned RegElts = TableEntry->RegElts;
697 
698   MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
699                                     TII->get(TableEntry->RealOpc));
700   unsigned OpIdx = 0;
701   // The lane operand is always the 3rd from last operand, before the 2
702   // predicate operands.
703   unsigned Lane = MI.getOperand(MI.getDesc().getNumOperands() - 3).getImm();
704 
705   // Adjust the lane and spacing as needed for Q registers.
706   assert(RegSpc != OddDblSpc && "unexpected register spacing for VLD/VST-lane");
707   if (RegSpc == EvenDblSpc && Lane >= RegElts) {
708     RegSpc = OddDblSpc;
709     Lane -= RegElts;
710   }
711   assert(Lane < RegElts && "out of range lane for VLD/VST-lane");
712 
713   unsigned D0 = 0, D1 = 0, D2 = 0, D3 = 0;
714   unsigned DstReg = 0;
715   bool DstIsDead = false;
716   if (TableEntry->IsLoad) {
717     DstIsDead = MI.getOperand(OpIdx).isDead();
718     DstReg = MI.getOperand(OpIdx++).getReg();
719     GetDSubRegs(DstReg, RegSpc, TRI, D0, D1, D2, D3);
720     MIB.addReg(D0, RegState::Define | getDeadRegState(DstIsDead));
721     if (NumRegs > 1)
722       MIB.addReg(D1, RegState::Define | getDeadRegState(DstIsDead));
723     if (NumRegs > 2)
724       MIB.addReg(D2, RegState::Define | getDeadRegState(DstIsDead));
725     if (NumRegs > 3)
726       MIB.addReg(D3, RegState::Define | getDeadRegState(DstIsDead));
727   }
728 
729   if (TableEntry->isUpdating)
730     MIB.add(MI.getOperand(OpIdx++));
731 
732   // Copy the addrmode6 operands.
733   MIB.add(MI.getOperand(OpIdx++));
734   MIB.add(MI.getOperand(OpIdx++));
735   // Copy the am6offset operand.
736   if (TableEntry->hasWritebackOperand)
737     MIB.add(MI.getOperand(OpIdx++));
738 
739   // Grab the super-register source.
740   MachineOperand MO = MI.getOperand(OpIdx++);
741   if (!TableEntry->IsLoad)
742     GetDSubRegs(MO.getReg(), RegSpc, TRI, D0, D1, D2, D3);
743 
744   // Add the subregs as sources of the new instruction.
745   unsigned SrcFlags = (getUndefRegState(MO.isUndef()) |
746                        getKillRegState(MO.isKill()));
747   MIB.addReg(D0, SrcFlags);
748   if (NumRegs > 1)
749     MIB.addReg(D1, SrcFlags);
750   if (NumRegs > 2)
751     MIB.addReg(D2, SrcFlags);
752   if (NumRegs > 3)
753     MIB.addReg(D3, SrcFlags);
754 
755   // Add the lane number operand.
756   MIB.addImm(Lane);
757   OpIdx += 1;
758 
759   // Copy the predicate operands.
760   MIB.add(MI.getOperand(OpIdx++));
761   MIB.add(MI.getOperand(OpIdx++));
762 
763   // Copy the super-register source to be an implicit source.
764   MO.setImplicit(true);
765   MIB.add(MO);
766   if (TableEntry->IsLoad)
767     // Add an implicit def for the super-register.
768     MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
769   TransferImpOps(MI, MIB, MIB);
770   // Transfer memoperands.
771   MIB.cloneMemRefs(MI);
772   MI.eraseFromParent();
773 }
774 
775 /// ExpandVTBL - Translate VTBL and VTBX pseudo instructions with Q or QQ
776 /// register operands to real instructions with D register operands.
777 void ARMExpandPseudo::ExpandVTBL(MachineBasicBlock::iterator &MBBI,
778                                  unsigned Opc, bool IsExt) {
779   MachineInstr &MI = *MBBI;
780   MachineBasicBlock &MBB = *MI.getParent();
781   LLVM_DEBUG(dbgs() << "Expanding: "; MI.dump());
782 
783   MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc));
784   unsigned OpIdx = 0;
785 
786   // Transfer the destination register operand.
787   MIB.add(MI.getOperand(OpIdx++));
788   if (IsExt) {
789     MachineOperand VdSrc(MI.getOperand(OpIdx++));
790     MIB.add(VdSrc);
791   }
792 
793   bool SrcIsKill = MI.getOperand(OpIdx).isKill();
794   Register SrcReg = MI.getOperand(OpIdx++).getReg();
795   unsigned D0, D1, D2, D3;
796   GetDSubRegs(SrcReg, SingleSpc, TRI, D0, D1, D2, D3);
797   MIB.addReg(D0);
798 
799   // Copy the other source register operand.
800   MachineOperand VmSrc(MI.getOperand(OpIdx++));
801   MIB.add(VmSrc);
802 
803   // Copy the predicate operands.
804   MIB.add(MI.getOperand(OpIdx++));
805   MIB.add(MI.getOperand(OpIdx++));
806 
807   // Add an implicit kill and use for the super-reg.
808   MIB.addReg(SrcReg, RegState::Implicit | getKillRegState(SrcIsKill));
809   TransferImpOps(MI, MIB, MIB);
810   MI.eraseFromParent();
811   LLVM_DEBUG(dbgs() << "To:        "; MIB.getInstr()->dump(););
812 }
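// Note: only the first D subregister of the table source is added explicitly
// (the real instruction encodes a single register; see the copyAllListRegs
// FIXME above), while the implicit use/kill of the super-register keeps the
// whole register list alive.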
813 
814 static bool IsAnAddressOperand(const MachineOperand &MO) {
815   // This check is overly conservative.  Unless we are certain that the machine
816   // operand is not a symbol reference, we return that it is a symbol reference.
817 // This is important as the load pair may not be split up on Windows.
818   switch (MO.getType()) {
819   case MachineOperand::MO_Register:
820   case MachineOperand::MO_Immediate:
821   case MachineOperand::MO_CImmediate:
822   case MachineOperand::MO_FPImmediate:
823   case MachineOperand::MO_ShuffleMask:
824     return false;
825   case MachineOperand::MO_MachineBasicBlock:
826     return true;
827   case MachineOperand::MO_FrameIndex:
828     return false;
829   case MachineOperand::MO_ConstantPoolIndex:
830   case MachineOperand::MO_TargetIndex:
831   case MachineOperand::MO_JumpTableIndex:
832   case MachineOperand::MO_ExternalSymbol:
833   case MachineOperand::MO_GlobalAddress:
834   case MachineOperand::MO_BlockAddress:
835     return true;
836   case MachineOperand::MO_RegisterMask:
837   case MachineOperand::MO_RegisterLiveOut:
838     return false;
839   case MachineOperand::MO_Metadata:
840   case MachineOperand::MO_MCSymbol:
841     return true;
842   case MachineOperand::MO_CFIIndex:
843     return false;
844   case MachineOperand::MO_IntrinsicID:
845   case MachineOperand::MO_Predicate:
846     llvm_unreachable("should not exist post-isel");
847   }
848   llvm_unreachable("unhandled machine operand type");
849 }
850 
851 static MachineOperand makeImplicit(const MachineOperand &MO) {
852   MachineOperand NewMO = MO;
853   NewMO.setImplicit();
854   return NewMO;
855 }
856 
857 void ARMExpandPseudo::ExpandMOV32BitImm(MachineBasicBlock &MBB,
858                                         MachineBasicBlock::iterator &MBBI) {
859   MachineInstr &MI = *MBBI;
860   unsigned Opcode = MI.getOpcode();
861   Register PredReg;
862   ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
863   Register DstReg = MI.getOperand(0).getReg();
864   bool DstIsDead = MI.getOperand(0).isDead();
865   bool isCC = Opcode == ARM::MOVCCi32imm || Opcode == ARM::t2MOVCCi32imm;
866   const MachineOperand &MO = MI.getOperand(isCC ? 2 : 1);
867   bool RequiresBundling = STI->isTargetWindows() && IsAnAddressOperand(MO);
868   MachineInstrBuilder LO16, HI16;
869   LLVM_DEBUG(dbgs() << "Expanding: "; MI.dump());
870 
871   if (!STI->hasV6T2Ops() &&
872       (Opcode == ARM::MOVi32imm || Opcode == ARM::MOVCCi32imm)) {
873     // FIXME Windows CE supports older ARM CPUs
874     assert(!STI->isTargetWindows() && "Windows on ARM requires ARMv7+");
875 
876     // Expand into a movi + orr.
877     LO16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVi), DstReg);
878     HI16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::ORRri))
879       .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
880       .addReg(DstReg);
881 
882     assert(MO.isImm() && "MOVi32imm w/ non-immediate source operand!");
883     unsigned ImmVal = (unsigned)MO.getImm();
884     unsigned SOImmValV1 = ARM_AM::getSOImmTwoPartFirst(ImmVal);
885     unsigned SOImmValV2 = ARM_AM::getSOImmTwoPartSecond(ImmVal);
886     unsigned MIFlags = MI.getFlags();
887     LO16 = LO16.addImm(SOImmValV1);
888     HI16 = HI16.addImm(SOImmValV2);
889     LO16.cloneMemRefs(MI);
890     HI16.cloneMemRefs(MI);
891     LO16.setMIFlags(MIFlags);
892     HI16.setMIFlags(MIFlags);
893     LO16.addImm(Pred).addReg(PredReg).add(condCodeOp());
894     HI16.addImm(Pred).addReg(PredReg).add(condCodeOp());
895     if (isCC)
896       LO16.add(makeImplicit(MI.getOperand(1)));
897     TransferImpOps(MI, LO16, HI16);
898     MI.eraseFromParent();
899     return;
900   }
901 
902   unsigned LO16Opc = 0;
903   unsigned HI16Opc = 0;
904   unsigned MIFlags = MI.getFlags();
905   if (Opcode == ARM::t2MOVi32imm || Opcode == ARM::t2MOVCCi32imm) {
906     LO16Opc = ARM::t2MOVi16;
907     HI16Opc = ARM::t2MOVTi16;
908   } else {
909     LO16Opc = ARM::MOVi16;
910     HI16Opc = ARM::MOVTi16;
911   }
912 
913   LO16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(LO16Opc), DstReg);
914   HI16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(HI16Opc))
915     .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
916     .addReg(DstReg);
917 
918   LO16.setMIFlags(MIFlags);
919   HI16.setMIFlags(MIFlags);
920 
921   switch (MO.getType()) {
922   case MachineOperand::MO_Immediate: {
923     unsigned Imm = MO.getImm();
924     unsigned Lo16 = Imm & 0xffff;
925     unsigned Hi16 = (Imm >> 16) & 0xffff;
926     LO16 = LO16.addImm(Lo16);
927     HI16 = HI16.addImm(Hi16);
928     break;
929   }
930   case MachineOperand::MO_ExternalSymbol: {
931     const char *ES = MO.getSymbolName();
932     unsigned TF = MO.getTargetFlags();
933     LO16 = LO16.addExternalSymbol(ES, TF | ARMII::MO_LO16);
934     HI16 = HI16.addExternalSymbol(ES, TF | ARMII::MO_HI16);
935     break;
936   }
937   default: {
938     const GlobalValue *GV = MO.getGlobal();
939     unsigned TF = MO.getTargetFlags();
940     LO16 = LO16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_LO16);
941     HI16 = HI16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_HI16);
942     break;
943   }
944   }
945 
946   LO16.cloneMemRefs(MI);
947   HI16.cloneMemRefs(MI);
948   LO16.addImm(Pred).addReg(PredReg);
949   HI16.addImm(Pred).addReg(PredReg);
950 
951   if (RequiresBundling)
952     finalizeBundle(MBB, LO16->getIterator(), MBBI->getIterator());
953 
954   if (isCC)
955     LO16.add(makeImplicit(MI.getOperand(1)));
956   TransferImpOps(MI, LO16, HI16);
957   MI.eraseFromParent();
958   LLVM_DEBUG(dbgs() << "To:        "; LO16.getInstr()->dump(););
959   LLVM_DEBUG(dbgs() << "And:       "; HI16.getInstr()->dump(););
960 }
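// Illustrative expansion (Thumb2 shown; the ARM-mode v6T2+ split is the same,
// predicate operands omitted):
//   $r0 = t2MOVi32imm 0x12345678
// becomes
//   $r0 = t2MOVi16 0x5678          ; low half
//   $r0 = t2MOVTi16 $r0, 0x1234    ; high half
// For pre-v6T2 ARM targets the immediate is instead split into two
// SO-immediate parts and materialised with a MOVi followed by an ORRri.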
961 
962 // The size of the area accessed by VLSTM/VLLDM:
963 // S0-S31 + FPSCR + 8 more bytes (VPR + pad, or just pad)
964 static const int CMSE_FP_SAVE_SIZE = 136;
965 
966 static void determineGPRegsToClear(const MachineInstr &MI,
967                                    const std::initializer_list<unsigned> &Regs,
968                                    SmallVectorImpl<unsigned> &ClearRegs) {
969   SmallVector<unsigned, 4> OpRegs;
970   for (const MachineOperand &Op : MI.operands()) {
971     if (!Op.isReg() || !Op.isUse())
972       continue;
973     OpRegs.push_back(Op.getReg());
974   }
975   llvm::sort(OpRegs);
976 
977   std::set_difference(Regs.begin(), Regs.end(), OpRegs.begin(), OpRegs.end(),
978                       std::back_inserter(ClearRegs));
979 }
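// In other words, ClearRegs receives every register from Regs that does not
// appear as a use operand of MI, so registers actually carrying values into
// the instruction are left alone.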
980 
981 void ARMExpandPseudo::CMSEClearGPRegs(
982     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
983     const DebugLoc &DL, const SmallVectorImpl<unsigned> &ClearRegs,
984     unsigned ClobberReg) {
985 
986   if (STI->hasV8_1MMainlineOps()) {
987     // Clear the registers using the CLRM instruction.
988     MachineInstrBuilder CLRM =
989         BuildMI(MBB, MBBI, DL, TII->get(ARM::t2CLRM)).add(predOps(ARMCC::AL));
990     for (unsigned R : ClearRegs)
991       CLRM.addReg(R, RegState::Define);
992     CLRM.addReg(ARM::APSR, RegState::Define);
993     CLRM.addReg(ARM::CPSR, RegState::Define | RegState::Implicit);
994   } else {
995     // Clear the registers and flags by copying ClobberReg into them.
996     // (Baseline can't do a high register clear in one instruction).
997     for (unsigned Reg : ClearRegs) {
998       if (Reg == ClobberReg)
999         continue;
1000       BuildMI(MBB, MBBI, DL, TII->get(ARM::tMOVr), Reg)
1001           .addReg(ClobberReg)
1002           .add(predOps(ARMCC::AL));
1003     }
1004 
1005     BuildMI(MBB, MBBI, DL, TII->get(ARM::t2MSR_M))
1006         .addImm(STI->hasDSP() ? 0xc00 : 0x800)
1007         .addReg(ClobberReg)
1008         .add(predOps(ARMCC::AL));
1009   }
1010 }
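// Roughly, on v8.1-M Mainline the sequence above is a single
//   clrm {r1, r2, r3, r12, apsr}
// while on other targets it is a series of "mov rN, <ClobberReg>" copies
// followed by
//   msr apsr_nzcvq, <ClobberReg>   ; apsr_nzcvqg when the DSP extension is present
// (illustrative only; the register list depends on ClearRegs, and ClobberReg
// is e.g. LR for returns).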
1011 
1012 // Find which FP registers need to be cleared.  The parameter `ClearRegs` is
1013 // initialised with all elements set to true, and this function resets the
1014 // bits that correspond to register uses. Returns true if any floating point
1015 // register is defined, false otherwise.
1016 static bool determineFPRegsToClear(const MachineInstr &MI,
1017                                    BitVector &ClearRegs) {
1018   bool DefFP = false;
1019   for (const MachineOperand &Op : MI.operands()) {
1020     if (!Op.isReg())
1021       continue;
1022 
1023     unsigned Reg = Op.getReg();
1024     if (Op.isDef()) {
1025       if ((Reg >= ARM::Q0 && Reg <= ARM::Q7) ||
1026           (Reg >= ARM::D0 && Reg <= ARM::D15) ||
1027           (Reg >= ARM::S0 && Reg <= ARM::S31))
1028         DefFP = true;
1029       continue;
1030     }
1031 
1032     if (Reg >= ARM::Q0 && Reg <= ARM::Q7) {
1033       int R = Reg - ARM::Q0;
1034       ClearRegs.reset(R * 4, (R + 1) * 4);
1035     } else if (Reg >= ARM::D0 && Reg <= ARM::D15) {
1036       int R = Reg - ARM::D0;
1037       ClearRegs.reset(R * 2, (R + 1) * 2);
1038     } else if (Reg >= ARM::S0 && Reg <= ARM::S31) {
1039       ClearRegs[Reg - ARM::S0] = false;
1040     }
1041   }
1042   return DefFP;
1043 }
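// Illustrative example: an instruction that uses D1 as an argument resets
// bits 2 and 3 of ClearRegs, so S2 and S3 are left holding the argument
// instead of being cleared.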
1044 
1045 MachineBasicBlock &
1046 ARMExpandPseudo::CMSEClearFPRegs(MachineBasicBlock &MBB,
1047                                  MachineBasicBlock::iterator MBBI) {
1048   BitVector ClearRegs(16, true);
1049   (void)determineFPRegsToClear(*MBBI, ClearRegs);
1050 
1051   if (STI->hasV8_1MMainlineOps())
1052     return CMSEClearFPRegsV81(MBB, MBBI, ClearRegs);
1053   else
1054     return CMSEClearFPRegsV8(MBB, MBBI, ClearRegs);
1055 }
1056 
1057 // Clear the FP registers for v8.0-M by copying the contents of LR into
1058 // them. Uses R12 as a scratch register.
1059 MachineBasicBlock &
1060 ARMExpandPseudo::CMSEClearFPRegsV8(MachineBasicBlock &MBB,
1061                                    MachineBasicBlock::iterator MBBI,
1062                                    const BitVector &ClearRegs) {
1063   if (!STI->hasFPRegs())
1064     return MBB;
1065 
1066   auto &RetI = *MBBI;
1067   const DebugLoc &DL = RetI.getDebugLoc();
1068 
1069   // If optimising for minimum size, clear FP registers unconditionally.
1070   // Otherwise, check the CONTROL.SFPA (Secure Floating-Point Active) bit and
1071   // don't clear them if they belong to the non-secure state.
1072   MachineBasicBlock *ClearBB, *DoneBB;
1073   if (STI->hasMinSize()) {
1074     ClearBB = DoneBB = &MBB;
1075   } else {
1076     MachineFunction *MF = MBB.getParent();
1077     ClearBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1078     DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1079 
1080     MF->insert(++MBB.getIterator(), ClearBB);
1081     MF->insert(++ClearBB->getIterator(), DoneBB);
1082 
1083     DoneBB->splice(DoneBB->end(), &MBB, MBBI, MBB.end());
1084     DoneBB->transferSuccessors(&MBB);
1085     MBB.addSuccessor(ClearBB);
1086     MBB.addSuccessor(DoneBB);
1087     ClearBB->addSuccessor(DoneBB);
1088 
1089     // The new basic blocks need the registers used for the return value,
1090     // as well as LR (used to clear the registers), marked as live-in.
1091     for (const MachineOperand &Op : RetI.operands()) {
1092       if (!Op.isReg())
1093         continue;
1094       Register Reg = Op.getReg();
1095       if (Reg == ARM::NoRegister || Reg == ARM::LR)
1096         continue;
1097       assert(Register::isPhysicalRegister(Reg) && "Unallocated register");
1098       ClearBB->addLiveIn(Reg);
1099       DoneBB->addLiveIn(Reg);
1100     }
1101     ClearBB->addLiveIn(ARM::LR);
1102     DoneBB->addLiveIn(ARM::LR);
1103 
1104     // Read the CONTROL register.
1105     BuildMI(MBB, MBB.end(), DL, TII->get(ARM::t2MRS_M), ARM::R12)
1106         .addImm(20)
1107         .add(predOps(ARMCC::AL));
1108     // Check bit 3 (SFPA).
1109     BuildMI(MBB, MBB.end(), DL, TII->get(ARM::t2TSTri))
1110         .addReg(ARM::R12)
1111         .addImm(8)
1112         .add(predOps(ARMCC::AL));
1113     // If SFPA is clear, jump over ClearBB to DoneBB.
1114     BuildMI(MBB, MBB.end(), DL, TII->get(ARM::tBcc))
1115         .addMBB(DoneBB)
1116         .addImm(ARMCC::EQ)
1117         .addReg(ARM::CPSR, RegState::Kill);
1118   }
1119 
1120   // Emit the clearing sequence
1121   for (unsigned D = 0; D < 8; D++) {
1122     // Attempt to clear as double
1123     if (ClearRegs[D * 2 + 0] && ClearRegs[D * 2 + 1]) {
1124       unsigned Reg = ARM::D0 + D;
1125       BuildMI(ClearBB, DL, TII->get(ARM::VMOVDRR), Reg)
1126           .addReg(ARM::LR)
1127           .addReg(ARM::LR)
1128           .add(predOps(ARMCC::AL));
1129     } else {
1130       // Clear first part as single
1131       if (ClearRegs[D * 2 + 0]) {
1132         unsigned Reg = ARM::S0 + D * 2;
1133         BuildMI(ClearBB, DL, TII->get(ARM::VMOVSR), Reg)
1134             .addReg(ARM::LR)
1135             .add(predOps(ARMCC::AL));
1136       }
1137       // Clear second part as single
1138       if (ClearRegs[D * 2 + 1]) {
1139         unsigned Reg = ARM::S0 + D * 2 + 1;
1140         BuildMI(ClearBB, DL, TII->get(ARM::VMOVSR), Reg)
1141             .addReg(ARM::LR)
1142             .add(predOps(ARMCC::AL));
1143       }
1144     }
1145   }
1146 
1147   // Clear FPSCR bits 0-4, 7, 28-31
1148   // The other bits are program global according to the AAPCS
1149   BuildMI(ClearBB, DL, TII->get(ARM::VMRS), ARM::R12)
1150       .add(predOps(ARMCC::AL));
1151   BuildMI(ClearBB, DL, TII->get(ARM::t2BICri), ARM::R12)
1152       .addReg(ARM::R12)
1153       .addImm(0x0000009F)
1154       .add(predOps(ARMCC::AL))
1155       .add(condCodeOp());
1156   BuildMI(ClearBB, DL, TII->get(ARM::t2BICri), ARM::R12)
1157       .addReg(ARM::R12)
1158       .addImm(0xF0000000)
1159       .add(predOps(ARMCC::AL))
1160       .add(condCodeOp());
1161   BuildMI(ClearBB, DL, TII->get(ARM::VMSR))
1162       .addReg(ARM::R12)
1163       .add(predOps(ARMCC::AL));
1164 
1165   return *DoneBB;
1166 }
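// Roughly, when not optimising for size the code above emits
//   mrs   r12, control
//   tst   r12, #8             ; CONTROL.SFPA
//   beq   .Ldone
//   vmov  d0, lr, lr          ; one vmov per D (or S) register to clear
//   ...
//   vmrs  r12, fpscr
//   bic   r12, r12, #0x9f
//   bic   r12, r12, #0xf0000000
//   vmsr  fpscr, r12
// .Ldone:
// (illustrative only; with minsize the check and branch are omitted).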
1167 
1168 MachineBasicBlock &
1169 ARMExpandPseudo::CMSEClearFPRegsV81(MachineBasicBlock &MBB,
1170                                     MachineBasicBlock::iterator MBBI,
1171                                     const BitVector &ClearRegs) {
1172   auto &RetI = *MBBI;
1173 
1174   // Emit a sequence of VSCCLRM <sreglist> instructions, one instruction for
1175   // each contiguous sequence of S-registers.
1176   int Start = -1, End = -1;
1177   for (int S = 0, E = ClearRegs.size(); S != E; ++S) {
1178     if (ClearRegs[S] && S == End + 1) {
1179       End = S; // extend range
1180       continue;
1181     }
1182     // Emit current range.
1183     if (Start < End) {
1184       MachineInstrBuilder VSCCLRM =
1185           BuildMI(MBB, MBBI, RetI.getDebugLoc(), TII->get(ARM::VSCCLRMS))
1186               .add(predOps(ARMCC::AL));
1187       while (++Start <= End)
1188         VSCCLRM.addReg(ARM::S0 + Start, RegState::Define);
1189       VSCCLRM.addReg(ARM::VPR, RegState::Define);
1190     }
1191     Start = End = S;
1192   }
1193   // Emit last range.
1194   if (Start < End) {
1195     MachineInstrBuilder VSCCLRM =
1196         BuildMI(MBB, MBBI, RetI.getDebugLoc(), TII->get(ARM::VSCCLRMS))
1197             .add(predOps(ARMCC::AL));
1198     while (++Start <= End)
1199       VSCCLRM.addReg(ARM::S0 + Start, RegState::Define);
1200     VSCCLRM.addReg(ARM::VPR, RegState::Define);
1201   }
1202 
1203   return MBB;
1204 }
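// Illustrative example: with ClearRegs covering S0-S3 and S5 this emits
//   vscclrm {s0-s3, vpr}
//   vscclrm {s5, vpr}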
1205 
1206 void ARMExpandPseudo::CMSESaveClearFPRegs(
1207     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc &DL,
1208     const LivePhysRegs &LiveRegs, SmallVectorImpl<unsigned> &ScratchRegs) {
1209   if (STI->hasV8_1MMainlineOps())
1210     CMSESaveClearFPRegsV81(MBB, MBBI, DL, LiveRegs);
1211   else
1212     CMSESaveClearFPRegsV8(MBB, MBBI, DL, LiveRegs, ScratchRegs);
1213 }
1214 
1215 // Save and clear FP registers if present
1216 void ARMExpandPseudo::CMSESaveClearFPRegsV8(
1217     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc &DL,
1218     const LivePhysRegs &LiveRegs, SmallVectorImpl<unsigned> &ScratchRegs) {
1219   if (!STI->hasFPRegs())
1220     return;
1221 
1222   // Store an available register for FPSCR clearing
1223   assert(!ScratchRegs.empty());
1224   unsigned SpareReg = ScratchRegs.front();
1225 
1226   // save space on stack for VLSTM
1227   BuildMI(MBB, MBBI, DL, TII->get(ARM::tSUBspi), ARM::SP)
1228       .addReg(ARM::SP)
1229       .addImm(CMSE_FP_SAVE_SIZE >> 2)
1230       .add(predOps(ARMCC::AL));
1231 
1232   // Use ScratchRegs to store the fp regs
1233   std::vector<std::tuple<unsigned, unsigned, unsigned>> ClearedFPRegs;
1234   std::vector<unsigned> NonclearedFPRegs;
1235   for (const MachineOperand &Op : MBBI->operands()) {
1236     if (Op.isReg() && Op.isUse()) {
1237       unsigned Reg = Op.getReg();
1238       assert(!ARM::DPRRegClass.contains(Reg) ||
1239              ARM::DPR_VFP2RegClass.contains(Reg));
1240       assert(!ARM::QPRRegClass.contains(Reg));
1241       if (ARM::DPR_VFP2RegClass.contains(Reg)) {
1242         if (ScratchRegs.size() >= 2) {
1243           unsigned SaveReg2 = ScratchRegs.pop_back_val();
1244           unsigned SaveReg1 = ScratchRegs.pop_back_val();
1245           ClearedFPRegs.emplace_back(Reg, SaveReg1, SaveReg2);
1246 
1247           // Save the fp register to the normal registers
1248           BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVRRD))
1249               .addReg(SaveReg1, RegState::Define)
1250               .addReg(SaveReg2, RegState::Define)
1251               .addReg(Reg)
1252               .add(predOps(ARMCC::AL));
1253         } else {
1254           NonclearedFPRegs.push_back(Reg);
1255         }
1256       } else if (ARM::SPRRegClass.contains(Reg)) {
1257         if (ScratchRegs.size() >= 1) {
1258           unsigned SaveReg = ScratchRegs.pop_back_val();
1259           ClearedFPRegs.emplace_back(Reg, SaveReg, 0);
1260 
1261           // Save the fp register to the normal registers
1262           BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVRS), SaveReg)
1263               .addReg(Reg)
1264               .add(predOps(ARMCC::AL));
1265         } else {
1266           NonclearedFPRegs.push_back(Reg);
1267         }
1268       }
1269     }
1270   }
1271 
1272   bool passesFPReg = (!NonclearedFPRegs.empty() || !ClearedFPRegs.empty());
1273 
1274   // Lazy store all fp registers to the stack
1275   MachineInstrBuilder VLSTM = BuildMI(MBB, MBBI, DL, TII->get(ARM::VLSTM))
1276                                   .addReg(ARM::SP)
1277                                   .add(predOps(ARMCC::AL));
1278   for (auto R : {ARM::VPR, ARM::FPSCR, ARM::FPSCR_NZCV, ARM::Q0, ARM::Q1,
1279                  ARM::Q2, ARM::Q3, ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7})
1280     VLSTM.addReg(R, RegState::Implicit |
1281                         (LiveRegs.contains(R) ? 0 : RegState::Undef));
1282 
1283   // Restore all arguments
1284   for (const auto &Regs : ClearedFPRegs) {
1285     unsigned Reg, SaveReg1, SaveReg2;
1286     std::tie(Reg, SaveReg1, SaveReg2) = Regs;
1287     if (ARM::DPR_VFP2RegClass.contains(Reg))
1288       BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVDRR), Reg)
1289           .addReg(SaveReg1)
1290           .addReg(SaveReg2)
1291           .add(predOps(ARMCC::AL));
1292     else if (ARM::SPRRegClass.contains(Reg))
1293       BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVSR), Reg)
1294           .addReg(SaveReg1)
1295           .add(predOps(ARMCC::AL));
1296   }
1297 
1298   for (unsigned Reg : NonclearedFPRegs) {
1299     if (ARM::DPR_VFP2RegClass.contains(Reg)) {
1300       if (STI->isLittle()) {
1301         BuildMI(MBB, MBBI, DL, TII->get(ARM::VLDRD), Reg)
1302             .addReg(ARM::SP)
1303             .addImm((Reg - ARM::D0) * 2)
1304             .add(predOps(ARMCC::AL));
1305       } else {
1306         // For big-endian targets we need to load the two subregisters of Reg
1307         // manually, because VLDRD would load them in the wrong order.
1308         unsigned SReg0 = TRI->getSubReg(Reg, ARM::ssub_0);
1309         BuildMI(MBB, MBBI, DL, TII->get(ARM::VLDRS), SReg0)
1310             .addReg(ARM::SP)
1311             .addImm((Reg - ARM::D0) * 2)
1312             .add(predOps(ARMCC::AL));
1313         BuildMI(MBB, MBBI, DL, TII->get(ARM::VLDRS), SReg0 + 1)
1314             .addReg(ARM::SP)
1315             .addImm((Reg - ARM::D0) * 2 + 1)
1316             .add(predOps(ARMCC::AL));
1317       }
1318     } else if (ARM::SPRRegClass.contains(Reg)) {
1319       BuildMI(MBB, MBBI, DL, TII->get(ARM::VLDRS), Reg)
1320           .addReg(ARM::SP)
1321           .addImm(Reg - ARM::S0)
1322           .add(predOps(ARMCC::AL));
1323     }
1324   }
1325   // restore FPSCR from stack and clear bits 0-4, 7, 28-31
1326   // The other bits are program global according to the AAPCS
1327   if (passesFPReg) {
1328     BuildMI(MBB, MBBI, DL, TII->get(ARM::t2LDRi8), SpareReg)
1329         .addReg(ARM::SP)
1330         .addImm(0x40)
1331         .add(predOps(ARMCC::AL));
1332     BuildMI(MBB, MBBI, DL, TII->get(ARM::t2BICri), SpareReg)
1333         .addReg(SpareReg)
1334         .addImm(0x0000009F)
1335         .add(predOps(ARMCC::AL))
1336         .add(condCodeOp());
1337     BuildMI(MBB, MBBI, DL, TII->get(ARM::t2BICri), SpareReg)
1338         .addReg(SpareReg)
1339         .addImm(0xF0000000)
1340         .add(predOps(ARMCC::AL))
1341         .add(condCodeOp());
1342     BuildMI(MBB, MBBI, DL, TII->get(ARM::VMSR))
1343         .addReg(SpareReg)
1344         .add(predOps(ARMCC::AL));
1345     // The ldr must happen after a floating point instruction. To prevent the
1346     // post-RA scheduler from reordering them, we create a bundle.
1347     finalizeBundle(MBB, VLSTM->getIterator(), MBBI->getIterator());
1348   }
1349 }
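// Roughly, for a call passing a single float argument in s0 this emits
// something like (r4 standing in for whichever scratch register is free):
//   sub   sp, #136
//   vmov  r4, s0              ; keep the argument in a GP register
//   vlstm sp                  ; lazy-save the secure FP state
//   vmov  s0, r4              ; put the argument back
//   ldr   r4, [sp, #0x40]     ; reload FPSCR from the save area
//   bic   r4, r4, #0x9f
//   bic   r4, r4, #0xf0000000
//   vmsr  fpscr, r4
// (illustrative only; the trailing instructions are bundled with the vlstm).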
1350 
1351 void ARMExpandPseudo::CMSESaveClearFPRegsV81(MachineBasicBlock &MBB,
1352                                              MachineBasicBlock::iterator MBBI,
1353                                              DebugLoc &DL,
1354                                              const LivePhysRegs &LiveRegs) {
1355   BitVector ClearRegs(32, true);
1356   bool DefFP = determineFPRegsToClear(*MBBI, ClearRegs);
1357 
1358   // If the instruction does not write to a FP register and no elements were
1359   // removed from the set, then no FP registers were used to pass
1360   // arguments/returns.
1361   if (!DefFP && ClearRegs.count() == ClearRegs.size()) {
1362     // save space on stack for VLSTM
1363     BuildMI(MBB, MBBI, DL, TII->get(ARM::tSUBspi), ARM::SP)
1364         .addReg(ARM::SP)
1365         .addImm(CMSE_FP_SAVE_SIZE >> 2)
1366         .add(predOps(ARMCC::AL));
1367 
1368     // Lazy store all FP registers to the stack
1369     MachineInstrBuilder VLSTM = BuildMI(MBB, MBBI, DL, TII->get(ARM::VLSTM))
1370                                     .addReg(ARM::SP)
1371                                     .add(predOps(ARMCC::AL));
1372     for (auto R : {ARM::VPR, ARM::FPSCR, ARM::FPSCR_NZCV, ARM::Q0, ARM::Q1,
1373                    ARM::Q2, ARM::Q3, ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7})
1374       VLSTM.addReg(R, RegState::Implicit |
1375                           (LiveRegs.contains(R) ? 0 : RegState::Undef));
1376   } else {
1377     // Push all the callee-saved registers (s16-s31).
1378     MachineInstrBuilder VPUSH =
1379         BuildMI(MBB, MBBI, DL, TII->get(ARM::VSTMSDB_UPD), ARM::SP)
1380             .addReg(ARM::SP)
1381             .add(predOps(ARMCC::AL));
1382     for (int Reg = ARM::S16; Reg <= ARM::S31; ++Reg)
1383       VPUSH.addReg(Reg);
1384 
1385     // Clear FP registers with a VSCCLRM.
1386     (void)CMSEClearFPRegsV81(MBB, MBBI, ClearRegs);
1387 
1388     // Save floating-point context.
1389     BuildMI(MBB, MBBI, DL, TII->get(ARM::VSTR_FPCXTS_pre), ARM::SP)
1390         .addReg(ARM::SP)
1391         .addImm(-8)
1392         .add(predOps(ARMCC::AL));
1393   }
1394 }
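// Roughly, when no FP register carries an argument this is just
//   sub   sp, #136
//   vlstm sp
// and otherwise
//   vpush {s16-s31}
//   vscclrm {...}             ; clear whatever is not an argument
//   vstr  fpcxts, [sp, #-8]!
// (illustrative only).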
1395 
1396 // Restore FP registers if present
1397 void ARMExpandPseudo::CMSERestoreFPRegs(
1398     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc &DL,
1399     SmallVectorImpl<unsigned> &AvailableRegs) {
1400   if (STI->hasV8_1MMainlineOps())
1401     CMSERestoreFPRegsV81(MBB, MBBI, DL, AvailableRegs);
1402   else
1403     CMSERestoreFPRegsV8(MBB, MBBI, DL, AvailableRegs);
1404 }
1405 
1406 void ARMExpandPseudo::CMSERestoreFPRegsV8(
1407     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc &DL,
1408     SmallVectorImpl<unsigned> &AvailableRegs) {
1409   if (!STI->hasFPRegs())
1410     return;
1411 
1412   // Use AvailableRegs to store the fp regs
1413   std::vector<std::tuple<unsigned, unsigned, unsigned>> ClearedFPRegs;
1414   std::vector<unsigned> NonclearedFPRegs;
1415   for (const MachineOperand &Op : MBBI->operands()) {
1416     if (Op.isReg() && Op.isDef()) {
1417       unsigned Reg = Op.getReg();
1418       assert(!ARM::DPRRegClass.contains(Reg) ||
1419              ARM::DPR_VFP2RegClass.contains(Reg));
1420       assert(!ARM::QPRRegClass.contains(Reg));
1421       if (ARM::DPR_VFP2RegClass.contains(Reg)) {
1422         if (AvailableRegs.size() >= 2) {
1423           unsigned SaveReg2 = AvailableRegs.pop_back_val();
1424           unsigned SaveReg1 = AvailableRegs.pop_back_val();
1425           ClearedFPRegs.emplace_back(Reg, SaveReg1, SaveReg2);
1426 
1427           // Save the fp register to the normal registers
1428           BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVRRD))
1429               .addReg(SaveReg1, RegState::Define)
1430               .addReg(SaveReg2, RegState::Define)
1431               .addReg(Reg)
1432               .add(predOps(ARMCC::AL));
1433         } else {
1434           NonclearedFPRegs.push_back(Reg);
1435         }
1436       } else if (ARM::SPRRegClass.contains(Reg)) {
1437         if (AvailableRegs.size() >= 1) {
1438           unsigned SaveReg = AvailableRegs.pop_back_val();
1439           ClearedFPRegs.emplace_back(Reg, SaveReg, 0);
1440 
1441           // Save the fp register to the normal registers
1442           BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVRS), SaveReg)
1443               .addReg(Reg)
1444               .add(predOps(ARMCC::AL));
1445         } else {
1446           NonclearedFPRegs.push_back(Reg);
1447         }
1448       }
1449     }
1450   }
1451 
1452   // Push FP regs that cannot be restored via normal registers on the stack
1453   for (unsigned Reg : NonclearedFPRegs) {
1454     if (ARM::DPR_VFP2RegClass.contains(Reg))
1455       BuildMI(MBB, MBBI, DL, TII->get(ARM::VSTRD), Reg)
1456           .addReg(ARM::SP)
1457           .addImm((Reg - ARM::D0) * 2)
1458           .add(predOps(ARMCC::AL));
1459     else if (ARM::SPRRegClass.contains(Reg))
1460       BuildMI(MBB, MBBI, DL, TII->get(ARM::VSTRS), Reg)
1461           .addReg(ARM::SP)
1462           .addImm(Reg - ARM::S0)
1463           .add(predOps(ARMCC::AL));
1464   }
1465 
1466   // Lazy load fp regs from stack
1467   BuildMI(MBB, MBBI, DL, TII->get(ARM::VLLDM))
1468       .addReg(ARM::SP)
1469       .add(predOps(ARMCC::AL));
1470 
1471   // Restore all FP registers via normal registers
1472   for (const auto &Regs : ClearedFPRegs) {
1473     unsigned Reg, SaveReg1, SaveReg2;
1474     std::tie(Reg, SaveReg1, SaveReg2) = Regs;
1475     if (ARM::DPR_VFP2RegClass.contains(Reg))
1476       BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVDRR), Reg)
1477           .addReg(SaveReg1)
1478           .addReg(SaveReg2)
1479           .add(predOps(ARMCC::AL));
1480     else if (ARM::SPRRegClass.contains(Reg))
1481       BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVSR), Reg)
1482           .addReg(SaveReg1)
1483           .add(predOps(ARMCC::AL));
1484   }
1485 
1486   // Pop the stack space
1487   BuildMI(MBB, MBBI, DL, TII->get(ARM::tADDspi), ARM::SP)
1488       .addReg(ARM::SP)
1489       .addImm(CMSE_FP_SAVE_SIZE >> 2)
1490       .add(predOps(ARMCC::AL));
1491 }
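// Roughly, for a call returning a single float in s0 this emits
//   vmov  r4, s0              ; keep the result in a GP register
//   vlldm sp                  ; lazy-load the saved secure FP state
//   vmov  s0, r4              ; put the result back
//   add   sp, #136
// (illustrative only; r4 stands in for an available register).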
1492 
1493 static bool definesOrUsesFPReg(const MachineInstr &MI) {
1494   for (const MachineOperand &Op : MI.operands()) {
1495     if (!Op.isReg())
1496       continue;
1497     unsigned Reg = Op.getReg();
1498     if ((Reg >= ARM::Q0 && Reg <= ARM::Q7) ||
1499         (Reg >= ARM::D0 && Reg <= ARM::D15) ||
1500         (Reg >= ARM::S0 && Reg <= ARM::S31))
1501       return true;
1502   }
1503   return false;
1504 }
1505 
1506 void ARMExpandPseudo::CMSERestoreFPRegsV81(
1507     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc &DL,
1508     SmallVectorImpl<unsigned> &AvailableRegs) {
1509   if (!definesOrUsesFPReg(*MBBI)) {
1510     // Load FP registers from stack.
1511     BuildMI(MBB, MBBI, DL, TII->get(ARM::VLLDM))
1512         .addReg(ARM::SP)
1513         .add(predOps(ARMCC::AL));
1514 
1515     // Pop the stack space
1516     BuildMI(MBB, MBBI, DL, TII->get(ARM::tADDspi), ARM::SP)
1517         .addReg(ARM::SP)
1518         .addImm(CMSE_FP_SAVE_SIZE >> 2)
1519         .add(predOps(ARMCC::AL));
1520   } else {
1521     // Restore the floating point context.
1522     BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(ARM::VLDR_FPCXTS_post),
1523             ARM::SP)
1524         .addReg(ARM::SP)
1525         .addImm(8)
1526         .add(predOps(ARMCC::AL));
1527 
1528     // Pop all the callee-saved registers (s16-s31).
1529     MachineInstrBuilder VPOP =
1530         BuildMI(MBB, MBBI, DL, TII->get(ARM::VLDMSIA_UPD), ARM::SP)
1531             .addReg(ARM::SP)
1532             .add(predOps(ARMCC::AL));
1533     for (int Reg = ARM::S16; Reg <= ARM::S31; ++Reg)
1534       VPOP.addReg(Reg, RegState::Define);
1535   }
1536 }
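// Roughly, this is either
//   vlldm sp
//   add   sp, #136
// or, when FP registers carry arguments or results,
//   vldr  fpcxts, [sp], #8
//   vpop  {s16-s31}
// (illustrative only).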
1537 
1538 /// Expand a CMP_SWAP pseudo-inst to an ldrex/strex loop as simply as
1539 /// possible. This only gets used at -O0 so we don't care about efficiency of
1540 /// the generated code.
1541 bool ARMExpandPseudo::ExpandCMP_SWAP(MachineBasicBlock &MBB,
1542                                      MachineBasicBlock::iterator MBBI,
1543                                      unsigned LdrexOp, unsigned StrexOp,
1544                                      unsigned UxtOp,
1545                                      MachineBasicBlock::iterator &NextMBBI) {
1546   bool IsThumb = STI->isThumb();
1547   MachineInstr &MI = *MBBI;
1548   DebugLoc DL = MI.getDebugLoc();
1549   const MachineOperand &Dest = MI.getOperand(0);
1550   Register TempReg = MI.getOperand(1).getReg();
1551   // Duplicating undef operands into 2 instructions does not guarantee the same
1552   // value on both; however, undef should be replaced by xzr anyway.
1553   assert(!MI.getOperand(2).isUndef() && "cannot handle undef");
1554   Register AddrReg = MI.getOperand(2).getReg();
1555   Register DesiredReg = MI.getOperand(3).getReg();
1556   Register NewReg = MI.getOperand(4).getReg();
1557 
1558   MachineFunction *MF = MBB.getParent();
1559   auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1560   auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1561   auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1562 
1563   MF->insert(++MBB.getIterator(), LoadCmpBB);
1564   MF->insert(++LoadCmpBB->getIterator(), StoreBB);
1565   MF->insert(++StoreBB->getIterator(), DoneBB);
1566 
1567   if (UxtOp) {
1568     MachineInstrBuilder MIB =
1569         BuildMI(MBB, MBBI, DL, TII->get(UxtOp), DesiredReg)
1570             .addReg(DesiredReg, RegState::Kill);
1571     if (!IsThumb)
1572       MIB.addImm(0);
1573     MIB.add(predOps(ARMCC::AL));
1574   }
1575 
1576   // .Lloadcmp:
1577   //     ldrex rDest, [rAddr]
1578   //     cmp rDest, rDesired
1579   //     bne .Ldone
1580 
1581   MachineInstrBuilder MIB;
1582   MIB = BuildMI(LoadCmpBB, DL, TII->get(LdrexOp), Dest.getReg());
1583   MIB.addReg(AddrReg);
1584   if (LdrexOp == ARM::t2LDREX)
1585     MIB.addImm(0); // a 32-bit Thumb ldrex (only) allows an offset.
1586   MIB.add(predOps(ARMCC::AL));
1587 
1588   unsigned CMPrr = IsThumb ? ARM::tCMPhir : ARM::CMPrr;
1589   BuildMI(LoadCmpBB, DL, TII->get(CMPrr))
1590       .addReg(Dest.getReg(), getKillRegState(Dest.isDead()))
1591       .addReg(DesiredReg)
1592       .add(predOps(ARMCC::AL));
1593   unsigned Bcc = IsThumb ? ARM::tBcc : ARM::Bcc;
1594   BuildMI(LoadCmpBB, DL, TII->get(Bcc))
1595       .addMBB(DoneBB)
1596       .addImm(ARMCC::NE)
1597       .addReg(ARM::CPSR, RegState::Kill);
1598   LoadCmpBB->addSuccessor(DoneBB);
1599   LoadCmpBB->addSuccessor(StoreBB);
1600 
1601   // .Lstore:
1602   //     strex rTempReg, rNew, [rAddr]
1603   //     cmp rTempReg, #0
1604   //     bne .Lloadcmp
1605   MIB = BuildMI(StoreBB, DL, TII->get(StrexOp), TempReg)
1606     .addReg(NewReg)
1607     .addReg(AddrReg);
1608   if (StrexOp == ARM::t2STREX)
1609     MIB.addImm(0); // a 32-bit Thumb strex (only) allows an offset.
1610   MIB.add(predOps(ARMCC::AL));
1611 
1612   unsigned CMPri = IsThumb ? ARM::t2CMPri : ARM::CMPri;
1613   BuildMI(StoreBB, DL, TII->get(CMPri))
1614       .addReg(TempReg, RegState::Kill)
1615       .addImm(0)
1616       .add(predOps(ARMCC::AL));
1617   BuildMI(StoreBB, DL, TII->get(Bcc))
1618       .addMBB(LoadCmpBB)
1619       .addImm(ARMCC::NE)
1620       .addReg(ARM::CPSR, RegState::Kill);
1621   StoreBB->addSuccessor(LoadCmpBB);
1622   StoreBB->addSuccessor(DoneBB);
1623 
1624   DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
1625   DoneBB->transferSuccessors(&MBB);
1626 
1627   MBB.addSuccessor(LoadCmpBB);
1628 
1629   NextMBBI = MBB.end();
1630   MI.eraseFromParent();
1631 
1632   // Recompute livein lists.
1633   LivePhysRegs LiveRegs;
1634   computeAndAddLiveIns(LiveRegs, *DoneBB);
1635   computeAndAddLiveIns(LiveRegs, *StoreBB);
1636   computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
1637   // Do an extra pass around the loop to get loop carried registers right.
1638   StoreBB->clearLiveIns();
1639   computeAndAddLiveIns(LiveRegs, *StoreBB);
1640   LoadCmpBB->clearLiveIns();
1641   computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
1642 
1643   return true;
1644 }
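// Illustrative expansion of an 8-bit CMP_SWAP in Thumb mode (not from the
// original source):
//     uxtb   rDesired, rDesired
// .Lloadcmp:
//     ldrexb rDest, [rAddr]
//     cmp    rDest, rDesired
//     bne    .Ldone
// .Lstore:
//     strexb rTempReg, rNew, [rAddr]
//     cmp    rTempReg, #0
//     bne    .Lloadcmp
// .Ldone: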
1645 
1646 /// ARM's ldrexd/strexd take a consecutive register pair (represented as a
1647 /// single GPRPair register), Thumb's take two separate registers so we need to
1648 /// extract the subregs from the pair.
1649 static void addExclusiveRegPair(MachineInstrBuilder &MIB, MachineOperand &Reg,
1650                                 unsigned Flags, bool IsThumb,
1651                                 const TargetRegisterInfo *TRI) {
1652   if (IsThumb) {
1653     Register RegLo = TRI->getSubReg(Reg.getReg(), ARM::gsub_0);
1654     Register RegHi = TRI->getSubReg(Reg.getReg(), ARM::gsub_1);
1655     MIB.addReg(RegLo, Flags);
1656     MIB.addReg(RegHi, Flags);
1657   } else
1658     MIB.addReg(Reg.getReg(), Flags);
1659 }
1660 
1661 /// Expand a 64-bit CMP_SWAP to an ldrexd/strexd loop.
1662 bool ARMExpandPseudo::ExpandCMP_SWAP_64(MachineBasicBlock &MBB,
1663                                         MachineBasicBlock::iterator MBBI,
1664                                         MachineBasicBlock::iterator &NextMBBI) {
1665   bool IsThumb = STI->isThumb();
1666   MachineInstr &MI = *MBBI;
1667   DebugLoc DL = MI.getDebugLoc();
1668   MachineOperand &Dest = MI.getOperand(0);
1669   Register TempReg = MI.getOperand(1).getReg();
1670   // Duplicating undef operands into 2 instructions does not guarantee the same
1671   // value on both; however, undef should be replaced by xzr anyway.
1672   assert(!MI.getOperand(2).isUndef() && "cannot handle undef");
1673   Register AddrReg = MI.getOperand(2).getReg();
1674   Register DesiredReg = MI.getOperand(3).getReg();
1675   MachineOperand New = MI.getOperand(4);
1676   New.setIsKill(false);
1677 
1678   Register DestLo = TRI->getSubReg(Dest.getReg(), ARM::gsub_0);
1679   Register DestHi = TRI->getSubReg(Dest.getReg(), ARM::gsub_1);
1680   Register DesiredLo = TRI->getSubReg(DesiredReg, ARM::gsub_0);
1681   Register DesiredHi = TRI->getSubReg(DesiredReg, ARM::gsub_1);
1682 
1683   MachineFunction *MF = MBB.getParent();
1684   auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1685   auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1686   auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1687 
1688   MF->insert(++MBB.getIterator(), LoadCmpBB);
1689   MF->insert(++LoadCmpBB->getIterator(), StoreBB);
1690   MF->insert(++StoreBB->getIterator(), DoneBB);
1691 
1692   // .Lloadcmp:
1693   //     ldrexd rDestLo, rDestHi, [rAddr]
1694   //     cmp rDestLo, rDesiredLo
1695   //     cmpeq rDestHi, rDesiredHi
1696   //     bne .Ldone
1697   unsigned LDREXD = IsThumb ? ARM::t2LDREXD : ARM::LDREXD;
1698   MachineInstrBuilder MIB;
1699   MIB = BuildMI(LoadCmpBB, DL, TII->get(LDREXD));
1700   addExclusiveRegPair(MIB, Dest, RegState::Define, IsThumb, TRI);
1701   MIB.addReg(AddrReg).add(predOps(ARMCC::AL));
1702 
1703   unsigned CMPrr = IsThumb ? ARM::tCMPhir : ARM::CMPrr;
1704   BuildMI(LoadCmpBB, DL, TII->get(CMPrr))
1705       .addReg(DestLo, getKillRegState(Dest.isDead()))
1706       .addReg(DesiredLo)
1707       .add(predOps(ARMCC::AL));
1708 
1709   BuildMI(LoadCmpBB, DL, TII->get(CMPrr))
1710       .addReg(DestHi, getKillRegState(Dest.isDead()))
1711       .addReg(DesiredHi)
1712       .addImm(ARMCC::EQ).addReg(ARM::CPSR, RegState::Kill);
1713 
1714   unsigned Bcc = IsThumb ? ARM::tBcc : ARM::Bcc;
1715   BuildMI(LoadCmpBB, DL, TII->get(Bcc))
1716       .addMBB(DoneBB)
1717       .addImm(ARMCC::NE)
1718       .addReg(ARM::CPSR, RegState::Kill);
1719   LoadCmpBB->addSuccessor(DoneBB);
1720   LoadCmpBB->addSuccessor(StoreBB);
1721 
1722   // .Lstore:
1723   //     strexd rTempReg, rNewLo, rNewHi, [rAddr]
1724   //     cmp rTempReg, #0
1725   //     bne .Lloadcmp
1726   unsigned STREXD = IsThumb ? ARM::t2STREXD : ARM::STREXD;
1727   MIB = BuildMI(StoreBB, DL, TII->get(STREXD), TempReg);
1728   unsigned Flags = getKillRegState(New.isDead());
1729   addExclusiveRegPair(MIB, New, Flags, IsThumb, TRI);
1730   MIB.addReg(AddrReg).add(predOps(ARMCC::AL));
1731 
1732   unsigned CMPri = IsThumb ? ARM::t2CMPri : ARM::CMPri;
1733   BuildMI(StoreBB, DL, TII->get(CMPri))
1734       .addReg(TempReg, RegState::Kill)
1735       .addImm(0)
1736       .add(predOps(ARMCC::AL));
1737   BuildMI(StoreBB, DL, TII->get(Bcc))
1738       .addMBB(LoadCmpBB)
1739       .addImm(ARMCC::NE)
1740       .addReg(ARM::CPSR, RegState::Kill);
1741   StoreBB->addSuccessor(LoadCmpBB);
1742   StoreBB->addSuccessor(DoneBB);
1743 
1744   DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
1745   DoneBB->transferSuccessors(&MBB);
1746 
1747   MBB.addSuccessor(LoadCmpBB);
1748 
1749   NextMBBI = MBB.end();
1750   MI.eraseFromParent();
1751 
1752   // Recompute livein lists.
1753   LivePhysRegs LiveRegs;
1754   computeAndAddLiveIns(LiveRegs, *DoneBB);
1755   computeAndAddLiveIns(LiveRegs, *StoreBB);
1756   computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
1757   // Do an extra pass around the loop to get loop carried registers right.
1758   StoreBB->clearLiveIns();
1759   computeAndAddLiveIns(LiveRegs, *StoreBB);
1760   LoadCmpBB->clearLiveIns();
1761   computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
1762 
1763   return true;
1764 }
1765 
1766 static void CMSEPushCalleeSaves(const TargetInstrInfo &TII,
1767                                 MachineBasicBlock &MBB,
1768                                 MachineBasicBlock::iterator MBBI, int JumpReg,
1769                                 const LivePhysRegs &LiveRegs, bool Thumb1Only) {
1770   const DebugLoc &DL = MBBI->getDebugLoc();
1771   if (Thumb1Only) { // push Lo and Hi regs separately
1772     MachineInstrBuilder PushMIB =
1773         BuildMI(MBB, MBBI, DL, TII.get(ARM::tPUSH)).add(predOps(ARMCC::AL));
1774     for (int Reg = ARM::R4; Reg < ARM::R8; ++Reg) {
1775       PushMIB.addReg(
1776           Reg, Reg == JumpReg || LiveRegs.contains(Reg) ? 0 : RegState::Undef);
1777     }
1778 
1779     // Thumb1 can only tPUSH low regs, so we copy the high regs to the low
1780     // regs that we just saved and push the low regs again, taking care to
1781     // not clobber JumpReg. If JumpReg is one of the low registers, we first
1782     // push the values of r9-r11, and then r8. That leaves them ordered in
1783     // memory, and allows us to later pop them with a single instruction.
1784     // FIXME: Could also use any of r0-r3 that are free (including in the
1785     // first PUSH above).
1786     for (int LoReg = ARM::R7, HiReg = ARM::R11; LoReg >= ARM::R4; --LoReg) {
1787       if (JumpReg == LoReg)
1788         continue;
1789       BuildMI(MBB, MBBI, DL, TII.get(ARM::tMOVr), LoReg)
1790           .addReg(HiReg, LiveRegs.contains(HiReg) ? 0 : RegState::Undef)
1791           .add(predOps(ARMCC::AL));
1792       --HiReg;
1793     }
1794     MachineInstrBuilder PushMIB2 =
1795         BuildMI(MBB, MBBI, DL, TII.get(ARM::tPUSH)).add(predOps(ARMCC::AL));
1796     for (int Reg = ARM::R4; Reg < ARM::R8; ++Reg) {
1797       if (Reg == JumpReg)
1798         continue;
1799       PushMIB2.addReg(Reg, RegState::Kill);
1800     }
1801 
1802     // If we couldn't use a low register for temporary storage (because it was
1803     // the JumpReg), use r4 or r5, whichever is not JumpReg. It has already been
1804     // saved.
1805     if (JumpReg >= ARM::R4 && JumpReg <= ARM::R7) {
1806       int LoReg = JumpReg == ARM::R4 ? ARM::R5 : ARM::R4;
1807       BuildMI(MBB, MBBI, DL, TII.get(ARM::tMOVr), LoReg)
1808           .addReg(ARM::R8, LiveRegs.contains(ARM::R8) ? 0 : RegState::Undef)
1809           .add(predOps(ARMCC::AL));
1810       BuildMI(MBB, MBBI, DL, TII.get(ARM::tPUSH))
1811           .add(predOps(ARMCC::AL))
1812           .addReg(LoReg, RegState::Kill);
1813     }
1814   } else { // push Lo and Hi registers with a single instruction
1815     MachineInstrBuilder PushMIB =
1816         BuildMI(MBB, MBBI, DL, TII.get(ARM::t2STMDB_UPD), ARM::SP)
1817             .addReg(ARM::SP)
1818             .add(predOps(ARMCC::AL));
1819     for (int Reg = ARM::R4; Reg < ARM::R12; ++Reg) {
1820       PushMIB.addReg(
1821           Reg, Reg == JumpReg || LiveRegs.contains(Reg) ? 0 : RegState::Undef);
1822     }
1823   }
1824 }
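// Roughly, the Thumb1 path above emits (assuming JumpReg is not r4-r7)
//   push {r4-r7}
//   mov  r7, r11
//   mov  r6, r10
//   mov  r5, r9
//   mov  r4, r8
//   push {r4-r7}
// while the Thumb2 path is a single "stmdb sp!, {r4-r11}" (illustrative only).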
1825 
1826 static void CMSEPopCalleeSaves(const TargetInstrInfo &TII,
1827                                MachineBasicBlock &MBB,
1828                                MachineBasicBlock::iterator MBBI, int JumpReg,
1829                                bool Thumb1Only) {
1830   const DebugLoc &DL = MBBI->getDebugLoc();
1831   if (Thumb1Only) {
1832     MachineInstrBuilder PopMIB =
1833         BuildMI(MBB, MBBI, DL, TII.get(ARM::tPOP)).add(predOps(ARMCC::AL));
1834     for (int R = 0; R < 4; ++R) {
1835       PopMIB.addReg(ARM::R4 + R, RegState::Define);
1836       BuildMI(MBB, MBBI, DL, TII.get(ARM::tMOVr), ARM::R8 + R)
1837           .addReg(ARM::R4 + R, RegState::Kill)
1838           .add(predOps(ARMCC::AL));
1839     }
1840     MachineInstrBuilder PopMIB2 =
1841         BuildMI(MBB, MBBI, DL, TII.get(ARM::tPOP)).add(predOps(ARMCC::AL));
1842     for (int R = 0; R < 4; ++R)
1843       PopMIB2.addReg(ARM::R4 + R, RegState::Define);
1844   } else { // pop Lo and Hi registers with a single instruction
1845     MachineInstrBuilder PopMIB =
1846         BuildMI(MBB, MBBI, DL, TII.get(ARM::t2LDMIA_UPD), ARM::SP)
1847             .addReg(ARM::SP)
1848             .add(predOps(ARMCC::AL));
1849     for (int Reg = ARM::R4; Reg < ARM::R12; ++Reg)
1850       PopMIB.addReg(Reg, RegState::Define);
1851   }
1852 }
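// Roughly, the Thumb1 path above emits
//   pop  {r4-r7}
//   mov  r8, r4
//   mov  r9, r5
//   mov  r10, r6
//   mov  r11, r7
//   pop  {r4-r7}
// (illustrative only; Thumb2 uses a single "ldmia sp!, {r4-r11}").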
1853 
1854 bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
1855                                MachineBasicBlock::iterator MBBI,
1856                                MachineBasicBlock::iterator &NextMBBI) {
1857   MachineInstr &MI = *MBBI;
1858   unsigned Opcode = MI.getOpcode();
1859   switch (Opcode) {
1860     default:
1861       return false;
1862 
1863     case ARM::TCRETURNdi:
1864     case ARM::TCRETURNri: {
1865       MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
1866       assert(MBBI->isReturn() &&
1867              "Can only insert epilog into returning blocks");
1868       unsigned RetOpcode = MBBI->getOpcode();
1869       DebugLoc dl = MBBI->getDebugLoc();
1870       const ARMBaseInstrInfo &TII = *static_cast<const ARMBaseInstrInfo *>(
1871           MBB.getParent()->getSubtarget().getInstrInfo());
1872 
1873       // Tail call return: adjust the stack pointer and jump to callee.
1874       MBBI = MBB.getLastNonDebugInstr();
1875       MachineOperand &JumpTarget = MBBI->getOperand(0);
1876 
1877       // Jump to label or value in register.
1878       if (RetOpcode == ARM::TCRETURNdi) {
1879         unsigned TCOpcode =
1880             STI->isThumb()
1881                 ? (STI->isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND)
1882                 : ARM::TAILJMPd;
1883         MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(TCOpcode));
1884         if (JumpTarget.isGlobal())
1885           MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
1886                                JumpTarget.getTargetFlags());
1887         else {
1888           assert(JumpTarget.isSymbol());
1889           MIB.addExternalSymbol(JumpTarget.getSymbolName(),
1890                                 JumpTarget.getTargetFlags());
1891         }
1892 
1893         // Add the default predicate in Thumb mode.
1894         if (STI->isThumb())
1895           MIB.add(predOps(ARMCC::AL));
1896       } else if (RetOpcode == ARM::TCRETURNri) {
1897         unsigned Opcode =
1898           STI->isThumb() ? ARM::tTAILJMPr
1899                          : (STI->hasV4TOps() ? ARM::TAILJMPr : ARM::TAILJMPr4);
1900         BuildMI(MBB, MBBI, dl,
1901                 TII.get(Opcode))
1902             .addReg(JumpTarget.getReg(), RegState::Kill);
1903       }
1904 
1905       auto NewMI = std::prev(MBBI);
1906       for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
1907         NewMI->addOperand(MBBI->getOperand(i));
1908 
1909 
1910       // Update call site info and delete the pseudo instruction TCRETURN.
1911       if (MI.isCandidateForCallSiteEntry())
1912         MI.getMF()->moveCallSiteInfo(&MI, &*NewMI);
1913       MBB.erase(MBBI);
1914 
1915       MBBI = NewMI;
1916       return true;
1917     }
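    // Illustrative only: TCRETURNdi typically becomes a direct tail jump such
    // as "b.w callee" (Thumb) or "b callee" (ARM), and TCRETURNri becomes
    // "bx <reg>", with the remaining operands of the return carried over.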
1918     case ARM::tBXNS_RET: {
1919       MachineBasicBlock &AfterBB = CMSEClearFPRegs(MBB, MBBI);
1920 
1921       if (STI->hasV8_1MMainlineOps()) {
1922         // Restore the non-secure floating point context.
1923         BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
1924                 TII->get(ARM::VLDR_FPCXTNS_post), ARM::SP)
1925             .addReg(ARM::SP)
1926             .addImm(4)
1927             .add(predOps(ARMCC::AL));
1928       }
1929 
1930       // Clear all GPRs that are not used by the return instruction.
1931       assert(llvm::all_of(MBBI->operands(), [](const MachineOperand &Op) {
1932         return !Op.isReg() || Op.getReg() != ARM::R12;
1933       }));
1934       SmallVector<unsigned, 5> ClearRegs;
1935       determineGPRegsToClear(
1936           *MBBI, {ARM::R0, ARM::R1, ARM::R2, ARM::R3, ARM::R12}, ClearRegs);
1937       CMSEClearGPRegs(AfterBB, AfterBB.end(), MBBI->getDebugLoc(), ClearRegs,
1938                       ARM::LR);
1939 
1940       MachineInstrBuilder NewMI =
1941           BuildMI(AfterBB, AfterBB.end(), MBBI->getDebugLoc(),
1942                   TII->get(ARM::tBXNS))
1943               .addReg(ARM::LR)
1944               .add(predOps(ARMCC::AL));
1945       for (const MachineOperand &Op : MI.operands())
1946         NewMI->addOperand(Op);
1947       MI.eraseFromParent();
1948       return true;
1949     }
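    // Roughly, a return from a secure entry function becomes something like
    //   vscclrm {...}                 ; or the v8.0-M clearing sequence
    //   vldr  fpcxtns, [sp], #4       ; v8.1-M only
    //   clrm  {r1, r2, r3, r12, apsr} ; GPRs not holding return values
    //   bxns  lr
    // (illustrative only; assumes the result is returned in r0).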
1950     case ARM::tBLXNS_CALL: {
1951       DebugLoc DL = MBBI->getDebugLoc();
1952       unsigned JumpReg = MBBI->getOperand(0).getReg();
1953 
1954       // Figure out which registers are live at the point immediately before the
1955       // call. When we indiscriminately push a set of registers, the live
1956       // registers are added as ordinary use operands, whereas dead registers
1957       // are "undef".
1958       LivePhysRegs LiveRegs(*TRI);
1959       LiveRegs.addLiveOuts(MBB);
1960       for (const MachineInstr &MI : make_range(MBB.rbegin(), MBBI.getReverse()))
1961         LiveRegs.stepBackward(MI);
1962       LiveRegs.stepBackward(*MBBI);
1963 
1964       CMSEPushCalleeSaves(*TII, MBB, MBBI, JumpReg, LiveRegs,
1965                           AFI->isThumb1OnlyFunction());
1966 
1967       SmallVector<unsigned, 16> ClearRegs;
1968       determineGPRegsToClear(*MBBI,
1969                              {ARM::R0, ARM::R1, ARM::R2, ARM::R3, ARM::R4,
1970                               ARM::R5, ARM::R6, ARM::R7, ARM::R8, ARM::R9,
1971                               ARM::R10, ARM::R11, ARM::R12},
1972                              ClearRegs);
1973       auto OriginalClearRegs = ClearRegs;
1974 
1975       // Get the first cleared register as a scratch (to use later with tBIC).
1976       // We need to use the first so we can ensure it is a low register.
1977       unsigned ScratchReg = ClearRegs.front();
1978 
1979       // Clear LSB of JumpReg
1980       if (AFI->isThumb2Function()) {
1981         BuildMI(MBB, MBBI, DL, TII->get(ARM::t2BICri), JumpReg)
1982             .addReg(JumpReg)
1983             .addImm(1)
1984             .add(predOps(ARMCC::AL))
1985             .add(condCodeOp());
1986       } else {
1987         // We need to use an extra register to cope with 8M Baseline;
1988         // since we have saved all of the registers, we are OK to trash a
1989         // non-argument register here.
1990         BuildMI(MBB, MBBI, DL, TII->get(ARM::tMOVi8), ScratchReg)
1991             .add(condCodeOp())
1992             .addImm(1)
1993             .add(predOps(ARMCC::AL));
1994         BuildMI(MBB, MBBI, DL, TII->get(ARM::tBIC), JumpReg)
1995             .addReg(ARM::CPSR, RegState::Define)
1996             .addReg(JumpReg)
1997             .addReg(ScratchReg)
1998             .add(predOps(ARMCC::AL));
1999       }
2000 
2001       CMSESaveClearFPRegs(MBB, MBBI, DL, LiveRegs,
2002                           ClearRegs); // save+clear FP regs with ClearRegs
2003       CMSEClearGPRegs(MBB, MBBI, DL, ClearRegs, JumpReg);
2004 
2005       const MachineInstrBuilder NewCall =
2006           BuildMI(MBB, MBBI, DL, TII->get(ARM::tBLXNSr))
2007               .add(predOps(ARMCC::AL))
2008               .addReg(JumpReg, RegState::Kill);
2009 
2010       for (int I = 1, E = MI.getNumOperands(); I != E; ++I)
2011         NewCall->addOperand(MI.getOperand(I));
2012       if (MI.isCandidateForCallSiteEntry())
2013         MI.getMF()->moveCallSiteInfo(&MI, NewCall.getInstr());
2014 
2015       CMSERestoreFPRegs(MBB, MBBI, DL, OriginalClearRegs); // restore FP registers
2016 
2017       CMSEPopCalleeSaves(*TII, MBB, MBBI, JumpReg, AFI->isThumb1OnlyFunction());
2018 
2019       MI.eraseFromParent();
2020       return true;
2021     }
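    // Roughly, a non-secure call through rJump becomes
    //   push  {r4-r11}                ; callee-saved registers
    //   bic   rJump, rJump, #1        ; clear the LSB of the target
    //   <save and clear FP registers>
    //   clrm  {...}                   ; GPRs not passing arguments
    //   blxns rJump
    //   <restore FP registers>
    //   pop   {r4-r11}
    // (illustrative only; rJump is whatever register holds the target).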
2022     case ARM::VMOVHcc:
2023     case ARM::VMOVScc:
2024     case ARM::VMOVDcc: {
2025       unsigned newOpc = Opcode != ARM::VMOVDcc ? ARM::VMOVS : ARM::VMOVD;
2026       BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(newOpc),
2027               MI.getOperand(1).getReg())
2028           .add(MI.getOperand(2))
2029           .addImm(MI.getOperand(3).getImm()) // 'pred'
2030           .add(MI.getOperand(4))
2031           .add(makeImplicit(MI.getOperand(1)));
2032 
2033       MI.eraseFromParent();
2034       return true;
2035     }
2036     case ARM::t2MOVCCr:
2037     case ARM::MOVCCr: {
2038       unsigned Opc = AFI->isThumbFunction() ? ARM::t2MOVr : ARM::MOVr;
2039       BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc),
2040               MI.getOperand(1).getReg())
2041           .add(MI.getOperand(2))
2042           .addImm(MI.getOperand(3).getImm()) // 'pred'
2043           .add(MI.getOperand(4))
2044           .add(condCodeOp()) // 's' bit
2045           .add(makeImplicit(MI.getOperand(1)));
2046 
2047       MI.eraseFromParent();
2048       return true;
2049     }
2050     case ARM::MOVCCsi: {
2051       BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi),
2052               (MI.getOperand(1).getReg()))
2053           .add(MI.getOperand(2))
2054           .addImm(MI.getOperand(3).getImm())
2055           .addImm(MI.getOperand(4).getImm()) // 'pred'
2056           .add(MI.getOperand(5))
2057           .add(condCodeOp()) // 's' bit
2058           .add(makeImplicit(MI.getOperand(1)));
2059 
2060       MI.eraseFromParent();
2061       return true;
2062     }
2063     case ARM::MOVCCsr: {
2064       BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsr),
2065               (MI.getOperand(1).getReg()))
2066           .add(MI.getOperand(2))
2067           .add(MI.getOperand(3))
2068           .addImm(MI.getOperand(4).getImm())
2069           .addImm(MI.getOperand(5).getImm()) // 'pred'
2070           .add(MI.getOperand(6))
2071           .add(condCodeOp()) // 's' bit
2072           .add(makeImplicit(MI.getOperand(1)));
2073 
2074       MI.eraseFromParent();
2075       return true;
2076     }
2077     case ARM::t2MOVCCi16:
2078     case ARM::MOVCCi16: {
2079       unsigned NewOpc = AFI->isThumbFunction() ? ARM::t2MOVi16 : ARM::MOVi16;
2080       BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc),
2081               MI.getOperand(1).getReg())
2082           .addImm(MI.getOperand(2).getImm())
2083           .addImm(MI.getOperand(3).getImm()) // 'pred'
2084           .add(MI.getOperand(4))
2085           .add(makeImplicit(MI.getOperand(1)));
2086       MI.eraseFromParent();
2087       return true;
2088     }
2089     case ARM::t2MOVCCi:
2090     case ARM::MOVCCi: {
2091       unsigned Opc = AFI->isThumbFunction() ? ARM::t2MOVi : ARM::MOVi;
2092       BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc),
2093               MI.getOperand(1).getReg())
2094           .addImm(MI.getOperand(2).getImm())
2095           .addImm(MI.getOperand(3).getImm()) // 'pred'
2096           .add(MI.getOperand(4))
2097           .add(condCodeOp()) // 's' bit
2098           .add(makeImplicit(MI.getOperand(1)));
2099 
2100       MI.eraseFromParent();
2101       return true;
2102     }
2103     case ARM::t2MVNCCi:
2104     case ARM::MVNCCi: {
2105       unsigned Opc = AFI->isThumbFunction() ? ARM::t2MVNi : ARM::MVNi;
2106       BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc),
2107               MI.getOperand(1).getReg())
2108           .addImm(MI.getOperand(2).getImm())
2109           .addImm(MI.getOperand(3).getImm()) // 'pred'
2110           .add(MI.getOperand(4))
2111           .add(condCodeOp()) // 's' bit
2112           .add(makeImplicit(MI.getOperand(1)));
2113 
2114       MI.eraseFromParent();
2115       return true;
2116     }
2117     case ARM::t2MOVCClsl:
2118     case ARM::t2MOVCClsr:
2119     case ARM::t2MOVCCasr:
2120     case ARM::t2MOVCCror: {
2121       unsigned NewOpc;
2122       switch (Opcode) {
2123       case ARM::t2MOVCClsl: NewOpc = ARM::t2LSLri; break;
2124       case ARM::t2MOVCClsr: NewOpc = ARM::t2LSRri; break;
2125       case ARM::t2MOVCCasr: NewOpc = ARM::t2ASRri; break;
2126       case ARM::t2MOVCCror: NewOpc = ARM::t2RORri; break;
2127       default: llvm_unreachable("unexpected conditional move");
2128       }
2129       BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc),
2130               MI.getOperand(1).getReg())
2131           .add(MI.getOperand(2))
2132           .addImm(MI.getOperand(3).getImm())
2133           .addImm(MI.getOperand(4).getImm()) // 'pred'
2134           .add(MI.getOperand(5))
2135           .add(condCodeOp()) // 's' bit
2136           .add(makeImplicit(MI.getOperand(1)));
2137       MI.eraseFromParent();
2138       return true;
2139     }
2140     case ARM::Int_eh_sjlj_dispatchsetup: {
2141       MachineFunction &MF = *MI.getParent()->getParent();
2142       const ARMBaseInstrInfo *AII =
2143         static_cast<const ARMBaseInstrInfo*>(TII);
2144       const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
2145       // For functions using a base pointer, we rematerialize it (via the frame
2146       // pointer) here since eh.sjlj.setjmp and eh.sjlj.longjmp don't do it
2147       // for us. Otherwise, expand to nothing.
2148       if (RI.hasBasePointer(MF)) {
2149         int32_t NumBytes = AFI->getFramePtrSpillOffset();
2150         Register FramePtr = RI.getFrameRegister(MF);
2151         assert(MF.getSubtarget().getFrameLowering()->hasFP(MF) &&
2152                "base pointer without frame pointer?");
2153 
2154         if (AFI->isThumb2Function()) {
2155           emitT2RegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
2156                                  FramePtr, -NumBytes, ARMCC::AL, 0, *TII);
2157         } else if (AFI->isThumbFunction()) {
2158           emitThumbRegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
2159                                     FramePtr, -NumBytes, *TII, RI);
2160         } else {
2161           emitARMRegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
2162                                   FramePtr, -NumBytes, ARMCC::AL, 0,
2163                                   *TII);
2164         }
2165         // If there's dynamic realignment, adjust for it.
2166         if (RI.needsStackRealignment(MF)) {
2167           MachineFrameInfo &MFI = MF.getFrameInfo();
2168           Align MaxAlign = MFI.getMaxAlign();
2169           assert (!AFI->isThumb1OnlyFunction());
2170           // Emit bic r6, r6, MaxAlign
2171           assert(MaxAlign <= Align(256) &&
2172                  "The BIC instruction cannot encode "
2173                  "immediates larger than 256 with all lower "
2174                  "bits set.");
2175           unsigned bicOpc = AFI->isThumbFunction() ?
2176             ARM::t2BICri : ARM::BICri;
2177           BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(bicOpc), ARM::R6)
2178               .addReg(ARM::R6, RegState::Kill)
2179               .addImm(MaxAlign.value() - 1)
2180               .add(predOps(ARMCC::AL))
2181               .add(condCodeOp());
2182         }
2183 
2184       }
2185       MI.eraseFromParent();
2186       return true;
2187     }
2188 
2189     case ARM::MOVsrl_flag:
2190     case ARM::MOVsra_flag: {
2191       // These are just fancy MOVs instructions.
2192       BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi),
2193               MI.getOperand(0).getReg())
2194           .add(MI.getOperand(1))
2195           .addImm(ARM_AM::getSORegOpc(
2196               (Opcode == ARM::MOVsrl_flag ? ARM_AM::lsr : ARM_AM::asr), 1))
2197           .add(predOps(ARMCC::AL))
2198           .addReg(ARM::CPSR, RegState::Define);
2199       MI.eraseFromParent();
2200       return true;
2201     }
2202     case ARM::RRX: {
2203       // This encodes as "MOVs Rd, Rm, rrx".
2204       MachineInstrBuilder MIB =
2205           BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi),
2206                   MI.getOperand(0).getReg())
2207               .add(MI.getOperand(1))
2208               .addImm(ARM_AM::getSORegOpc(ARM_AM::rrx, 0))
2209               .add(predOps(ARMCC::AL))
2210               .add(condCodeOp());
2211       TransferImpOps(MI, MIB, MIB);
2212       MI.eraseFromParent();
2213       return true;
2214     }
2215     case ARM::tTPsoft:
2216     case ARM::TPsoft: {
2217       const bool Thumb = Opcode == ARM::tTPsoft;
2218 
2219       MachineInstrBuilder MIB;
2220       MachineFunction *MF = MBB.getParent();
2221       if (STI->genLongCalls()) {
2222         MachineConstantPool *MCP = MF->getConstantPool();
2223         unsigned PCLabelID = AFI->createPICLabelUId();
2224         MachineConstantPoolValue *CPV =
2225             ARMConstantPoolSymbol::Create(MF->getFunction().getContext(),
2226                                           "__aeabi_read_tp", PCLabelID, 0);
2227         Register Reg = MI.getOperand(0).getReg();
2228         MIB =
2229             BuildMI(MBB, MBBI, MI.getDebugLoc(),
2230                     TII->get(Thumb ? ARM::tLDRpci : ARM::LDRi12), Reg)
2231                 .addConstantPoolIndex(MCP->getConstantPoolIndex(CPV, Align(4)));
2232         if (!Thumb)
2233           MIB.addImm(0);
2234         MIB.add(predOps(ARMCC::AL));
2235 
2236         MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
2237                       TII->get(Thumb ? ARM::tBLXr : ARM::BLX));
2238         if (Thumb)
2239           MIB.add(predOps(ARMCC::AL));
2240         MIB.addReg(Reg, RegState::Kill);
2241       } else {
2242         MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
2243                       TII->get(Thumb ? ARM::tBL : ARM::BL));
2244         if (Thumb)
2245           MIB.add(predOps(ARMCC::AL));
2246         MIB.addExternalSymbol("__aeabi_read_tp", 0);
2247       }
2248 
2249       MIB.cloneMemRefs(MI);
2250       TransferImpOps(MI, MIB, MIB);
2251       // Update the call site info.
2252       if (MI.isCandidateForCallSiteEntry())
2253         MF->moveCallSiteInfo(&MI, &*MIB);
2254       MI.eraseFromParent();
2255       return true;
2256     }
2257     case ARM::tLDRpci_pic:
2258     case ARM::t2LDRpci_pic: {
2259       unsigned NewLdOpc = (Opcode == ARM::tLDRpci_pic)
2260         ? ARM::tLDRpci : ARM::t2LDRpci;
2261       Register DstReg = MI.getOperand(0).getReg();
2262       bool DstIsDead = MI.getOperand(0).isDead();
2263       MachineInstrBuilder MIB1 =
2264           BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewLdOpc), DstReg)
2265               .add(MI.getOperand(1))
2266               .add(predOps(ARMCC::AL));
2267       MIB1.cloneMemRefs(MI);
2268       MachineInstrBuilder MIB2 =
2269           BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tPICADD))
2270               .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
2271               .addReg(DstReg)
2272               .add(MI.getOperand(2));
2273       TransferImpOps(MI, MIB1, MIB2);
2274       MI.eraseFromParent();
2275       return true;
2276     }
2277 
2278     case ARM::LDRLIT_ga_abs:
2279     case ARM::LDRLIT_ga_pcrel:
2280     case ARM::LDRLIT_ga_pcrel_ldr:
2281     case ARM::tLDRLIT_ga_abs:
2282     case ARM::tLDRLIT_ga_pcrel: {
2283       Register DstReg = MI.getOperand(0).getReg();
2284       bool DstIsDead = MI.getOperand(0).isDead();
2285       const MachineOperand &MO1 = MI.getOperand(1);
2286       auto Flags = MO1.getTargetFlags();
2287       const GlobalValue *GV = MO1.getGlobal();
2288       bool IsARM =
2289           Opcode != ARM::tLDRLIT_ga_pcrel && Opcode != ARM::tLDRLIT_ga_abs;
2290       bool IsPIC =
2291           Opcode != ARM::LDRLIT_ga_abs && Opcode != ARM::tLDRLIT_ga_abs;
2292       unsigned LDRLITOpc = IsARM ? ARM::LDRi12 : ARM::tLDRpci;
2293       unsigned PICAddOpc =
2294           IsARM
2295               ? (Opcode == ARM::LDRLIT_ga_pcrel_ldr ? ARM::PICLDR : ARM::PICADD)
2296               : ARM::tPICADD;
2297 
2298       // We need a new const-pool entry to load from.
2299       MachineConstantPool *MCP = MBB.getParent()->getConstantPool();
2300       unsigned ARMPCLabelIndex = 0;
2301       MachineConstantPoolValue *CPV;
2302 
2303       if (IsPIC) {
2304         unsigned PCAdj = IsARM ? 8 : 4;
2305         auto Modifier = (Flags & ARMII::MO_GOT)
2306                             ? ARMCP::GOT_PREL
2307                             : ARMCP::no_modifier;
2308         ARMPCLabelIndex = AFI->createPICLabelUId();
2309         CPV = ARMConstantPoolConstant::Create(
2310             GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj, Modifier,
2311             /*AddCurrentAddr*/ Modifier == ARMCP::GOT_PREL);
2312       } else
2313         CPV = ARMConstantPoolConstant::Create(GV, ARMCP::no_modifier);
2314 
2315       MachineInstrBuilder MIB =
2316           BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(LDRLITOpc), DstReg)
2317               .addConstantPoolIndex(MCP->getConstantPoolIndex(CPV, Align(4)));
2318       if (IsARM)
2319         MIB.addImm(0);
2320       MIB.add(predOps(ARMCC::AL));
2321 
2322       if (IsPIC) {
2323         MachineInstrBuilder MIB =
2324           BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(PICAddOpc))
2325             .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
2326             .addReg(DstReg)
2327             .addImm(ARMPCLabelIndex);
2328 
2329         if (IsARM)
2330           MIB.add(predOps(ARMCC::AL));
2331       }
2332 
2333       MI.eraseFromParent();
2334       return true;
2335     }
2336     case ARM::MOV_ga_pcrel:
2337     case ARM::MOV_ga_pcrel_ldr:
2338     case ARM::t2MOV_ga_pcrel: {
2339       // Expand into movw + movt of a PIC label, then "add pc" (PICADD) or "ldr [pc]" (PICLDR).
2340       unsigned LabelId = AFI->createPICLabelUId();
2341       Register DstReg = MI.getOperand(0).getReg();
2342       bool DstIsDead = MI.getOperand(0).isDead();
2343       const MachineOperand &MO1 = MI.getOperand(1);
2344       const GlobalValue *GV = MO1.getGlobal();
2345       unsigned TF = MO1.getTargetFlags();
2346       bool isARM = Opcode != ARM::t2MOV_ga_pcrel;
2347       unsigned LO16Opc = isARM ? ARM::MOVi16_ga_pcrel : ARM::t2MOVi16_ga_pcrel;
2348       unsigned HI16Opc = isARM ? ARM::MOVTi16_ga_pcrel : ARM::t2MOVTi16_ga_pcrel;
2349       unsigned LO16TF = TF | ARMII::MO_LO16;
2350       unsigned HI16TF = TF | ARMII::MO_HI16;
2351       unsigned PICAddOpc = isARM
2352         ? (Opcode == ARM::MOV_ga_pcrel_ldr ? ARM::PICLDR : ARM::PICADD)
2353         : ARM::tPICADD;
2354       MachineInstrBuilder MIB1 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
2355                                          TII->get(LO16Opc), DstReg)
2356         .addGlobalAddress(GV, MO1.getOffset(), TF | LO16TF)
2357         .addImm(LabelId);
2358 
2359       BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(HI16Opc), DstReg)
2360         .addReg(DstReg)
2361         .addGlobalAddress(GV, MO1.getOffset(), TF | HI16TF)
2362         .addImm(LabelId);
2363 
2364       MachineInstrBuilder MIB3 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
2365                                          TII->get(PICAddOpc))
2366         .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
2367         .addReg(DstReg).addImm(LabelId);
2368       if (isARM) {
2369         MIB3.add(predOps(ARMCC::AL));
2370         if (Opcode == ARM::MOV_ga_pcrel_ldr)
2371           MIB3.cloneMemRefs(MI);
2372       }
2373       TransferImpOps(MI, MIB1, MIB3);
2374       MI.eraseFromParent();
2375       return true;
2376     }
2377 
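    // 32-bit immediate pseudos: ExpandMOV32BitImm materializes the constant
    // (using a movw/movt pair when the subtarget supports it).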
2378     case ARM::MOVi32imm:
2379     case ARM::MOVCCi32imm:
2380     case ARM::t2MOVi32imm:
2381     case ARM::t2MOVCCi32imm:
2382       ExpandMOV32BitImm(MBB, MBBI);
2383       return true;
2384 
2385     case ARM::SUBS_PC_LR: {
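      // Return-from-exception pseudo: encode "subs pc, lr, #imm" as a SUBri
      // with PC as the destination and CPSR marked undef.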
2386       MachineInstrBuilder MIB =
2387           BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::SUBri), ARM::PC)
2388               .addReg(ARM::LR)
2389               .add(MI.getOperand(0))
2390               .add(MI.getOperand(1))
2391               .add(MI.getOperand(2))
2392               .addReg(ARM::CPSR, RegState::Undef);
2393       TransferImpOps(MI, MIB, MIB);
2394       MI.eraseFromParent();
2395       return true;
2396     }
2397     case ARM::VLDMQIA: {
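      // VLDMQIA has no machine encoding; rewrite it as a VLDMDIA that defines
      // the two D sub-registers (dsub_0/dsub_1) of the Q destination.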
2398       unsigned NewOpc = ARM::VLDMDIA;
2399       MachineInstrBuilder MIB =
2400         BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc));
2401       unsigned OpIdx = 0;
2402 
2403       // Grab the Q register destination.
2404       bool DstIsDead = MI.getOperand(OpIdx).isDead();
2405       Register DstReg = MI.getOperand(OpIdx++).getReg();
2406 
2407       // Copy the source register.
2408       MIB.add(MI.getOperand(OpIdx++));
2409 
2410       // Copy the predicate operands.
2411       MIB.add(MI.getOperand(OpIdx++));
2412       MIB.add(MI.getOperand(OpIdx++));
2413 
2414       // Add the destination operands (D subregs).
2415       Register D0 = TRI->getSubReg(DstReg, ARM::dsub_0);
2416       Register D1 = TRI->getSubReg(DstReg, ARM::dsub_1);
2417       MIB.addReg(D0, RegState::Define | getDeadRegState(DstIsDead))
2418         .addReg(D1, RegState::Define | getDeadRegState(DstIsDead));
2419 
2420       // Add an implicit def for the super-register.
2421       MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
2422       TransferImpOps(MI, MIB, MIB);
2423       MIB.cloneMemRefs(MI);
2424       MI.eraseFromParent();
2425       return true;
2426     }
2427 
2428     case ARM::VSTMQIA: {
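      // Likewise for stores: emit a VSTMDIA whose sources are the two D
      // sub-registers of the Q operand.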
2429       unsigned NewOpc = ARM::VSTMDIA;
2430       MachineInstrBuilder MIB =
2431         BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc));
2432       unsigned OpIdx = 0;
2433 
2434       // Grab the Q register source.
2435       bool SrcIsKill = MI.getOperand(OpIdx).isKill();
2436       Register SrcReg = MI.getOperand(OpIdx++).getReg();
2437 
2438       // Copy the destination register.
2439       MachineOperand Dst(MI.getOperand(OpIdx++));
2440       MIB.add(Dst);
2441 
2442       // Copy the predicate operands.
2443       MIB.add(MI.getOperand(OpIdx++));
2444       MIB.add(MI.getOperand(OpIdx++));
2445 
2446       // Add the source operands (D subregs).
2447       Register D0 = TRI->getSubReg(SrcReg, ARM::dsub_0);
2448       Register D1 = TRI->getSubReg(SrcReg, ARM::dsub_1);
2449       MIB.addReg(D0, SrcIsKill ? RegState::Kill : 0)
2450          .addReg(D1, SrcIsKill ? RegState::Kill : 0);
2451 
2452       if (SrcIsKill)      // Add an implicit kill for the Q register.
2453         MIB->addRegisterKilled(SrcReg, TRI, true);
2454 
2455       TransferImpOps(MI, MIB, MIB);
2456       MIB.cloneMemRefs(MI);
2457       MI.eraseFromParent();
2458       return true;
2459     }
2460 
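    // NEON structured-load pseudos (VLD1..VLD4, writeback and DUP variants):
    // ExpandVLD rewrites each one into the corresponding real VLDn instruction.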
2461     case ARM::VLD2q8Pseudo:
2462     case ARM::VLD2q16Pseudo:
2463     case ARM::VLD2q32Pseudo:
2464     case ARM::VLD2q8PseudoWB_fixed:
2465     case ARM::VLD2q16PseudoWB_fixed:
2466     case ARM::VLD2q32PseudoWB_fixed:
2467     case ARM::VLD2q8PseudoWB_register:
2468     case ARM::VLD2q16PseudoWB_register:
2469     case ARM::VLD2q32PseudoWB_register:
2470     case ARM::VLD3d8Pseudo:
2471     case ARM::VLD3d16Pseudo:
2472     case ARM::VLD3d32Pseudo:
2473     case ARM::VLD1d8TPseudo:
2474     case ARM::VLD1d16TPseudo:
2475     case ARM::VLD1d32TPseudo:
2476     case ARM::VLD1d64TPseudo:
2477     case ARM::VLD1d64TPseudoWB_fixed:
2478     case ARM::VLD1d64TPseudoWB_register:
2479     case ARM::VLD3d8Pseudo_UPD:
2480     case ARM::VLD3d16Pseudo_UPD:
2481     case ARM::VLD3d32Pseudo_UPD:
2482     case ARM::VLD3q8Pseudo_UPD:
2483     case ARM::VLD3q16Pseudo_UPD:
2484     case ARM::VLD3q32Pseudo_UPD:
2485     case ARM::VLD3q8oddPseudo:
2486     case ARM::VLD3q16oddPseudo:
2487     case ARM::VLD3q32oddPseudo:
2488     case ARM::VLD3q8oddPseudo_UPD:
2489     case ARM::VLD3q16oddPseudo_UPD:
2490     case ARM::VLD3q32oddPseudo_UPD:
2491     case ARM::VLD4d8Pseudo:
2492     case ARM::VLD4d16Pseudo:
2493     case ARM::VLD4d32Pseudo:
2494     case ARM::VLD1d8QPseudo:
2495     case ARM::VLD1d16QPseudo:
2496     case ARM::VLD1d32QPseudo:
2497     case ARM::VLD1d64QPseudo:
2498     case ARM::VLD1d64QPseudoWB_fixed:
2499     case ARM::VLD1d64QPseudoWB_register:
2500     case ARM::VLD1q8HighQPseudo:
2501     case ARM::VLD1q8LowQPseudo_UPD:
2502     case ARM::VLD1q8HighTPseudo:
2503     case ARM::VLD1q8LowTPseudo_UPD:
2504     case ARM::VLD1q16HighQPseudo:
2505     case ARM::VLD1q16LowQPseudo_UPD:
2506     case ARM::VLD1q16HighTPseudo:
2507     case ARM::VLD1q16LowTPseudo_UPD:
2508     case ARM::VLD1q32HighQPseudo:
2509     case ARM::VLD1q32LowQPseudo_UPD:
2510     case ARM::VLD1q32HighTPseudo:
2511     case ARM::VLD1q32LowTPseudo_UPD:
2512     case ARM::VLD1q64HighQPseudo:
2513     case ARM::VLD1q64LowQPseudo_UPD:
2514     case ARM::VLD1q64HighTPseudo:
2515     case ARM::VLD1q64LowTPseudo_UPD:
2516     case ARM::VLD4d8Pseudo_UPD:
2517     case ARM::VLD4d16Pseudo_UPD:
2518     case ARM::VLD4d32Pseudo_UPD:
2519     case ARM::VLD4q8Pseudo_UPD:
2520     case ARM::VLD4q16Pseudo_UPD:
2521     case ARM::VLD4q32Pseudo_UPD:
2522     case ARM::VLD4q8oddPseudo:
2523     case ARM::VLD4q16oddPseudo:
2524     case ARM::VLD4q32oddPseudo:
2525     case ARM::VLD4q8oddPseudo_UPD:
2526     case ARM::VLD4q16oddPseudo_UPD:
2527     case ARM::VLD4q32oddPseudo_UPD:
2528     case ARM::VLD3DUPd8Pseudo:
2529     case ARM::VLD3DUPd16Pseudo:
2530     case ARM::VLD3DUPd32Pseudo:
2531     case ARM::VLD3DUPd8Pseudo_UPD:
2532     case ARM::VLD3DUPd16Pseudo_UPD:
2533     case ARM::VLD3DUPd32Pseudo_UPD:
2534     case ARM::VLD4DUPd8Pseudo:
2535     case ARM::VLD4DUPd16Pseudo:
2536     case ARM::VLD4DUPd32Pseudo:
2537     case ARM::VLD4DUPd8Pseudo_UPD:
2538     case ARM::VLD4DUPd16Pseudo_UPD:
2539     case ARM::VLD4DUPd32Pseudo_UPD:
2540     case ARM::VLD2DUPq8EvenPseudo:
2541     case ARM::VLD2DUPq8OddPseudo:
2542     case ARM::VLD2DUPq16EvenPseudo:
2543     case ARM::VLD2DUPq16OddPseudo:
2544     case ARM::VLD2DUPq32EvenPseudo:
2545     case ARM::VLD2DUPq32OddPseudo:
2546     case ARM::VLD3DUPq8EvenPseudo:
2547     case ARM::VLD3DUPq8OddPseudo:
2548     case ARM::VLD3DUPq16EvenPseudo:
2549     case ARM::VLD3DUPq16OddPseudo:
2550     case ARM::VLD3DUPq32EvenPseudo:
2551     case ARM::VLD3DUPq32OddPseudo:
2552     case ARM::VLD4DUPq8EvenPseudo:
2553     case ARM::VLD4DUPq8OddPseudo:
2554     case ARM::VLD4DUPq16EvenPseudo:
2555     case ARM::VLD4DUPq16OddPseudo:
2556     case ARM::VLD4DUPq32EvenPseudo:
2557     case ARM::VLD4DUPq32OddPseudo:
2558       ExpandVLD(MBBI);
2559       return true;
2560 
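    // NEON structured-store pseudos: ExpandVST rewrites each one into the
    // corresponding real VSTn instruction.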
2561     case ARM::VST2q8Pseudo:
2562     case ARM::VST2q16Pseudo:
2563     case ARM::VST2q32Pseudo:
2564     case ARM::VST2q8PseudoWB_fixed:
2565     case ARM::VST2q16PseudoWB_fixed:
2566     case ARM::VST2q32PseudoWB_fixed:
2567     case ARM::VST2q8PseudoWB_register:
2568     case ARM::VST2q16PseudoWB_register:
2569     case ARM::VST2q32PseudoWB_register:
2570     case ARM::VST3d8Pseudo:
2571     case ARM::VST3d16Pseudo:
2572     case ARM::VST3d32Pseudo:
2573     case ARM::VST1d8TPseudo:
2574     case ARM::VST1d16TPseudo:
2575     case ARM::VST1d32TPseudo:
2576     case ARM::VST1d64TPseudo:
2577     case ARM::VST3d8Pseudo_UPD:
2578     case ARM::VST3d16Pseudo_UPD:
2579     case ARM::VST3d32Pseudo_UPD:
2580     case ARM::VST1d64TPseudoWB_fixed:
2581     case ARM::VST1d64TPseudoWB_register:
2582     case ARM::VST3q8Pseudo_UPD:
2583     case ARM::VST3q16Pseudo_UPD:
2584     case ARM::VST3q32Pseudo_UPD:
2585     case ARM::VST3q8oddPseudo:
2586     case ARM::VST3q16oddPseudo:
2587     case ARM::VST3q32oddPseudo:
2588     case ARM::VST3q8oddPseudo_UPD:
2589     case ARM::VST3q16oddPseudo_UPD:
2590     case ARM::VST3q32oddPseudo_UPD:
2591     case ARM::VST4d8Pseudo:
2592     case ARM::VST4d16Pseudo:
2593     case ARM::VST4d32Pseudo:
2594     case ARM::VST1d8QPseudo:
2595     case ARM::VST1d16QPseudo:
2596     case ARM::VST1d32QPseudo:
2597     case ARM::VST1d64QPseudo:
2598     case ARM::VST4d8Pseudo_UPD:
2599     case ARM::VST4d16Pseudo_UPD:
2600     case ARM::VST4d32Pseudo_UPD:
2601     case ARM::VST1d64QPseudoWB_fixed:
2602     case ARM::VST1d64QPseudoWB_register:
2603     case ARM::VST1q8HighQPseudo:
2604     case ARM::VST1q8LowQPseudo_UPD:
2605     case ARM::VST1q8HighTPseudo:
2606     case ARM::VST1q8LowTPseudo_UPD:
2607     case ARM::VST1q16HighQPseudo:
2608     case ARM::VST1q16LowQPseudo_UPD:
2609     case ARM::VST1q16HighTPseudo:
2610     case ARM::VST1q16LowTPseudo_UPD:
2611     case ARM::VST1q32HighQPseudo:
2612     case ARM::VST1q32LowQPseudo_UPD:
2613     case ARM::VST1q32HighTPseudo:
2614     case ARM::VST1q32LowTPseudo_UPD:
2615     case ARM::VST1q64HighQPseudo:
2616     case ARM::VST1q64LowQPseudo_UPD:
2617     case ARM::VST1q64HighTPseudo:
2618     case ARM::VST1q64LowTPseudo_UPD:
2619     case ARM::VST4q8Pseudo_UPD:
2620     case ARM::VST4q16Pseudo_UPD:
2621     case ARM::VST4q32Pseudo_UPD:
2622     case ARM::VST4q8oddPseudo:
2623     case ARM::VST4q16oddPseudo:
2624     case ARM::VST4q32oddPseudo:
2625     case ARM::VST4q8oddPseudo_UPD:
2626     case ARM::VST4q16oddPseudo_UPD:
2627     case ARM::VST4q32oddPseudo_UPD:
2628       ExpandVST(MBBI);
2629       return true;
2630 
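    // Single-lane NEON load/store pseudos are handled by ExpandLaneOp.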
2631     case ARM::VLD1LNq8Pseudo:
2632     case ARM::VLD1LNq16Pseudo:
2633     case ARM::VLD1LNq32Pseudo:
2634     case ARM::VLD1LNq8Pseudo_UPD:
2635     case ARM::VLD1LNq16Pseudo_UPD:
2636     case ARM::VLD1LNq32Pseudo_UPD:
2637     case ARM::VLD2LNd8Pseudo:
2638     case ARM::VLD2LNd16Pseudo:
2639     case ARM::VLD2LNd32Pseudo:
2640     case ARM::VLD2LNq16Pseudo:
2641     case ARM::VLD2LNq32Pseudo:
2642     case ARM::VLD2LNd8Pseudo_UPD:
2643     case ARM::VLD2LNd16Pseudo_UPD:
2644     case ARM::VLD2LNd32Pseudo_UPD:
2645     case ARM::VLD2LNq16Pseudo_UPD:
2646     case ARM::VLD2LNq32Pseudo_UPD:
2647     case ARM::VLD3LNd8Pseudo:
2648     case ARM::VLD3LNd16Pseudo:
2649     case ARM::VLD3LNd32Pseudo:
2650     case ARM::VLD3LNq16Pseudo:
2651     case ARM::VLD3LNq32Pseudo:
2652     case ARM::VLD3LNd8Pseudo_UPD:
2653     case ARM::VLD3LNd16Pseudo_UPD:
2654     case ARM::VLD3LNd32Pseudo_UPD:
2655     case ARM::VLD3LNq16Pseudo_UPD:
2656     case ARM::VLD3LNq32Pseudo_UPD:
2657     case ARM::VLD4LNd8Pseudo:
2658     case ARM::VLD4LNd16Pseudo:
2659     case ARM::VLD4LNd32Pseudo:
2660     case ARM::VLD4LNq16Pseudo:
2661     case ARM::VLD4LNq32Pseudo:
2662     case ARM::VLD4LNd8Pseudo_UPD:
2663     case ARM::VLD4LNd16Pseudo_UPD:
2664     case ARM::VLD4LNd32Pseudo_UPD:
2665     case ARM::VLD4LNq16Pseudo_UPD:
2666     case ARM::VLD4LNq32Pseudo_UPD:
2667     case ARM::VST1LNq8Pseudo:
2668     case ARM::VST1LNq16Pseudo:
2669     case ARM::VST1LNq32Pseudo:
2670     case ARM::VST1LNq8Pseudo_UPD:
2671     case ARM::VST1LNq16Pseudo_UPD:
2672     case ARM::VST1LNq32Pseudo_UPD:
2673     case ARM::VST2LNd8Pseudo:
2674     case ARM::VST2LNd16Pseudo:
2675     case ARM::VST2LNd32Pseudo:
2676     case ARM::VST2LNq16Pseudo:
2677     case ARM::VST2LNq32Pseudo:
2678     case ARM::VST2LNd8Pseudo_UPD:
2679     case ARM::VST2LNd16Pseudo_UPD:
2680     case ARM::VST2LNd32Pseudo_UPD:
2681     case ARM::VST2LNq16Pseudo_UPD:
2682     case ARM::VST2LNq32Pseudo_UPD:
2683     case ARM::VST3LNd8Pseudo:
2684     case ARM::VST3LNd16Pseudo:
2685     case ARM::VST3LNd32Pseudo:
2686     case ARM::VST3LNq16Pseudo:
2687     case ARM::VST3LNq32Pseudo:
2688     case ARM::VST3LNd8Pseudo_UPD:
2689     case ARM::VST3LNd16Pseudo_UPD:
2690     case ARM::VST3LNd32Pseudo_UPD:
2691     case ARM::VST3LNq16Pseudo_UPD:
2692     case ARM::VST3LNq32Pseudo_UPD:
2693     case ARM::VST4LNd8Pseudo:
2694     case ARM::VST4LNd16Pseudo:
2695     case ARM::VST4LNd32Pseudo:
2696     case ARM::VST4LNq16Pseudo:
2697     case ARM::VST4LNq32Pseudo:
2698     case ARM::VST4LNd8Pseudo_UPD:
2699     case ARM::VST4LNd16Pseudo_UPD:
2700     case ARM::VST4LNd32Pseudo_UPD:
2701     case ARM::VST4LNq16Pseudo_UPD:
2702     case ARM::VST4LNq32Pseudo_UPD:
2703       ExpandLaneOp(MBBI);
2704       return true;
2705 
2706     case ARM::VTBL3Pseudo: ExpandVTBL(MBBI, ARM::VTBL3, false); return true;
2707     case ARM::VTBL4Pseudo: ExpandVTBL(MBBI, ARM::VTBL4, false); return true;
2708     case ARM::VTBX3Pseudo: ExpandVTBL(MBBI, ARM::VTBX3, true); return true;
2709     case ARM::VTBX4Pseudo: ExpandVTBL(MBBI, ARM::VTBX4, true); return true;
2710 
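    // Atomic compare-and-swap pseudos: pick the size-matched exclusive
    // load/store opcodes (Thumb2 or ARM) plus a zero-extend for the narrow
    // widths, and let ExpandCMP_SWAP / ExpandCMP_SWAP_64 build the loop.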
2711     case ARM::CMP_SWAP_8:
2712       if (STI->isThumb())
2713         return ExpandCMP_SWAP(MBB, MBBI, ARM::t2LDREXB, ARM::t2STREXB,
2714                               ARM::tUXTB, NextMBBI);
2715       else
2716         return ExpandCMP_SWAP(MBB, MBBI, ARM::LDREXB, ARM::STREXB,
2717                               ARM::UXTB, NextMBBI);
2718     case ARM::CMP_SWAP_16:
2719       if (STI->isThumb())
2720         return ExpandCMP_SWAP(MBB, MBBI, ARM::t2LDREXH, ARM::t2STREXH,
2721                               ARM::tUXTH, NextMBBI);
2722       else
2723         return ExpandCMP_SWAP(MBB, MBBI, ARM::LDREXH, ARM::STREXH,
2724                               ARM::UXTH, NextMBBI);
2725     case ARM::CMP_SWAP_32:
2726       if (STI->isThumb())
2727         return ExpandCMP_SWAP(MBB, MBBI, ARM::t2LDREX, ARM::t2STREX, 0,
2728                               NextMBBI);
2729       else
2730         return ExpandCMP_SWAP(MBB, MBBI, ARM::LDREX, ARM::STREX, 0, NextMBBI);
2731 
2732     case ARM::CMP_SWAP_64:
2733       return ExpandCMP_SWAP_64(MBB, MBBI, NextMBBI);
2734 
2735     case ARM::tBL_PUSHLR:
2736     case ARM::BL_PUSHLR: {
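      // Push LR and then emit the call (e.g. bl __gnu_mcount_nc), forwarding
      // the remaining operands of the pseudo onto the call.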
2737       const bool Thumb = Opcode == ARM::tBL_PUSHLR;
2738       Register Reg = MI.getOperand(0).getReg();
2739       assert(Reg == ARM::LR && "expect LR register!");
2740       MachineInstrBuilder MIB;
2741       if (Thumb) {
2742         // push {lr}
2743         BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tPUSH))
2744             .add(predOps(ARMCC::AL))
2745             .addReg(Reg);
2746 
2747         // bl __gnu_mcount_nc
2748         MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tBL));
2749       } else {
2750         // stmdb   sp!, {lr}
2751         BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::STMDB_UPD))
2752             .addReg(ARM::SP, RegState::Define)
2753             .addReg(ARM::SP)
2754             .add(predOps(ARMCC::AL))
2755             .addReg(Reg);
2756 
2757         // bl __gnu_mcount_nc
2758         MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::BL));
2759       }
2760       MIB.cloneMemRefs(MI);
2761       for (unsigned i = 1; i < MI.getNumOperands(); ++i) MIB.add(MI.getOperand(i));
2762       MI.eraseFromParent();
2763       return true;
2764     }
2765     case ARM::LOADDUAL:
2766     case ARM::STOREDUAL: {
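      // Expand to a real LDRD/STRD operating on the two halves
      // (gsub_0/gsub_1) of the GPRPair operand.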
2767       Register PairReg = MI.getOperand(0).getReg();
2768 
2769       MachineInstrBuilder MIB =
2770           BuildMI(MBB, MBBI, MI.getDebugLoc(),
2771                   TII->get(Opcode == ARM::LOADDUAL ? ARM::LDRD : ARM::STRD))
2772               .addReg(TRI->getSubReg(PairReg, ARM::gsub_0),
2773                       Opcode == ARM::LOADDUAL ? RegState::Define : 0)
2774               .addReg(TRI->getSubReg(PairReg, ARM::gsub_1),
2775                       Opcode == ARM::LOADDUAL ? RegState::Define : 0);
2776       for (unsigned i = 1; i < MI.getNumOperands(); i++)
2777         MIB.add(MI.getOperand(i));
2778       MIB.add(predOps(ARMCC::AL));
2779       MIB.cloneMemRefs(MI);
2780       MI.eraseFromParent();
2781       return true;
2782     }
2783   }
2784 }
2785 
2786 bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
2787   bool Modified = false;
2788 
2789   MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
2790   while (MBBI != E) {
2791     MachineBasicBlock::iterator NMBBI = std::next(MBBI);
2792     Modified |= ExpandMI(MBB, MBBI, NMBBI);
2793     MBBI = NMBBI;
2794   }
2795 
2796   return Modified;
2797 }
2798 
2799 bool ARMExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
2800   STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
2801   TII = STI->getInstrInfo();
2802   TRI = STI->getRegisterInfo();
2803   AFI = MF.getInfo<ARMFunctionInfo>();
2804 
2805   LLVM_DEBUG(dbgs() << "********** ARM EXPAND PSEUDO INSTRUCTIONS **********\n"
2806                     << "********** Function: " << MF.getName() << '\n');
2807 
2808   bool Modified = false;
2809   for (MachineBasicBlock &MBB : MF)
2810     Modified |= ExpandMBB(MBB);
2811   if (VerifyARMPseudo)
2812     MF.verify(this, "After expanding ARM pseudo instructions.");
2813 
2814   LLVM_DEBUG(dbgs() << "***************************************************\n");
2815   return Modified;
2816 }
2817 
2818 /// createARMExpandPseudoPass - returns an instance of the pseudo instruction
2819 /// expansion pass.
2820 FunctionPass *llvm::createARMExpandPseudoPass() {
2821   return new ARMExpandPseudo();
2822 }
2823