1 //===-- ARMExpandPseudoInsts.cpp - Expand pseudo instructions -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a pass that expands pseudo instructions into target
10 // instructions to allow proper scheduling, if-conversion, and other late
11 // optimizations. This pass should be run after register allocation but before
12 // the post-regalloc scheduling pass.
13 //
14 //===----------------------------------------------------------------------===//
15
16 #include "ARM.h"
17 #include "ARMBaseInstrInfo.h"
18 #include "ARMBaseRegisterInfo.h"
19 #include "ARMConstantPoolValue.h"
20 #include "ARMMachineFunctionInfo.h"
21 #include "ARMSubtarget.h"
22 #include "MCTargetDesc/ARMAddressingModes.h"
23 #include "llvm/CodeGen/LivePhysRegs.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunctionPass.h"
26 #include "llvm/MC/MCAsmInfo.h"
27 #include "llvm/Support/Debug.h"
28
29 using namespace llvm;
30
31 #define DEBUG_TYPE "arm-pseudo"
32
33 static cl::opt<bool>
34 VerifyARMPseudo("verify-arm-pseudo-expand", cl::Hidden,
35 cl::desc("Verify machine code after expanding ARM pseudos"));
36
37 #define ARM_EXPAND_PSEUDO_NAME "ARM pseudo instruction expansion pass"
38
39 namespace {
40 class ARMExpandPseudo : public MachineFunctionPass {
41 public:
42 static char ID;
ARMExpandPseudo()43 ARMExpandPseudo() : MachineFunctionPass(ID) {}
44
45 const ARMBaseInstrInfo *TII;
46 const TargetRegisterInfo *TRI;
47 const ARMSubtarget *STI;
48 ARMFunctionInfo *AFI;
49
50 bool runOnMachineFunction(MachineFunction &Fn) override;
51
getRequiredProperties() const52 MachineFunctionProperties getRequiredProperties() const override {
53 return MachineFunctionProperties().set(
54 MachineFunctionProperties::Property::NoVRegs);
55 }
56
getPassName() const57 StringRef getPassName() const override {
58 return ARM_EXPAND_PSEUDO_NAME;
59 }
60
61 private:
62 void TransferImpOps(MachineInstr &OldMI,
63 MachineInstrBuilder &UseMI, MachineInstrBuilder &DefMI);
64 bool ExpandMI(MachineBasicBlock &MBB,
65 MachineBasicBlock::iterator MBBI,
66 MachineBasicBlock::iterator &NextMBBI);
67 bool ExpandMBB(MachineBasicBlock &MBB);
68 void ExpandVLD(MachineBasicBlock::iterator &MBBI);
69 void ExpandVST(MachineBasicBlock::iterator &MBBI);
70 void ExpandLaneOp(MachineBasicBlock::iterator &MBBI);
71 void ExpandVTBL(MachineBasicBlock::iterator &MBBI,
72 unsigned Opc, bool IsExt);
73 void ExpandMQQPRLoadStore(MachineBasicBlock::iterator &MBBI);
74 void ExpandMOV32BitImm(MachineBasicBlock &MBB,
75 MachineBasicBlock::iterator &MBBI);
76 void CMSEClearGPRegs(MachineBasicBlock &MBB,
77 MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
78 const SmallVectorImpl<unsigned> &ClearRegs,
79 unsigned ClobberReg);
80 MachineBasicBlock &CMSEClearFPRegs(MachineBasicBlock &MBB,
81 MachineBasicBlock::iterator MBBI);
82 MachineBasicBlock &CMSEClearFPRegsV8(MachineBasicBlock &MBB,
83 MachineBasicBlock::iterator MBBI,
84 const BitVector &ClearRegs);
85 MachineBasicBlock &CMSEClearFPRegsV81(MachineBasicBlock &MBB,
86 MachineBasicBlock::iterator MBBI,
87 const BitVector &ClearRegs);
88 void CMSESaveClearFPRegs(MachineBasicBlock &MBB,
89 MachineBasicBlock::iterator MBBI, DebugLoc &DL,
90 const LivePhysRegs &LiveRegs,
91 SmallVectorImpl<unsigned> &AvailableRegs);
92 void CMSESaveClearFPRegsV8(MachineBasicBlock &MBB,
93 MachineBasicBlock::iterator MBBI, DebugLoc &DL,
94 const LivePhysRegs &LiveRegs,
95 SmallVectorImpl<unsigned> &ScratchRegs);
96 void CMSESaveClearFPRegsV81(MachineBasicBlock &MBB,
97 MachineBasicBlock::iterator MBBI, DebugLoc &DL,
98 const LivePhysRegs &LiveRegs);
99 void CMSERestoreFPRegs(MachineBasicBlock &MBB,
100 MachineBasicBlock::iterator MBBI, DebugLoc &DL,
101 SmallVectorImpl<unsigned> &AvailableRegs);
102 void CMSERestoreFPRegsV8(MachineBasicBlock &MBB,
103 MachineBasicBlock::iterator MBBI, DebugLoc &DL,
104 SmallVectorImpl<unsigned> &AvailableRegs);
105 void CMSERestoreFPRegsV81(MachineBasicBlock &MBB,
106 MachineBasicBlock::iterator MBBI, DebugLoc &DL,
107 SmallVectorImpl<unsigned> &AvailableRegs);
108 bool ExpandCMP_SWAP(MachineBasicBlock &MBB,
109 MachineBasicBlock::iterator MBBI, unsigned LdrexOp,
110 unsigned StrexOp, unsigned UxtOp,
111 MachineBasicBlock::iterator &NextMBBI);
112
113 bool ExpandCMP_SWAP_64(MachineBasicBlock &MBB,
114 MachineBasicBlock::iterator MBBI,
115 MachineBasicBlock::iterator &NextMBBI);
116 };
117 char ARMExpandPseudo::ID = 0;
118 }
119
INITIALIZE_PASS(ARMExpandPseudo,DEBUG_TYPE,ARM_EXPAND_PSEUDO_NAME,false,false)120 INITIALIZE_PASS(ARMExpandPseudo, DEBUG_TYPE, ARM_EXPAND_PSEUDO_NAME, false,
121 false)
122
123 /// TransferImpOps - Transfer implicit operands on the pseudo instruction to
124 /// the instructions created from the expansion.
125 void ARMExpandPseudo::TransferImpOps(MachineInstr &OldMI,
126 MachineInstrBuilder &UseMI,
127 MachineInstrBuilder &DefMI) {
128 const MCInstrDesc &Desc = OldMI.getDesc();
129 for (const MachineOperand &MO :
130 llvm::drop_begin(OldMI.operands(), Desc.getNumOperands())) {
131 assert(MO.isReg() && MO.getReg());
132 if (MO.isUse())
133 UseMI.add(MO);
134 else
135 DefMI.add(MO);
136 }
137 }
138
139 namespace {
140 // Constants for register spacing in NEON load/store instructions.
141 // For quad-register load-lane and store-lane pseudo instructors, the
142 // spacing is initially assumed to be EvenDblSpc, and that is changed to
143 // OddDblSpc depending on the lane number operand.
144 enum NEONRegSpacing {
145 SingleSpc,
146 SingleLowSpc , // Single spacing, low registers, three and four vectors.
147 SingleHighQSpc, // Single spacing, high registers, four vectors.
148 SingleHighTSpc, // Single spacing, high registers, three vectors.
149 EvenDblSpc,
150 OddDblSpc
151 };
152
153 // Entries for NEON load/store information table. The table is sorted by
154 // PseudoOpc for fast binary-search lookups.
155 struct NEONLdStTableEntry {
156 uint16_t PseudoOpc;
157 uint16_t RealOpc;
158 bool IsLoad;
159 bool isUpdating;
160 bool hasWritebackOperand;
161 uint8_t RegSpacing; // One of type NEONRegSpacing
162 uint8_t NumRegs; // D registers loaded or stored
163 uint8_t RegElts; // elements per D register; used for lane ops
164 // FIXME: Temporary flag to denote whether the real instruction takes
165 // a single register (like the encoding) or all of the registers in
166 // the list (like the asm syntax and the isel DAG). When all definitions
167 // are converted to take only the single encoded register, this will
168 // go away.
169 bool copyAllListRegs;
170
171 // Comparison methods for binary search of the table.
operator <__anonee5484a30211::NEONLdStTableEntry172 bool operator<(const NEONLdStTableEntry &TE) const {
173 return PseudoOpc < TE.PseudoOpc;
174 }
operator <(const NEONLdStTableEntry & TE,unsigned PseudoOpc)175 friend bool operator<(const NEONLdStTableEntry &TE, unsigned PseudoOpc) {
176 return TE.PseudoOpc < PseudoOpc;
177 }
operator <(unsigned PseudoOpc,const NEONLdStTableEntry & TE)178 friend bool LLVM_ATTRIBUTE_UNUSED operator<(unsigned PseudoOpc,
179 const NEONLdStTableEntry &TE) {
180 return PseudoOpc < TE.PseudoOpc;
181 }
182 };
183 }
184
185 static const NEONLdStTableEntry NEONLdStTable[] = {
186 { ARM::VLD1LNq16Pseudo, ARM::VLD1LNd16, true, false, false, EvenDblSpc, 1, 4 ,true},
187 { ARM::VLD1LNq16Pseudo_UPD, ARM::VLD1LNd16_UPD, true, true, true, EvenDblSpc, 1, 4 ,true},
188 { ARM::VLD1LNq32Pseudo, ARM::VLD1LNd32, true, false, false, EvenDblSpc, 1, 2 ,true},
189 { ARM::VLD1LNq32Pseudo_UPD, ARM::VLD1LNd32_UPD, true, true, true, EvenDblSpc, 1, 2 ,true},
190 { ARM::VLD1LNq8Pseudo, ARM::VLD1LNd8, true, false, false, EvenDblSpc, 1, 8 ,true},
191 { ARM::VLD1LNq8Pseudo_UPD, ARM::VLD1LNd8_UPD, true, true, true, EvenDblSpc, 1, 8 ,true},
192
193 { ARM::VLD1d16QPseudo, ARM::VLD1d16Q, true, false, false, SingleSpc, 4, 4 ,false},
194 { ARM::VLD1d16QPseudoWB_fixed, ARM::VLD1d16Qwb_fixed, true, true, false, SingleSpc, 4, 4 ,false},
195 { ARM::VLD1d16QPseudoWB_register, ARM::VLD1d16Qwb_register, true, true, true, SingleSpc, 4, 4 ,false},
196 { ARM::VLD1d16TPseudo, ARM::VLD1d16T, true, false, false, SingleSpc, 3, 4 ,false},
197 { ARM::VLD1d16TPseudoWB_fixed, ARM::VLD1d16Twb_fixed, true, true, false, SingleSpc, 3, 4 ,false},
198 { ARM::VLD1d16TPseudoWB_register, ARM::VLD1d16Twb_register, true, true, true, SingleSpc, 3, 4 ,false},
199
200 { ARM::VLD1d32QPseudo, ARM::VLD1d32Q, true, false, false, SingleSpc, 4, 2 ,false},
201 { ARM::VLD1d32QPseudoWB_fixed, ARM::VLD1d32Qwb_fixed, true, true, false, SingleSpc, 4, 2 ,false},
202 { ARM::VLD1d32QPseudoWB_register, ARM::VLD1d32Qwb_register, true, true, true, SingleSpc, 4, 2 ,false},
203 { ARM::VLD1d32TPseudo, ARM::VLD1d32T, true, false, false, SingleSpc, 3, 2 ,false},
204 { ARM::VLD1d32TPseudoWB_fixed, ARM::VLD1d32Twb_fixed, true, true, false, SingleSpc, 3, 2 ,false},
205 { ARM::VLD1d32TPseudoWB_register, ARM::VLD1d32Twb_register, true, true, true, SingleSpc, 3, 2 ,false},
206
207 { ARM::VLD1d64QPseudo, ARM::VLD1d64Q, true, false, false, SingleSpc, 4, 1 ,false},
208 { ARM::VLD1d64QPseudoWB_fixed, ARM::VLD1d64Qwb_fixed, true, true, false, SingleSpc, 4, 1 ,false},
209 { ARM::VLD1d64QPseudoWB_register, ARM::VLD1d64Qwb_register, true, true, true, SingleSpc, 4, 1 ,false},
210 { ARM::VLD1d64TPseudo, ARM::VLD1d64T, true, false, false, SingleSpc, 3, 1 ,false},
211 { ARM::VLD1d64TPseudoWB_fixed, ARM::VLD1d64Twb_fixed, true, true, false, SingleSpc, 3, 1 ,false},
212 { ARM::VLD1d64TPseudoWB_register, ARM::VLD1d64Twb_register, true, true, true, SingleSpc, 3, 1 ,false},
213
214 { ARM::VLD1d8QPseudo, ARM::VLD1d8Q, true, false, false, SingleSpc, 4, 8 ,false},
215 { ARM::VLD1d8QPseudoWB_fixed, ARM::VLD1d8Qwb_fixed, true, true, false, SingleSpc, 4, 8 ,false},
216 { ARM::VLD1d8QPseudoWB_register, ARM::VLD1d8Qwb_register, true, true, true, SingleSpc, 4, 8 ,false},
217 { ARM::VLD1d8TPseudo, ARM::VLD1d8T, true, false, false, SingleSpc, 3, 8 ,false},
218 { ARM::VLD1d8TPseudoWB_fixed, ARM::VLD1d8Twb_fixed, true, true, false, SingleSpc, 3, 8 ,false},
219 { ARM::VLD1d8TPseudoWB_register, ARM::VLD1d8Twb_register, true, true, true, SingleSpc, 3, 8 ,false},
220
221 { ARM::VLD1q16HighQPseudo, ARM::VLD1d16Q, true, false, false, SingleHighQSpc, 4, 4 ,false},
222 { ARM::VLD1q16HighQPseudo_UPD, ARM::VLD1d16Qwb_fixed, true, true, true, SingleHighQSpc, 4, 4 ,false},
223 { ARM::VLD1q16HighTPseudo, ARM::VLD1d16T, true, false, false, SingleHighTSpc, 3, 4 ,false},
224 { ARM::VLD1q16HighTPseudo_UPD, ARM::VLD1d16Twb_fixed, true, true, true, SingleHighTSpc, 3, 4 ,false},
225 { ARM::VLD1q16LowQPseudo_UPD, ARM::VLD1d16Qwb_fixed, true, true, true, SingleLowSpc, 4, 4 ,false},
226 { ARM::VLD1q16LowTPseudo_UPD, ARM::VLD1d16Twb_fixed, true, true, true, SingleLowSpc, 3, 4 ,false},
227
228 { ARM::VLD1q32HighQPseudo, ARM::VLD1d32Q, true, false, false, SingleHighQSpc, 4, 2 ,false},
229 { ARM::VLD1q32HighQPseudo_UPD, ARM::VLD1d32Qwb_fixed, true, true, true, SingleHighQSpc, 4, 2 ,false},
230 { ARM::VLD1q32HighTPseudo, ARM::VLD1d32T, true, false, false, SingleHighTSpc, 3, 2 ,false},
231 { ARM::VLD1q32HighTPseudo_UPD, ARM::VLD1d32Twb_fixed, true, true, true, SingleHighTSpc, 3, 2 ,false},
232 { ARM::VLD1q32LowQPseudo_UPD, ARM::VLD1d32Qwb_fixed, true, true, true, SingleLowSpc, 4, 2 ,false},
233 { ARM::VLD1q32LowTPseudo_UPD, ARM::VLD1d32Twb_fixed, true, true, true, SingleLowSpc, 3, 2 ,false},
234
235 { ARM::VLD1q64HighQPseudo, ARM::VLD1d64Q, true, false, false, SingleHighQSpc, 4, 1 ,false},
236 { ARM::VLD1q64HighQPseudo_UPD, ARM::VLD1d64Qwb_fixed, true, true, true, SingleHighQSpc, 4, 1 ,false},
237 { ARM::VLD1q64HighTPseudo, ARM::VLD1d64T, true, false, false, SingleHighTSpc, 3, 1 ,false},
238 { ARM::VLD1q64HighTPseudo_UPD, ARM::VLD1d64Twb_fixed, true, true, true, SingleHighTSpc, 3, 1 ,false},
239 { ARM::VLD1q64LowQPseudo_UPD, ARM::VLD1d64Qwb_fixed, true, true, true, SingleLowSpc, 4, 1 ,false},
240 { ARM::VLD1q64LowTPseudo_UPD, ARM::VLD1d64Twb_fixed, true, true, true, SingleLowSpc, 3, 1 ,false},
241
242 { ARM::VLD1q8HighQPseudo, ARM::VLD1d8Q, true, false, false, SingleHighQSpc, 4, 8 ,false},
243 { ARM::VLD1q8HighQPseudo_UPD, ARM::VLD1d8Qwb_fixed, true, true, true, SingleHighQSpc, 4, 8 ,false},
244 { ARM::VLD1q8HighTPseudo, ARM::VLD1d8T, true, false, false, SingleHighTSpc, 3, 8 ,false},
245 { ARM::VLD1q8HighTPseudo_UPD, ARM::VLD1d8Twb_fixed, true, true, true, SingleHighTSpc, 3, 8 ,false},
246 { ARM::VLD1q8LowQPseudo_UPD, ARM::VLD1d8Qwb_fixed, true, true, true, SingleLowSpc, 4, 8 ,false},
247 { ARM::VLD1q8LowTPseudo_UPD, ARM::VLD1d8Twb_fixed, true, true, true, SingleLowSpc, 3, 8 ,false},
248
249 { ARM::VLD2DUPq16EvenPseudo, ARM::VLD2DUPd16x2, true, false, false, EvenDblSpc, 2, 4 ,false},
250 { ARM::VLD2DUPq16OddPseudo, ARM::VLD2DUPd16x2, true, false, false, OddDblSpc, 2, 4 ,false},
251 { ARM::VLD2DUPq16OddPseudoWB_fixed, ARM::VLD2DUPd16x2wb_fixed, true, true, false, OddDblSpc, 2, 4 ,false},
252 { ARM::VLD2DUPq16OddPseudoWB_register, ARM::VLD2DUPd16x2wb_register, true, true, true, OddDblSpc, 2, 4 ,false},
253 { ARM::VLD2DUPq32EvenPseudo, ARM::VLD2DUPd32x2, true, false, false, EvenDblSpc, 2, 2 ,false},
254 { ARM::VLD2DUPq32OddPseudo, ARM::VLD2DUPd32x2, true, false, false, OddDblSpc, 2, 2 ,false},
255 { ARM::VLD2DUPq32OddPseudoWB_fixed, ARM::VLD2DUPd32x2wb_fixed, true, true, false, OddDblSpc, 2, 2 ,false},
256 { ARM::VLD2DUPq32OddPseudoWB_register, ARM::VLD2DUPd32x2wb_register, true, true, true, OddDblSpc, 2, 2 ,false},
257 { ARM::VLD2DUPq8EvenPseudo, ARM::VLD2DUPd8x2, true, false, false, EvenDblSpc, 2, 8 ,false},
258 { ARM::VLD2DUPq8OddPseudo, ARM::VLD2DUPd8x2, true, false, false, OddDblSpc, 2, 8 ,false},
259 { ARM::VLD2DUPq8OddPseudoWB_fixed, ARM::VLD2DUPd8x2wb_fixed, true, true, false, OddDblSpc, 2, 8 ,false},
260 { ARM::VLD2DUPq8OddPseudoWB_register, ARM::VLD2DUPd8x2wb_register, true, true, true, OddDblSpc, 2, 8 ,false},
261
262 { ARM::VLD2LNd16Pseudo, ARM::VLD2LNd16, true, false, false, SingleSpc, 2, 4 ,true},
263 { ARM::VLD2LNd16Pseudo_UPD, ARM::VLD2LNd16_UPD, true, true, true, SingleSpc, 2, 4 ,true},
264 { ARM::VLD2LNd32Pseudo, ARM::VLD2LNd32, true, false, false, SingleSpc, 2, 2 ,true},
265 { ARM::VLD2LNd32Pseudo_UPD, ARM::VLD2LNd32_UPD, true, true, true, SingleSpc, 2, 2 ,true},
266 { ARM::VLD2LNd8Pseudo, ARM::VLD2LNd8, true, false, false, SingleSpc, 2, 8 ,true},
267 { ARM::VLD2LNd8Pseudo_UPD, ARM::VLD2LNd8_UPD, true, true, true, SingleSpc, 2, 8 ,true},
268 { ARM::VLD2LNq16Pseudo, ARM::VLD2LNq16, true, false, false, EvenDblSpc, 2, 4 ,true},
269 { ARM::VLD2LNq16Pseudo_UPD, ARM::VLD2LNq16_UPD, true, true, true, EvenDblSpc, 2, 4 ,true},
270 { ARM::VLD2LNq32Pseudo, ARM::VLD2LNq32, true, false, false, EvenDblSpc, 2, 2 ,true},
271 { ARM::VLD2LNq32Pseudo_UPD, ARM::VLD2LNq32_UPD, true, true, true, EvenDblSpc, 2, 2 ,true},
272
273 { ARM::VLD2q16Pseudo, ARM::VLD2q16, true, false, false, SingleSpc, 4, 4 ,false},
274 { ARM::VLD2q16PseudoWB_fixed, ARM::VLD2q16wb_fixed, true, true, false, SingleSpc, 4, 4 ,false},
275 { ARM::VLD2q16PseudoWB_register, ARM::VLD2q16wb_register, true, true, true, SingleSpc, 4, 4 ,false},
276 { ARM::VLD2q32Pseudo, ARM::VLD2q32, true, false, false, SingleSpc, 4, 2 ,false},
277 { ARM::VLD2q32PseudoWB_fixed, ARM::VLD2q32wb_fixed, true, true, false, SingleSpc, 4, 2 ,false},
278 { ARM::VLD2q32PseudoWB_register, ARM::VLD2q32wb_register, true, true, true, SingleSpc, 4, 2 ,false},
279 { ARM::VLD2q8Pseudo, ARM::VLD2q8, true, false, false, SingleSpc, 4, 8 ,false},
280 { ARM::VLD2q8PseudoWB_fixed, ARM::VLD2q8wb_fixed, true, true, false, SingleSpc, 4, 8 ,false},
281 { ARM::VLD2q8PseudoWB_register, ARM::VLD2q8wb_register, true, true, true, SingleSpc, 4, 8 ,false},
282
283 { ARM::VLD3DUPd16Pseudo, ARM::VLD3DUPd16, true, false, false, SingleSpc, 3, 4,true},
284 { ARM::VLD3DUPd16Pseudo_UPD, ARM::VLD3DUPd16_UPD, true, true, true, SingleSpc, 3, 4,true},
285 { ARM::VLD3DUPd32Pseudo, ARM::VLD3DUPd32, true, false, false, SingleSpc, 3, 2,true},
286 { ARM::VLD3DUPd32Pseudo_UPD, ARM::VLD3DUPd32_UPD, true, true, true, SingleSpc, 3, 2,true},
287 { ARM::VLD3DUPd8Pseudo, ARM::VLD3DUPd8, true, false, false, SingleSpc, 3, 8,true},
288 { ARM::VLD3DUPd8Pseudo_UPD, ARM::VLD3DUPd8_UPD, true, true, true, SingleSpc, 3, 8,true},
289 { ARM::VLD3DUPq16EvenPseudo, ARM::VLD3DUPq16, true, false, false, EvenDblSpc, 3, 4 ,true},
290 { ARM::VLD3DUPq16OddPseudo, ARM::VLD3DUPq16, true, false, false, OddDblSpc, 3, 4 ,true},
291 { ARM::VLD3DUPq16OddPseudo_UPD, ARM::VLD3DUPq16_UPD, true, true, true, OddDblSpc, 3, 4 ,true},
292 { ARM::VLD3DUPq32EvenPseudo, ARM::VLD3DUPq32, true, false, false, EvenDblSpc, 3, 2 ,true},
293 { ARM::VLD3DUPq32OddPseudo, ARM::VLD3DUPq32, true, false, false, OddDblSpc, 3, 2 ,true},
294 { ARM::VLD3DUPq32OddPseudo_UPD, ARM::VLD3DUPq32_UPD, true, true, true, OddDblSpc, 3, 2 ,true},
295 { ARM::VLD3DUPq8EvenPseudo, ARM::VLD3DUPq8, true, false, false, EvenDblSpc, 3, 8 ,true},
296 { ARM::VLD3DUPq8OddPseudo, ARM::VLD3DUPq8, true, false, false, OddDblSpc, 3, 8 ,true},
297 { ARM::VLD3DUPq8OddPseudo_UPD, ARM::VLD3DUPq8_UPD, true, true, true, OddDblSpc, 3, 8 ,true},
298
299 { ARM::VLD3LNd16Pseudo, ARM::VLD3LNd16, true, false, false, SingleSpc, 3, 4 ,true},
300 { ARM::VLD3LNd16Pseudo_UPD, ARM::VLD3LNd16_UPD, true, true, true, SingleSpc, 3, 4 ,true},
301 { ARM::VLD3LNd32Pseudo, ARM::VLD3LNd32, true, false, false, SingleSpc, 3, 2 ,true},
302 { ARM::VLD3LNd32Pseudo_UPD, ARM::VLD3LNd32_UPD, true, true, true, SingleSpc, 3, 2 ,true},
303 { ARM::VLD3LNd8Pseudo, ARM::VLD3LNd8, true, false, false, SingleSpc, 3, 8 ,true},
304 { ARM::VLD3LNd8Pseudo_UPD, ARM::VLD3LNd8_UPD, true, true, true, SingleSpc, 3, 8 ,true},
305 { ARM::VLD3LNq16Pseudo, ARM::VLD3LNq16, true, false, false, EvenDblSpc, 3, 4 ,true},
306 { ARM::VLD3LNq16Pseudo_UPD, ARM::VLD3LNq16_UPD, true, true, true, EvenDblSpc, 3, 4 ,true},
307 { ARM::VLD3LNq32Pseudo, ARM::VLD3LNq32, true, false, false, EvenDblSpc, 3, 2 ,true},
308 { ARM::VLD3LNq32Pseudo_UPD, ARM::VLD3LNq32_UPD, true, true, true, EvenDblSpc, 3, 2 ,true},
309
310 { ARM::VLD3d16Pseudo, ARM::VLD3d16, true, false, false, SingleSpc, 3, 4 ,true},
311 { ARM::VLD3d16Pseudo_UPD, ARM::VLD3d16_UPD, true, true, true, SingleSpc, 3, 4 ,true},
312 { ARM::VLD3d32Pseudo, ARM::VLD3d32, true, false, false, SingleSpc, 3, 2 ,true},
313 { ARM::VLD3d32Pseudo_UPD, ARM::VLD3d32_UPD, true, true, true, SingleSpc, 3, 2 ,true},
314 { ARM::VLD3d8Pseudo, ARM::VLD3d8, true, false, false, SingleSpc, 3, 8 ,true},
315 { ARM::VLD3d8Pseudo_UPD, ARM::VLD3d8_UPD, true, true, true, SingleSpc, 3, 8 ,true},
316
317 { ARM::VLD3q16Pseudo_UPD, ARM::VLD3q16_UPD, true, true, true, EvenDblSpc, 3, 4 ,true},
318 { ARM::VLD3q16oddPseudo, ARM::VLD3q16, true, false, false, OddDblSpc, 3, 4 ,true},
319 { ARM::VLD3q16oddPseudo_UPD, ARM::VLD3q16_UPD, true, true, true, OddDblSpc, 3, 4 ,true},
320 { ARM::VLD3q32Pseudo_UPD, ARM::VLD3q32_UPD, true, true, true, EvenDblSpc, 3, 2 ,true},
321 { ARM::VLD3q32oddPseudo, ARM::VLD3q32, true, false, false, OddDblSpc, 3, 2 ,true},
322 { ARM::VLD3q32oddPseudo_UPD, ARM::VLD3q32_UPD, true, true, true, OddDblSpc, 3, 2 ,true},
323 { ARM::VLD3q8Pseudo_UPD, ARM::VLD3q8_UPD, true, true, true, EvenDblSpc, 3, 8 ,true},
324 { ARM::VLD3q8oddPseudo, ARM::VLD3q8, true, false, false, OddDblSpc, 3, 8 ,true},
325 { ARM::VLD3q8oddPseudo_UPD, ARM::VLD3q8_UPD, true, true, true, OddDblSpc, 3, 8 ,true},
326
327 { ARM::VLD4DUPd16Pseudo, ARM::VLD4DUPd16, true, false, false, SingleSpc, 4, 4,true},
328 { ARM::VLD4DUPd16Pseudo_UPD, ARM::VLD4DUPd16_UPD, true, true, true, SingleSpc, 4, 4,true},
329 { ARM::VLD4DUPd32Pseudo, ARM::VLD4DUPd32, true, false, false, SingleSpc, 4, 2,true},
330 { ARM::VLD4DUPd32Pseudo_UPD, ARM::VLD4DUPd32_UPD, true, true, true, SingleSpc, 4, 2,true},
331 { ARM::VLD4DUPd8Pseudo, ARM::VLD4DUPd8, true, false, false, SingleSpc, 4, 8,true},
332 { ARM::VLD4DUPd8Pseudo_UPD, ARM::VLD4DUPd8_UPD, true, true, true, SingleSpc, 4, 8,true},
333 { ARM::VLD4DUPq16EvenPseudo, ARM::VLD4DUPq16, true, false, false, EvenDblSpc, 4, 4 ,true},
334 { ARM::VLD4DUPq16OddPseudo, ARM::VLD4DUPq16, true, false, false, OddDblSpc, 4, 4 ,true},
335 { ARM::VLD4DUPq16OddPseudo_UPD, ARM::VLD4DUPq16_UPD, true, true, true, OddDblSpc, 4, 4 ,true},
336 { ARM::VLD4DUPq32EvenPseudo, ARM::VLD4DUPq32, true, false, false, EvenDblSpc, 4, 2 ,true},
337 { ARM::VLD4DUPq32OddPseudo, ARM::VLD4DUPq32, true, false, false, OddDblSpc, 4, 2 ,true},
338 { ARM::VLD4DUPq32OddPseudo_UPD, ARM::VLD4DUPq32_UPD, true, true, true, OddDblSpc, 4, 2 ,true},
339 { ARM::VLD4DUPq8EvenPseudo, ARM::VLD4DUPq8, true, false, false, EvenDblSpc, 4, 8 ,true},
340 { ARM::VLD4DUPq8OddPseudo, ARM::VLD4DUPq8, true, false, false, OddDblSpc, 4, 8 ,true},
341 { ARM::VLD4DUPq8OddPseudo_UPD, ARM::VLD4DUPq8_UPD, true, true, true, OddDblSpc, 4, 8 ,true},
342
343 { ARM::VLD4LNd16Pseudo, ARM::VLD4LNd16, true, false, false, SingleSpc, 4, 4 ,true},
344 { ARM::VLD4LNd16Pseudo_UPD, ARM::VLD4LNd16_UPD, true, true, true, SingleSpc, 4, 4 ,true},
345 { ARM::VLD4LNd32Pseudo, ARM::VLD4LNd32, true, false, false, SingleSpc, 4, 2 ,true},
346 { ARM::VLD4LNd32Pseudo_UPD, ARM::VLD4LNd32_UPD, true, true, true, SingleSpc, 4, 2 ,true},
347 { ARM::VLD4LNd8Pseudo, ARM::VLD4LNd8, true, false, false, SingleSpc, 4, 8 ,true},
348 { ARM::VLD4LNd8Pseudo_UPD, ARM::VLD4LNd8_UPD, true, true, true, SingleSpc, 4, 8 ,true},
349 { ARM::VLD4LNq16Pseudo, ARM::VLD4LNq16, true, false, false, EvenDblSpc, 4, 4 ,true},
350 { ARM::VLD4LNq16Pseudo_UPD, ARM::VLD4LNq16_UPD, true, true, true, EvenDblSpc, 4, 4 ,true},
351 { ARM::VLD4LNq32Pseudo, ARM::VLD4LNq32, true, false, false, EvenDblSpc, 4, 2 ,true},
352 { ARM::VLD4LNq32Pseudo_UPD, ARM::VLD4LNq32_UPD, true, true, true, EvenDblSpc, 4, 2 ,true},
353
354 { ARM::VLD4d16Pseudo, ARM::VLD4d16, true, false, false, SingleSpc, 4, 4 ,true},
355 { ARM::VLD4d16Pseudo_UPD, ARM::VLD4d16_UPD, true, true, true, SingleSpc, 4, 4 ,true},
356 { ARM::VLD4d32Pseudo, ARM::VLD4d32, true, false, false, SingleSpc, 4, 2 ,true},
357 { ARM::VLD4d32Pseudo_UPD, ARM::VLD4d32_UPD, true, true, true, SingleSpc, 4, 2 ,true},
358 { ARM::VLD4d8Pseudo, ARM::VLD4d8, true, false, false, SingleSpc, 4, 8 ,true},
359 { ARM::VLD4d8Pseudo_UPD, ARM::VLD4d8_UPD, true, true, true, SingleSpc, 4, 8 ,true},
360
361 { ARM::VLD4q16Pseudo_UPD, ARM::VLD4q16_UPD, true, true, true, EvenDblSpc, 4, 4 ,true},
362 { ARM::VLD4q16oddPseudo, ARM::VLD4q16, true, false, false, OddDblSpc, 4, 4 ,true},
363 { ARM::VLD4q16oddPseudo_UPD, ARM::VLD4q16_UPD, true, true, true, OddDblSpc, 4, 4 ,true},
364 { ARM::VLD4q32Pseudo_UPD, ARM::VLD4q32_UPD, true, true, true, EvenDblSpc, 4, 2 ,true},
365 { ARM::VLD4q32oddPseudo, ARM::VLD4q32, true, false, false, OddDblSpc, 4, 2 ,true},
366 { ARM::VLD4q32oddPseudo_UPD, ARM::VLD4q32_UPD, true, true, true, OddDblSpc, 4, 2 ,true},
367 { ARM::VLD4q8Pseudo_UPD, ARM::VLD4q8_UPD, true, true, true, EvenDblSpc, 4, 8 ,true},
368 { ARM::VLD4q8oddPseudo, ARM::VLD4q8, true, false, false, OddDblSpc, 4, 8 ,true},
369 { ARM::VLD4q8oddPseudo_UPD, ARM::VLD4q8_UPD, true, true, true, OddDblSpc, 4, 8 ,true},
370
371 { ARM::VST1LNq16Pseudo, ARM::VST1LNd16, false, false, false, EvenDblSpc, 1, 4 ,true},
372 { ARM::VST1LNq16Pseudo_UPD, ARM::VST1LNd16_UPD, false, true, true, EvenDblSpc, 1, 4 ,true},
373 { ARM::VST1LNq32Pseudo, ARM::VST1LNd32, false, false, false, EvenDblSpc, 1, 2 ,true},
374 { ARM::VST1LNq32Pseudo_UPD, ARM::VST1LNd32_UPD, false, true, true, EvenDblSpc, 1, 2 ,true},
375 { ARM::VST1LNq8Pseudo, ARM::VST1LNd8, false, false, false, EvenDblSpc, 1, 8 ,true},
376 { ARM::VST1LNq8Pseudo_UPD, ARM::VST1LNd8_UPD, false, true, true, EvenDblSpc, 1, 8 ,true},
377
378 { ARM::VST1d16QPseudo, ARM::VST1d16Q, false, false, false, SingleSpc, 4, 4 ,false},
379 { ARM::VST1d16QPseudoWB_fixed, ARM::VST1d16Qwb_fixed, false, true, false, SingleSpc, 4, 4 ,false},
380 { ARM::VST1d16QPseudoWB_register, ARM::VST1d16Qwb_register, false, true, true, SingleSpc, 4, 4 ,false},
381 { ARM::VST1d16TPseudo, ARM::VST1d16T, false, false, false, SingleSpc, 3, 4 ,false},
382 { ARM::VST1d16TPseudoWB_fixed, ARM::VST1d16Twb_fixed, false, true, false, SingleSpc, 3, 4 ,false},
383 { ARM::VST1d16TPseudoWB_register, ARM::VST1d16Twb_register, false, true, true, SingleSpc, 3, 4 ,false},
384
385 { ARM::VST1d32QPseudo, ARM::VST1d32Q, false, false, false, SingleSpc, 4, 2 ,false},
386 { ARM::VST1d32QPseudoWB_fixed, ARM::VST1d32Qwb_fixed, false, true, false, SingleSpc, 4, 2 ,false},
387 { ARM::VST1d32QPseudoWB_register, ARM::VST1d32Qwb_register, false, true, true, SingleSpc, 4, 2 ,false},
388 { ARM::VST1d32TPseudo, ARM::VST1d32T, false, false, false, SingleSpc, 3, 2 ,false},
389 { ARM::VST1d32TPseudoWB_fixed, ARM::VST1d32Twb_fixed, false, true, false, SingleSpc, 3, 2 ,false},
390 { ARM::VST1d32TPseudoWB_register, ARM::VST1d32Twb_register, false, true, true, SingleSpc, 3, 2 ,false},
391
392 { ARM::VST1d64QPseudo, ARM::VST1d64Q, false, false, false, SingleSpc, 4, 1 ,false},
393 { ARM::VST1d64QPseudoWB_fixed, ARM::VST1d64Qwb_fixed, false, true, false, SingleSpc, 4, 1 ,false},
394 { ARM::VST1d64QPseudoWB_register, ARM::VST1d64Qwb_register, false, true, true, SingleSpc, 4, 1 ,false},
395 { ARM::VST1d64TPseudo, ARM::VST1d64T, false, false, false, SingleSpc, 3, 1 ,false},
396 { ARM::VST1d64TPseudoWB_fixed, ARM::VST1d64Twb_fixed, false, true, false, SingleSpc, 3, 1 ,false},
397 { ARM::VST1d64TPseudoWB_register, ARM::VST1d64Twb_register, false, true, true, SingleSpc, 3, 1 ,false},
398
399 { ARM::VST1d8QPseudo, ARM::VST1d8Q, false, false, false, SingleSpc, 4, 8 ,false},
400 { ARM::VST1d8QPseudoWB_fixed, ARM::VST1d8Qwb_fixed, false, true, false, SingleSpc, 4, 8 ,false},
401 { ARM::VST1d8QPseudoWB_register, ARM::VST1d8Qwb_register, false, true, true, SingleSpc, 4, 8 ,false},
402 { ARM::VST1d8TPseudo, ARM::VST1d8T, false, false, false, SingleSpc, 3, 8 ,false},
403 { ARM::VST1d8TPseudoWB_fixed, ARM::VST1d8Twb_fixed, false, true, false, SingleSpc, 3, 8 ,false},
404 { ARM::VST1d8TPseudoWB_register, ARM::VST1d8Twb_register, false, true, true, SingleSpc, 3, 8 ,false},
405
406 { ARM::VST1q16HighQPseudo, ARM::VST1d16Q, false, false, false, SingleHighQSpc, 4, 4 ,false},
407 { ARM::VST1q16HighQPseudo_UPD, ARM::VST1d16Qwb_fixed, false, true, true, SingleHighQSpc, 4, 8 ,false},
408 { ARM::VST1q16HighTPseudo, ARM::VST1d16T, false, false, false, SingleHighTSpc, 3, 4 ,false},
409 { ARM::VST1q16HighTPseudo_UPD, ARM::VST1d16Twb_fixed, false, true, true, SingleHighTSpc, 3, 4 ,false},
410 { ARM::VST1q16LowQPseudo_UPD, ARM::VST1d16Qwb_fixed, false, true, true, SingleLowSpc, 4, 4 ,false},
411 { ARM::VST1q16LowTPseudo_UPD, ARM::VST1d16Twb_fixed, false, true, true, SingleLowSpc, 3, 4 ,false},
412
413 { ARM::VST1q32HighQPseudo, ARM::VST1d32Q, false, false, false, SingleHighQSpc, 4, 2 ,false},
414 { ARM::VST1q32HighQPseudo_UPD, ARM::VST1d32Qwb_fixed, false, true, true, SingleHighQSpc, 4, 8 ,false},
415 { ARM::VST1q32HighTPseudo, ARM::VST1d32T, false, false, false, SingleHighTSpc, 3, 2 ,false},
416 { ARM::VST1q32HighTPseudo_UPD, ARM::VST1d32Twb_fixed, false, true, true, SingleHighTSpc, 3, 2 ,false},
417 { ARM::VST1q32LowQPseudo_UPD, ARM::VST1d32Qwb_fixed, false, true, true, SingleLowSpc, 4, 2 ,false},
418 { ARM::VST1q32LowTPseudo_UPD, ARM::VST1d32Twb_fixed, false, true, true, SingleLowSpc, 3, 2 ,false},
419
420 { ARM::VST1q64HighQPseudo, ARM::VST1d64Q, false, false, false, SingleHighQSpc, 4, 1 ,false},
421 { ARM::VST1q64HighQPseudo_UPD, ARM::VST1d64Qwb_fixed, false, true, true, SingleHighQSpc, 4, 8 ,false},
422 { ARM::VST1q64HighTPseudo, ARM::VST1d64T, false, false, false, SingleHighTSpc, 3, 1 ,false},
423 { ARM::VST1q64HighTPseudo_UPD, ARM::VST1d64Twb_fixed, false, true, true, SingleHighTSpc, 3, 1 ,false},
424 { ARM::VST1q64LowQPseudo_UPD, ARM::VST1d64Qwb_fixed, false, true, true, SingleLowSpc, 4, 1 ,false},
425 { ARM::VST1q64LowTPseudo_UPD, ARM::VST1d64Twb_fixed, false, true, true, SingleLowSpc, 3, 1 ,false},
426
427 { ARM::VST1q8HighQPseudo, ARM::VST1d8Q, false, false, false, SingleHighQSpc, 4, 8 ,false},
428 { ARM::VST1q8HighQPseudo_UPD, ARM::VST1d8Qwb_fixed, false, true, true, SingleHighQSpc, 4, 8 ,false},
429 { ARM::VST1q8HighTPseudo, ARM::VST1d8T, false, false, false, SingleHighTSpc, 3, 8 ,false},
430 { ARM::VST1q8HighTPseudo_UPD, ARM::VST1d8Twb_fixed, false, true, true, SingleHighTSpc, 3, 8 ,false},
431 { ARM::VST1q8LowQPseudo_UPD, ARM::VST1d8Qwb_fixed, false, true, true, SingleLowSpc, 4, 8 ,false},
432 { ARM::VST1q8LowTPseudo_UPD, ARM::VST1d8Twb_fixed, false, true, true, SingleLowSpc, 3, 8 ,false},
433
434 { ARM::VST2LNd16Pseudo, ARM::VST2LNd16, false, false, false, SingleSpc, 2, 4 ,true},
435 { ARM::VST2LNd16Pseudo_UPD, ARM::VST2LNd16_UPD, false, true, true, SingleSpc, 2, 4 ,true},
436 { ARM::VST2LNd32Pseudo, ARM::VST2LNd32, false, false, false, SingleSpc, 2, 2 ,true},
437 { ARM::VST2LNd32Pseudo_UPD, ARM::VST2LNd32_UPD, false, true, true, SingleSpc, 2, 2 ,true},
438 { ARM::VST2LNd8Pseudo, ARM::VST2LNd8, false, false, false, SingleSpc, 2, 8 ,true},
439 { ARM::VST2LNd8Pseudo_UPD, ARM::VST2LNd8_UPD, false, true, true, SingleSpc, 2, 8 ,true},
440 { ARM::VST2LNq16Pseudo, ARM::VST2LNq16, false, false, false, EvenDblSpc, 2, 4,true},
441 { ARM::VST2LNq16Pseudo_UPD, ARM::VST2LNq16_UPD, false, true, true, EvenDblSpc, 2, 4,true},
442 { ARM::VST2LNq32Pseudo, ARM::VST2LNq32, false, false, false, EvenDblSpc, 2, 2,true},
443 { ARM::VST2LNq32Pseudo_UPD, ARM::VST2LNq32_UPD, false, true, true, EvenDblSpc, 2, 2,true},
444
445 { ARM::VST2q16Pseudo, ARM::VST2q16, false, false, false, SingleSpc, 4, 4 ,false},
446 { ARM::VST2q16PseudoWB_fixed, ARM::VST2q16wb_fixed, false, true, false, SingleSpc, 4, 4 ,false},
447 { ARM::VST2q16PseudoWB_register, ARM::VST2q16wb_register, false, true, true, SingleSpc, 4, 4 ,false},
448 { ARM::VST2q32Pseudo, ARM::VST2q32, false, false, false, SingleSpc, 4, 2 ,false},
449 { ARM::VST2q32PseudoWB_fixed, ARM::VST2q32wb_fixed, false, true, false, SingleSpc, 4, 2 ,false},
450 { ARM::VST2q32PseudoWB_register, ARM::VST2q32wb_register, false, true, true, SingleSpc, 4, 2 ,false},
451 { ARM::VST2q8Pseudo, ARM::VST2q8, false, false, false, SingleSpc, 4, 8 ,false},
452 { ARM::VST2q8PseudoWB_fixed, ARM::VST2q8wb_fixed, false, true, false, SingleSpc, 4, 8 ,false},
453 { ARM::VST2q8PseudoWB_register, ARM::VST2q8wb_register, false, true, true, SingleSpc, 4, 8 ,false},
454
455 { ARM::VST3LNd16Pseudo, ARM::VST3LNd16, false, false, false, SingleSpc, 3, 4 ,true},
456 { ARM::VST3LNd16Pseudo_UPD, ARM::VST3LNd16_UPD, false, true, true, SingleSpc, 3, 4 ,true},
457 { ARM::VST3LNd32Pseudo, ARM::VST3LNd32, false, false, false, SingleSpc, 3, 2 ,true},
458 { ARM::VST3LNd32Pseudo_UPD, ARM::VST3LNd32_UPD, false, true, true, SingleSpc, 3, 2 ,true},
459 { ARM::VST3LNd8Pseudo, ARM::VST3LNd8, false, false, false, SingleSpc, 3, 8 ,true},
460 { ARM::VST3LNd8Pseudo_UPD, ARM::VST3LNd8_UPD, false, true, true, SingleSpc, 3, 8 ,true},
461 { ARM::VST3LNq16Pseudo, ARM::VST3LNq16, false, false, false, EvenDblSpc, 3, 4,true},
462 { ARM::VST3LNq16Pseudo_UPD, ARM::VST3LNq16_UPD, false, true, true, EvenDblSpc, 3, 4,true},
463 { ARM::VST3LNq32Pseudo, ARM::VST3LNq32, false, false, false, EvenDblSpc, 3, 2,true},
464 { ARM::VST3LNq32Pseudo_UPD, ARM::VST3LNq32_UPD, false, true, true, EvenDblSpc, 3, 2,true},
465
466 { ARM::VST3d16Pseudo, ARM::VST3d16, false, false, false, SingleSpc, 3, 4 ,true},
467 { ARM::VST3d16Pseudo_UPD, ARM::VST3d16_UPD, false, true, true, SingleSpc, 3, 4 ,true},
468 { ARM::VST3d32Pseudo, ARM::VST3d32, false, false, false, SingleSpc, 3, 2 ,true},
469 { ARM::VST3d32Pseudo_UPD, ARM::VST3d32_UPD, false, true, true, SingleSpc, 3, 2 ,true},
470 { ARM::VST3d8Pseudo, ARM::VST3d8, false, false, false, SingleSpc, 3, 8 ,true},
471 { ARM::VST3d8Pseudo_UPD, ARM::VST3d8_UPD, false, true, true, SingleSpc, 3, 8 ,true},
472
473 { ARM::VST3q16Pseudo_UPD, ARM::VST3q16_UPD, false, true, true, EvenDblSpc, 3, 4 ,true},
474 { ARM::VST3q16oddPseudo, ARM::VST3q16, false, false, false, OddDblSpc, 3, 4 ,true},
475 { ARM::VST3q16oddPseudo_UPD, ARM::VST3q16_UPD, false, true, true, OddDblSpc, 3, 4 ,true},
476 { ARM::VST3q32Pseudo_UPD, ARM::VST3q32_UPD, false, true, true, EvenDblSpc, 3, 2 ,true},
477 { ARM::VST3q32oddPseudo, ARM::VST3q32, false, false, false, OddDblSpc, 3, 2 ,true},
478 { ARM::VST3q32oddPseudo_UPD, ARM::VST3q32_UPD, false, true, true, OddDblSpc, 3, 2 ,true},
479 { ARM::VST3q8Pseudo_UPD, ARM::VST3q8_UPD, false, true, true, EvenDblSpc, 3, 8 ,true},
480 { ARM::VST3q8oddPseudo, ARM::VST3q8, false, false, false, OddDblSpc, 3, 8 ,true},
481 { ARM::VST3q8oddPseudo_UPD, ARM::VST3q8_UPD, false, true, true, OddDblSpc, 3, 8 ,true},
482
483 { ARM::VST4LNd16Pseudo, ARM::VST4LNd16, false, false, false, SingleSpc, 4, 4 ,true},
484 { ARM::VST4LNd16Pseudo_UPD, ARM::VST4LNd16_UPD, false, true, true, SingleSpc, 4, 4 ,true},
485 { ARM::VST4LNd32Pseudo, ARM::VST4LNd32, false, false, false, SingleSpc, 4, 2 ,true},
486 { ARM::VST4LNd32Pseudo_UPD, ARM::VST4LNd32_UPD, false, true, true, SingleSpc, 4, 2 ,true},
487 { ARM::VST4LNd8Pseudo, ARM::VST4LNd8, false, false, false, SingleSpc, 4, 8 ,true},
488 { ARM::VST4LNd8Pseudo_UPD, ARM::VST4LNd8_UPD, false, true, true, SingleSpc, 4, 8 ,true},
489 { ARM::VST4LNq16Pseudo, ARM::VST4LNq16, false, false, false, EvenDblSpc, 4, 4,true},
490 { ARM::VST4LNq16Pseudo_UPD, ARM::VST4LNq16_UPD, false, true, true, EvenDblSpc, 4, 4,true},
491 { ARM::VST4LNq32Pseudo, ARM::VST4LNq32, false, false, false, EvenDblSpc, 4, 2,true},
492 { ARM::VST4LNq32Pseudo_UPD, ARM::VST4LNq32_UPD, false, true, true, EvenDblSpc, 4, 2,true},
493
494 { ARM::VST4d16Pseudo, ARM::VST4d16, false, false, false, SingleSpc, 4, 4 ,true},
495 { ARM::VST4d16Pseudo_UPD, ARM::VST4d16_UPD, false, true, true, SingleSpc, 4, 4 ,true},
496 { ARM::VST4d32Pseudo, ARM::VST4d32, false, false, false, SingleSpc, 4, 2 ,true},
497 { ARM::VST4d32Pseudo_UPD, ARM::VST4d32_UPD, false, true, true, SingleSpc, 4, 2 ,true},
498 { ARM::VST4d8Pseudo, ARM::VST4d8, false, false, false, SingleSpc, 4, 8 ,true},
499 { ARM::VST4d8Pseudo_UPD, ARM::VST4d8_UPD, false, true, true, SingleSpc, 4, 8 ,true},
500
501 { ARM::VST4q16Pseudo_UPD, ARM::VST4q16_UPD, false, true, true, EvenDblSpc, 4, 4 ,true},
502 { ARM::VST4q16oddPseudo, ARM::VST4q16, false, false, false, OddDblSpc, 4, 4 ,true},
503 { ARM::VST4q16oddPseudo_UPD, ARM::VST4q16_UPD, false, true, true, OddDblSpc, 4, 4 ,true},
504 { ARM::VST4q32Pseudo_UPD, ARM::VST4q32_UPD, false, true, true, EvenDblSpc, 4, 2 ,true},
505 { ARM::VST4q32oddPseudo, ARM::VST4q32, false, false, false, OddDblSpc, 4, 2 ,true},
506 { ARM::VST4q32oddPseudo_UPD, ARM::VST4q32_UPD, false, true, true, OddDblSpc, 4, 2 ,true},
507 { ARM::VST4q8Pseudo_UPD, ARM::VST4q8_UPD, false, true, true, EvenDblSpc, 4, 8 ,true},
508 { ARM::VST4q8oddPseudo, ARM::VST4q8, false, false, false, OddDblSpc, 4, 8 ,true},
509 { ARM::VST4q8oddPseudo_UPD, ARM::VST4q8_UPD, false, true, true, OddDblSpc, 4, 8 ,true}
510 };
511
512 /// LookupNEONLdSt - Search the NEONLdStTable for information about a NEON
513 /// load or store pseudo instruction.
LookupNEONLdSt(unsigned Opcode)514 static const NEONLdStTableEntry *LookupNEONLdSt(unsigned Opcode) {
515 #ifndef NDEBUG
516 // Make sure the table is sorted.
517 static std::atomic<bool> TableChecked(false);
518 if (!TableChecked.load(std::memory_order_relaxed)) {
519 assert(llvm::is_sorted(NEONLdStTable) && "NEONLdStTable is not sorted!");
520 TableChecked.store(true, std::memory_order_relaxed);
521 }
522 #endif
523
524 auto I = llvm::lower_bound(NEONLdStTable, Opcode);
525 if (I != std::end(NEONLdStTable) && I->PseudoOpc == Opcode)
526 return I;
527 return nullptr;
528 }
529
530 /// GetDSubRegs - Get 4 D subregisters of a Q, QQ, or QQQQ register,
531 /// corresponding to the specified register spacing. Not all of the results
532 /// are necessarily valid, e.g., a Q register only has 2 D subregisters.
GetDSubRegs(unsigned Reg,NEONRegSpacing RegSpc,const TargetRegisterInfo * TRI,unsigned & D0,unsigned & D1,unsigned & D2,unsigned & D3)533 static void GetDSubRegs(unsigned Reg, NEONRegSpacing RegSpc,
534 const TargetRegisterInfo *TRI, unsigned &D0,
535 unsigned &D1, unsigned &D2, unsigned &D3) {
536 if (RegSpc == SingleSpc || RegSpc == SingleLowSpc) {
537 D0 = TRI->getSubReg(Reg, ARM::dsub_0);
538 D1 = TRI->getSubReg(Reg, ARM::dsub_1);
539 D2 = TRI->getSubReg(Reg, ARM::dsub_2);
540 D3 = TRI->getSubReg(Reg, ARM::dsub_3);
541 } else if (RegSpc == SingleHighQSpc) {
542 D0 = TRI->getSubReg(Reg, ARM::dsub_4);
543 D1 = TRI->getSubReg(Reg, ARM::dsub_5);
544 D2 = TRI->getSubReg(Reg, ARM::dsub_6);
545 D3 = TRI->getSubReg(Reg, ARM::dsub_7);
546 } else if (RegSpc == SingleHighTSpc) {
547 D0 = TRI->getSubReg(Reg, ARM::dsub_3);
548 D1 = TRI->getSubReg(Reg, ARM::dsub_4);
549 D2 = TRI->getSubReg(Reg, ARM::dsub_5);
550 D3 = TRI->getSubReg(Reg, ARM::dsub_6);
551 } else if (RegSpc == EvenDblSpc) {
552 D0 = TRI->getSubReg(Reg, ARM::dsub_0);
553 D1 = TRI->getSubReg(Reg, ARM::dsub_2);
554 D2 = TRI->getSubReg(Reg, ARM::dsub_4);
555 D3 = TRI->getSubReg(Reg, ARM::dsub_6);
556 } else {
557 assert(RegSpc == OddDblSpc && "unknown register spacing");
558 D0 = TRI->getSubReg(Reg, ARM::dsub_1);
559 D1 = TRI->getSubReg(Reg, ARM::dsub_3);
560 D2 = TRI->getSubReg(Reg, ARM::dsub_5);
561 D3 = TRI->getSubReg(Reg, ARM::dsub_7);
562 }
563 }
564
565 /// ExpandVLD - Translate VLD pseudo instructions with Q, QQ or QQQQ register
566 /// operands to real VLD instructions with D register operands.
ExpandVLD(MachineBasicBlock::iterator & MBBI)567 void ARMExpandPseudo::ExpandVLD(MachineBasicBlock::iterator &MBBI) {
568 MachineInstr &MI = *MBBI;
569 MachineBasicBlock &MBB = *MI.getParent();
570 LLVM_DEBUG(dbgs() << "Expanding: "; MI.dump());
571
572 const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode());
573 assert(TableEntry && TableEntry->IsLoad && "NEONLdStTable lookup failed");
574 NEONRegSpacing RegSpc = (NEONRegSpacing)TableEntry->RegSpacing;
575 unsigned NumRegs = TableEntry->NumRegs;
576
577 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
578 TII->get(TableEntry->RealOpc));
579 unsigned OpIdx = 0;
580
581 bool DstIsDead = MI.getOperand(OpIdx).isDead();
582 Register DstReg = MI.getOperand(OpIdx++).getReg();
583
584 bool IsVLD2DUP = TableEntry->RealOpc == ARM::VLD2DUPd8x2 ||
585 TableEntry->RealOpc == ARM::VLD2DUPd16x2 ||
586 TableEntry->RealOpc == ARM::VLD2DUPd32x2 ||
587 TableEntry->RealOpc == ARM::VLD2DUPd8x2wb_fixed ||
588 TableEntry->RealOpc == ARM::VLD2DUPd16x2wb_fixed ||
589 TableEntry->RealOpc == ARM::VLD2DUPd32x2wb_fixed ||
590 TableEntry->RealOpc == ARM::VLD2DUPd8x2wb_register ||
591 TableEntry->RealOpc == ARM::VLD2DUPd16x2wb_register ||
592 TableEntry->RealOpc == ARM::VLD2DUPd32x2wb_register;
593
594 if (IsVLD2DUP) {
595 unsigned SubRegIndex;
596 if (RegSpc == EvenDblSpc) {
597 SubRegIndex = ARM::dsub_0;
598 } else {
599 assert(RegSpc == OddDblSpc && "Unexpected spacing!");
600 SubRegIndex = ARM::dsub_1;
601 }
602 Register SubReg = TRI->getSubReg(DstReg, SubRegIndex);
603 unsigned DstRegPair = TRI->getMatchingSuperReg(SubReg, ARM::dsub_0,
604 &ARM::DPairSpcRegClass);
605 MIB.addReg(DstRegPair, RegState::Define | getDeadRegState(DstIsDead));
606 } else {
607 unsigned D0, D1, D2, D3;
608 GetDSubRegs(DstReg, RegSpc, TRI, D0, D1, D2, D3);
609 MIB.addReg(D0, RegState::Define | getDeadRegState(DstIsDead));
610 if (NumRegs > 1 && TableEntry->copyAllListRegs)
611 MIB.addReg(D1, RegState::Define | getDeadRegState(DstIsDead));
612 if (NumRegs > 2 && TableEntry->copyAllListRegs)
613 MIB.addReg(D2, RegState::Define | getDeadRegState(DstIsDead));
614 if (NumRegs > 3 && TableEntry->copyAllListRegs)
615 MIB.addReg(D3, RegState::Define | getDeadRegState(DstIsDead));
616 }
617
618 if (TableEntry->isUpdating)
619 MIB.add(MI.getOperand(OpIdx++));
620
621 // Copy the addrmode6 operands.
622 MIB.add(MI.getOperand(OpIdx++));
623 MIB.add(MI.getOperand(OpIdx++));
624
625 // Copy the am6offset operand.
626 if (TableEntry->hasWritebackOperand) {
627 // TODO: The writing-back pseudo instructions we translate here are all
628 // defined to take am6offset nodes that are capable to represent both fixed
629 // and register forms. Some real instructions, however, do not rely on
630 // am6offset and have separate definitions for such forms. When this is the
631 // case, fixed forms do not take any offset nodes, so here we skip them for
632 // such instructions. Once all real and pseudo writing-back instructions are
633 // rewritten without use of am6offset nodes, this code will go away.
634 const MachineOperand &AM6Offset = MI.getOperand(OpIdx++);
635 if (TableEntry->RealOpc == ARM::VLD1d8Qwb_fixed ||
636 TableEntry->RealOpc == ARM::VLD1d16Qwb_fixed ||
637 TableEntry->RealOpc == ARM::VLD1d32Qwb_fixed ||
638 TableEntry->RealOpc == ARM::VLD1d64Qwb_fixed ||
639 TableEntry->RealOpc == ARM::VLD1d8Twb_fixed ||
640 TableEntry->RealOpc == ARM::VLD1d16Twb_fixed ||
641 TableEntry->RealOpc == ARM::VLD1d32Twb_fixed ||
642 TableEntry->RealOpc == ARM::VLD1d64Twb_fixed ||
643 TableEntry->RealOpc == ARM::VLD2DUPd8x2wb_fixed ||
644 TableEntry->RealOpc == ARM::VLD2DUPd16x2wb_fixed ||
645 TableEntry->RealOpc == ARM::VLD2DUPd32x2wb_fixed) {
646 assert(AM6Offset.getReg() == 0 &&
647 "A fixed writing-back pseudo instruction provides an offset "
648 "register!");
649 } else {
650 MIB.add(AM6Offset);
651 }
652 }
653
654 // For an instruction writing double-spaced subregs, the pseudo instruction
655 // has an extra operand that is a use of the super-register. Record the
656 // operand index and skip over it.
657 unsigned SrcOpIdx = 0;
658 if (!IsVLD2DUP) {
659 if (RegSpc == EvenDblSpc || RegSpc == OddDblSpc ||
660 RegSpc == SingleLowSpc || RegSpc == SingleHighQSpc ||
661 RegSpc == SingleHighTSpc)
662 SrcOpIdx = OpIdx++;
663 }
664
665 // Copy the predicate operands.
666 MIB.add(MI.getOperand(OpIdx++));
667 MIB.add(MI.getOperand(OpIdx++));
668
669 // Copy the super-register source operand used for double-spaced subregs over
670 // to the new instruction as an implicit operand.
671 if (SrcOpIdx != 0) {
672 MachineOperand MO = MI.getOperand(SrcOpIdx);
673 MO.setImplicit(true);
674 MIB.add(MO);
675 }
676 // Add an implicit def for the super-register.
677 MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
678 TransferImpOps(MI, MIB, MIB);
679
680 // Transfer memoperands.
681 MIB.cloneMemRefs(MI);
682 MI.eraseFromParent();
683 LLVM_DEBUG(dbgs() << "To: "; MIB.getInstr()->dump(););
684 }
685
686 /// ExpandVST - Translate VST pseudo instructions with Q, QQ or QQQQ register
687 /// operands to real VST instructions with D register operands.
ExpandVST(MachineBasicBlock::iterator & MBBI)688 void ARMExpandPseudo::ExpandVST(MachineBasicBlock::iterator &MBBI) {
689 MachineInstr &MI = *MBBI;
690 MachineBasicBlock &MBB = *MI.getParent();
691 LLVM_DEBUG(dbgs() << "Expanding: "; MI.dump());
692
693 const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode());
694 assert(TableEntry && !TableEntry->IsLoad && "NEONLdStTable lookup failed");
695 NEONRegSpacing RegSpc = (NEONRegSpacing)TableEntry->RegSpacing;
696 unsigned NumRegs = TableEntry->NumRegs;
697
698 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
699 TII->get(TableEntry->RealOpc));
700 unsigned OpIdx = 0;
701 if (TableEntry->isUpdating)
702 MIB.add(MI.getOperand(OpIdx++));
703
704 // Copy the addrmode6 operands.
705 MIB.add(MI.getOperand(OpIdx++));
706 MIB.add(MI.getOperand(OpIdx++));
707
708 if (TableEntry->hasWritebackOperand) {
709 // TODO: The writing-back pseudo instructions we translate here are all
710 // defined to take am6offset nodes that are capable to represent both fixed
711 // and register forms. Some real instructions, however, do not rely on
712 // am6offset and have separate definitions for such forms. When this is the
713 // case, fixed forms do not take any offset nodes, so here we skip them for
714 // such instructions. Once all real and pseudo writing-back instructions are
715 // rewritten without use of am6offset nodes, this code will go away.
716 const MachineOperand &AM6Offset = MI.getOperand(OpIdx++);
717 if (TableEntry->RealOpc == ARM::VST1d8Qwb_fixed ||
718 TableEntry->RealOpc == ARM::VST1d16Qwb_fixed ||
719 TableEntry->RealOpc == ARM::VST1d32Qwb_fixed ||
720 TableEntry->RealOpc == ARM::VST1d64Qwb_fixed ||
721 TableEntry->RealOpc == ARM::VST1d8Twb_fixed ||
722 TableEntry->RealOpc == ARM::VST1d16Twb_fixed ||
723 TableEntry->RealOpc == ARM::VST1d32Twb_fixed ||
724 TableEntry->RealOpc == ARM::VST1d64Twb_fixed) {
725 assert(AM6Offset.getReg() == 0 &&
726 "A fixed writing-back pseudo instruction provides an offset "
727 "register!");
728 } else {
729 MIB.add(AM6Offset);
730 }
731 }
732
733 bool SrcIsKill = MI.getOperand(OpIdx).isKill();
734 bool SrcIsUndef = MI.getOperand(OpIdx).isUndef();
735 Register SrcReg = MI.getOperand(OpIdx++).getReg();
736 unsigned D0, D1, D2, D3;
737 GetDSubRegs(SrcReg, RegSpc, TRI, D0, D1, D2, D3);
738 MIB.addReg(D0, getUndefRegState(SrcIsUndef));
739 if (NumRegs > 1 && TableEntry->copyAllListRegs)
740 MIB.addReg(D1, getUndefRegState(SrcIsUndef));
741 if (NumRegs > 2 && TableEntry->copyAllListRegs)
742 MIB.addReg(D2, getUndefRegState(SrcIsUndef));
743 if (NumRegs > 3 && TableEntry->copyAllListRegs)
744 MIB.addReg(D3, getUndefRegState(SrcIsUndef));
745
746 // Copy the predicate operands.
747 MIB.add(MI.getOperand(OpIdx++));
748 MIB.add(MI.getOperand(OpIdx++));
749
750 if (SrcIsKill && !SrcIsUndef) // Add an implicit kill for the super-reg.
751 MIB->addRegisterKilled(SrcReg, TRI, true);
752 else if (!SrcIsUndef)
753 MIB.addReg(SrcReg, RegState::Implicit); // Add implicit uses for src reg.
754 TransferImpOps(MI, MIB, MIB);
755
756 // Transfer memoperands.
757 MIB.cloneMemRefs(MI);
758 MI.eraseFromParent();
759 LLVM_DEBUG(dbgs() << "To: "; MIB.getInstr()->dump(););
760 }
761
762 /// ExpandLaneOp - Translate VLD*LN and VST*LN instructions with Q, QQ or QQQQ
763 /// register operands to real instructions with D register operands.
ExpandLaneOp(MachineBasicBlock::iterator & MBBI)764 void ARMExpandPseudo::ExpandLaneOp(MachineBasicBlock::iterator &MBBI) {
765 MachineInstr &MI = *MBBI;
766 MachineBasicBlock &MBB = *MI.getParent();
767 LLVM_DEBUG(dbgs() << "Expanding: "; MI.dump());
768
769 const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode());
770 assert(TableEntry && "NEONLdStTable lookup failed");
771 NEONRegSpacing RegSpc = (NEONRegSpacing)TableEntry->RegSpacing;
772 unsigned NumRegs = TableEntry->NumRegs;
773 unsigned RegElts = TableEntry->RegElts;
774
775 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
776 TII->get(TableEntry->RealOpc));
777 unsigned OpIdx = 0;
778 // The lane operand is always the 3rd from last operand, before the 2
779 // predicate operands.
780 unsigned Lane = MI.getOperand(MI.getDesc().getNumOperands() - 3).getImm();
781
782 // Adjust the lane and spacing as needed for Q registers.
783 assert(RegSpc != OddDblSpc && "unexpected register spacing for VLD/VST-lane");
784 if (RegSpc == EvenDblSpc && Lane >= RegElts) {
785 RegSpc = OddDblSpc;
786 Lane -= RegElts;
787 }
788 assert(Lane < RegElts && "out of range lane for VLD/VST-lane");
789
790 unsigned D0 = 0, D1 = 0, D2 = 0, D3 = 0;
791 unsigned DstReg = 0;
792 bool DstIsDead = false;
793 if (TableEntry->IsLoad) {
794 DstIsDead = MI.getOperand(OpIdx).isDead();
795 DstReg = MI.getOperand(OpIdx++).getReg();
796 GetDSubRegs(DstReg, RegSpc, TRI, D0, D1, D2, D3);
797 MIB.addReg(D0, RegState::Define | getDeadRegState(DstIsDead));
798 if (NumRegs > 1)
799 MIB.addReg(D1, RegState::Define | getDeadRegState(DstIsDead));
800 if (NumRegs > 2)
801 MIB.addReg(D2, RegState::Define | getDeadRegState(DstIsDead));
802 if (NumRegs > 3)
803 MIB.addReg(D3, RegState::Define | getDeadRegState(DstIsDead));
804 }
805
806 if (TableEntry->isUpdating)
807 MIB.add(MI.getOperand(OpIdx++));
808
809 // Copy the addrmode6 operands.
810 MIB.add(MI.getOperand(OpIdx++));
811 MIB.add(MI.getOperand(OpIdx++));
812 // Copy the am6offset operand.
813 if (TableEntry->hasWritebackOperand)
814 MIB.add(MI.getOperand(OpIdx++));
815
816 // Grab the super-register source.
817 MachineOperand MO = MI.getOperand(OpIdx++);
818 if (!TableEntry->IsLoad)
819 GetDSubRegs(MO.getReg(), RegSpc, TRI, D0, D1, D2, D3);
820
821 // Add the subregs as sources of the new instruction.
822 unsigned SrcFlags = (getUndefRegState(MO.isUndef()) |
823 getKillRegState(MO.isKill()));
824 MIB.addReg(D0, SrcFlags);
825 if (NumRegs > 1)
826 MIB.addReg(D1, SrcFlags);
827 if (NumRegs > 2)
828 MIB.addReg(D2, SrcFlags);
829 if (NumRegs > 3)
830 MIB.addReg(D3, SrcFlags);
831
832 // Add the lane number operand.
833 MIB.addImm(Lane);
834 OpIdx += 1;
835
836 // Copy the predicate operands.
837 MIB.add(MI.getOperand(OpIdx++));
838 MIB.add(MI.getOperand(OpIdx++));
839
840 // Copy the super-register source to be an implicit source.
841 MO.setImplicit(true);
842 MIB.add(MO);
843 if (TableEntry->IsLoad)
844 // Add an implicit def for the super-register.
845 MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
846 TransferImpOps(MI, MIB, MIB);
847 // Transfer memoperands.
848 MIB.cloneMemRefs(MI);
849 MI.eraseFromParent();
850 }
851
852 /// ExpandVTBL - Translate VTBL and VTBX pseudo instructions with Q or QQ
853 /// register operands to real instructions with D register operands.
ExpandVTBL(MachineBasicBlock::iterator & MBBI,unsigned Opc,bool IsExt)854 void ARMExpandPseudo::ExpandVTBL(MachineBasicBlock::iterator &MBBI,
855 unsigned Opc, bool IsExt) {
856 MachineInstr &MI = *MBBI;
857 MachineBasicBlock &MBB = *MI.getParent();
858 LLVM_DEBUG(dbgs() << "Expanding: "; MI.dump());
859
860 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc));
861 unsigned OpIdx = 0;
862
863 // Transfer the destination register operand.
864 MIB.add(MI.getOperand(OpIdx++));
865 if (IsExt) {
866 MachineOperand VdSrc(MI.getOperand(OpIdx++));
867 MIB.add(VdSrc);
868 }
869
870 bool SrcIsKill = MI.getOperand(OpIdx).isKill();
871 Register SrcReg = MI.getOperand(OpIdx++).getReg();
872 unsigned D0, D1, D2, D3;
873 GetDSubRegs(SrcReg, SingleSpc, TRI, D0, D1, D2, D3);
874 MIB.addReg(D0);
875
876 // Copy the other source register operand.
877 MachineOperand VmSrc(MI.getOperand(OpIdx++));
878 MIB.add(VmSrc);
879
880 // Copy the predicate operands.
881 MIB.add(MI.getOperand(OpIdx++));
882 MIB.add(MI.getOperand(OpIdx++));
883
884 // Add an implicit kill and use for the super-reg.
885 MIB.addReg(SrcReg, RegState::Implicit | getKillRegState(SrcIsKill));
886 TransferImpOps(MI, MIB, MIB);
887 MI.eraseFromParent();
888 LLVM_DEBUG(dbgs() << "To: "; MIB.getInstr()->dump(););
889 }
890
ExpandMQQPRLoadStore(MachineBasicBlock::iterator & MBBI)891 void ARMExpandPseudo::ExpandMQQPRLoadStore(MachineBasicBlock::iterator &MBBI) {
892 MachineInstr &MI = *MBBI;
893 MachineBasicBlock &MBB = *MI.getParent();
894 unsigned NewOpc =
895 MI.getOpcode() == ARM::MQQPRStore || MI.getOpcode() == ARM::MQQQQPRStore
896 ? ARM::VSTMDIA
897 : ARM::VLDMDIA;
898 MachineInstrBuilder MIB =
899 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc));
900
901 unsigned Flags = getKillRegState(MI.getOperand(0).isKill()) |
902 getDefRegState(MI.getOperand(0).isDef());
903 Register SrcReg = MI.getOperand(0).getReg();
904
905 // Copy the destination register.
906 MIB.add(MI.getOperand(1));
907 MIB.add(predOps(ARMCC::AL));
908 MIB.addReg(TRI->getSubReg(SrcReg, ARM::dsub_0), Flags);
909 MIB.addReg(TRI->getSubReg(SrcReg, ARM::dsub_1), Flags);
910 MIB.addReg(TRI->getSubReg(SrcReg, ARM::dsub_2), Flags);
911 MIB.addReg(TRI->getSubReg(SrcReg, ARM::dsub_3), Flags);
912 if (MI.getOpcode() == ARM::MQQQQPRStore ||
913 MI.getOpcode() == ARM::MQQQQPRLoad) {
914 MIB.addReg(TRI->getSubReg(SrcReg, ARM::dsub_4), Flags);
915 MIB.addReg(TRI->getSubReg(SrcReg, ARM::dsub_5), Flags);
916 MIB.addReg(TRI->getSubReg(SrcReg, ARM::dsub_6), Flags);
917 MIB.addReg(TRI->getSubReg(SrcReg, ARM::dsub_7), Flags);
918 }
919
920 if (NewOpc == ARM::VSTMDIA)
921 MIB.addReg(SrcReg, RegState::Implicit);
922
923 TransferImpOps(MI, MIB, MIB);
924 MIB.cloneMemRefs(MI);
925 MI.eraseFromParent();
926 }
927
IsAnAddressOperand(const MachineOperand & MO)928 static bool IsAnAddressOperand(const MachineOperand &MO) {
929 // This check is overly conservative. Unless we are certain that the machine
930 // operand is not a symbol reference, we return that it is a symbol reference.
931 // This is important as the load pair may not be split up Windows.
932 switch (MO.getType()) {
933 case MachineOperand::MO_Register:
934 case MachineOperand::MO_Immediate:
935 case MachineOperand::MO_CImmediate:
936 case MachineOperand::MO_FPImmediate:
937 case MachineOperand::MO_ShuffleMask:
938 return false;
939 case MachineOperand::MO_MachineBasicBlock:
940 return true;
941 case MachineOperand::MO_FrameIndex:
942 return false;
943 case MachineOperand::MO_ConstantPoolIndex:
944 case MachineOperand::MO_TargetIndex:
945 case MachineOperand::MO_JumpTableIndex:
946 case MachineOperand::MO_ExternalSymbol:
947 case MachineOperand::MO_GlobalAddress:
948 case MachineOperand::MO_BlockAddress:
949 return true;
950 case MachineOperand::MO_RegisterMask:
951 case MachineOperand::MO_RegisterLiveOut:
952 return false;
953 case MachineOperand::MO_Metadata:
954 case MachineOperand::MO_MCSymbol:
955 return true;
956 case MachineOperand::MO_DbgInstrRef:
957 case MachineOperand::MO_CFIIndex:
958 return false;
959 case MachineOperand::MO_IntrinsicID:
960 case MachineOperand::MO_Predicate:
961 llvm_unreachable("should not exist post-isel");
962 }
963 llvm_unreachable("unhandled machine operand type");
964 }
965
makeImplicit(const MachineOperand & MO)966 static MachineOperand makeImplicit(const MachineOperand &MO) {
967 MachineOperand NewMO = MO;
968 NewMO.setImplicit();
969 return NewMO;
970 }
971
ExpandMOV32BitImm(MachineBasicBlock & MBB,MachineBasicBlock::iterator & MBBI)972 void ARMExpandPseudo::ExpandMOV32BitImm(MachineBasicBlock &MBB,
973 MachineBasicBlock::iterator &MBBI) {
974 MachineInstr &MI = *MBBI;
975 unsigned Opcode = MI.getOpcode();
976 Register PredReg;
977 ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
978 Register DstReg = MI.getOperand(0).getReg();
979 bool DstIsDead = MI.getOperand(0).isDead();
980 bool isCC = Opcode == ARM::MOVCCi32imm || Opcode == ARM::t2MOVCCi32imm;
981 const MachineOperand &MO = MI.getOperand(isCC ? 2 : 1);
982 bool RequiresBundling = STI->isTargetWindows() && IsAnAddressOperand(MO);
983 MachineInstrBuilder LO16, HI16;
984 LLVM_DEBUG(dbgs() << "Expanding: "; MI.dump());
985
986 if (!STI->hasV6T2Ops() &&
987 (Opcode == ARM::MOVi32imm || Opcode == ARM::MOVCCi32imm)) {
988 // FIXME Windows CE supports older ARM CPUs
989 assert(!STI->isTargetWindows() && "Windows on ARM requires ARMv7+");
990
991 assert (MO.isImm() && "MOVi32imm w/ non-immediate source operand!");
992 unsigned ImmVal = (unsigned)MO.getImm();
993 unsigned SOImmValV1 = 0, SOImmValV2 = 0;
994
995 if (ARM_AM::isSOImmTwoPartVal(ImmVal)) { // Expand into a movi + orr.
996 LO16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVi), DstReg);
997 HI16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::ORRri))
998 .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
999 .addReg(DstReg);
1000 SOImmValV1 = ARM_AM::getSOImmTwoPartFirst(ImmVal);
1001 SOImmValV2 = ARM_AM::getSOImmTwoPartSecond(ImmVal);
1002 } else { // Expand into a mvn + sub.
1003 LO16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MVNi), DstReg);
1004 HI16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::SUBri))
1005 .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
1006 .addReg(DstReg);
1007 SOImmValV1 = ARM_AM::getSOImmTwoPartFirst(-ImmVal);
1008 SOImmValV2 = ARM_AM::getSOImmTwoPartSecond(-ImmVal);
1009 SOImmValV1 = ~(-SOImmValV1);
1010 }
1011
1012 unsigned MIFlags = MI.getFlags();
1013 LO16 = LO16.addImm(SOImmValV1);
1014 HI16 = HI16.addImm(SOImmValV2);
1015 LO16.cloneMemRefs(MI);
1016 HI16.cloneMemRefs(MI);
1017 LO16.setMIFlags(MIFlags);
1018 HI16.setMIFlags(MIFlags);
1019 LO16.addImm(Pred).addReg(PredReg).add(condCodeOp());
1020 HI16.addImm(Pred).addReg(PredReg).add(condCodeOp());
1021 if (isCC)
1022 LO16.add(makeImplicit(MI.getOperand(1)));
1023 TransferImpOps(MI, LO16, HI16);
1024 MI.eraseFromParent();
1025 return;
1026 }
1027
1028 unsigned LO16Opc = 0;
1029 unsigned HI16Opc = 0;
1030 unsigned MIFlags = MI.getFlags();
1031 if (Opcode == ARM::t2MOVi32imm || Opcode == ARM::t2MOVCCi32imm) {
1032 LO16Opc = ARM::t2MOVi16;
1033 HI16Opc = ARM::t2MOVTi16;
1034 } else {
1035 LO16Opc = ARM::MOVi16;
1036 HI16Opc = ARM::MOVTi16;
1037 }
1038
1039 LO16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(LO16Opc), DstReg);
1040 HI16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(HI16Opc))
1041 .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
1042 .addReg(DstReg);
1043
1044 LO16.setMIFlags(MIFlags);
1045 HI16.setMIFlags(MIFlags);
1046
1047 switch (MO.getType()) {
1048 case MachineOperand::MO_Immediate: {
1049 unsigned Imm = MO.getImm();
1050 unsigned Lo16 = Imm & 0xffff;
1051 unsigned Hi16 = (Imm >> 16) & 0xffff;
1052 LO16 = LO16.addImm(Lo16);
1053 HI16 = HI16.addImm(Hi16);
1054 break;
1055 }
1056 case MachineOperand::MO_ExternalSymbol: {
1057 const char *ES = MO.getSymbolName();
1058 unsigned TF = MO.getTargetFlags();
1059 LO16 = LO16.addExternalSymbol(ES, TF | ARMII::MO_LO16);
1060 HI16 = HI16.addExternalSymbol(ES, TF | ARMII::MO_HI16);
1061 break;
1062 }
1063 default: {
1064 const GlobalValue *GV = MO.getGlobal();
1065 unsigned TF = MO.getTargetFlags();
1066 LO16 = LO16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_LO16);
1067 HI16 = HI16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_HI16);
1068 break;
1069 }
1070 }
1071
1072 LO16.cloneMemRefs(MI);
1073 HI16.cloneMemRefs(MI);
1074 LO16.addImm(Pred).addReg(PredReg);
1075 HI16.addImm(Pred).addReg(PredReg);
1076
1077 if (RequiresBundling)
1078 finalizeBundle(MBB, LO16->getIterator(), MBBI->getIterator());
1079
1080 if (isCC)
1081 LO16.add(makeImplicit(MI.getOperand(1)));
1082 TransferImpOps(MI, LO16, HI16);
1083 MI.eraseFromParent();
1084 LLVM_DEBUG(dbgs() << "To: "; LO16.getInstr()->dump(););
1085 LLVM_DEBUG(dbgs() << "And: "; HI16.getInstr()->dump(););
1086 }
1087
1088 // The size of the area, accessed by that VLSTM/VLLDM
1089 // S0-S31 + FPSCR + 8 more bytes (VPR + pad, or just pad)
1090 static const int CMSE_FP_SAVE_SIZE = 136;
1091
determineGPRegsToClear(const MachineInstr & MI,const std::initializer_list<unsigned> & Regs,SmallVectorImpl<unsigned> & ClearRegs)1092 static void determineGPRegsToClear(const MachineInstr &MI,
1093 const std::initializer_list<unsigned> &Regs,
1094 SmallVectorImpl<unsigned> &ClearRegs) {
1095 SmallVector<unsigned, 4> OpRegs;
1096 for (const MachineOperand &Op : MI.operands()) {
1097 if (!Op.isReg() || !Op.isUse())
1098 continue;
1099 OpRegs.push_back(Op.getReg());
1100 }
1101 llvm::sort(OpRegs);
1102
1103 std::set_difference(Regs.begin(), Regs.end(), OpRegs.begin(), OpRegs.end(),
1104 std::back_inserter(ClearRegs));
1105 }
1106
CMSEClearGPRegs(MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI,const DebugLoc & DL,const SmallVectorImpl<unsigned> & ClearRegs,unsigned ClobberReg)1107 void ARMExpandPseudo::CMSEClearGPRegs(
1108 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
1109 const DebugLoc &DL, const SmallVectorImpl<unsigned> &ClearRegs,
1110 unsigned ClobberReg) {
1111
1112 if (STI->hasV8_1MMainlineOps()) {
1113 // Clear the registers using the CLRM instruction.
1114 MachineInstrBuilder CLRM =
1115 BuildMI(MBB, MBBI, DL, TII->get(ARM::t2CLRM)).add(predOps(ARMCC::AL));
1116 for (unsigned R : ClearRegs)
1117 CLRM.addReg(R, RegState::Define);
1118 CLRM.addReg(ARM::APSR, RegState::Define);
1119 CLRM.addReg(ARM::CPSR, RegState::Define | RegState::Implicit);
1120 } else {
1121 // Clear the registers and flags by copying ClobberReg into them.
1122 // (Baseline can't do a high register clear in one instruction).
1123 for (unsigned Reg : ClearRegs) {
1124 if (Reg == ClobberReg)
1125 continue;
1126 BuildMI(MBB, MBBI, DL, TII->get(ARM::tMOVr), Reg)
1127 .addReg(ClobberReg)
1128 .add(predOps(ARMCC::AL));
1129 }
1130
1131 BuildMI(MBB, MBBI, DL, TII->get(ARM::t2MSR_M))
1132 .addImm(STI->hasDSP() ? 0xc00 : 0x800)
1133 .addReg(ClobberReg)
1134 .add(predOps(ARMCC::AL));
1135 }
1136 }
1137
1138 // Find which FP registers need to be cleared. The parameter `ClearRegs` is
1139 // initialised with all elements set to true, and this function resets all the
1140 // bits, which correspond to register uses. Returns true if any floating point
1141 // register is defined, false otherwise.
determineFPRegsToClear(const MachineInstr & MI,BitVector & ClearRegs)1142 static bool determineFPRegsToClear(const MachineInstr &MI,
1143 BitVector &ClearRegs) {
1144 bool DefFP = false;
1145 for (const MachineOperand &Op : MI.operands()) {
1146 if (!Op.isReg())
1147 continue;
1148
1149 Register Reg = Op.getReg();
1150 if (Op.isDef()) {
1151 if ((Reg >= ARM::Q0 && Reg <= ARM::Q7) ||
1152 (Reg >= ARM::D0 && Reg <= ARM::D15) ||
1153 (Reg >= ARM::S0 && Reg <= ARM::S31))
1154 DefFP = true;
1155 continue;
1156 }
1157
1158 if (Reg >= ARM::Q0 && Reg <= ARM::Q7) {
1159 int R = Reg - ARM::Q0;
1160 ClearRegs.reset(R * 4, (R + 1) * 4);
1161 } else if (Reg >= ARM::D0 && Reg <= ARM::D15) {
1162 int R = Reg - ARM::D0;
1163 ClearRegs.reset(R * 2, (R + 1) * 2);
1164 } else if (Reg >= ARM::S0 && Reg <= ARM::S31) {
1165 ClearRegs[Reg - ARM::S0] = false;
1166 }
1167 }
1168 return DefFP;
1169 }
1170
1171 MachineBasicBlock &
CMSEClearFPRegs(MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI)1172 ARMExpandPseudo::CMSEClearFPRegs(MachineBasicBlock &MBB,
1173 MachineBasicBlock::iterator MBBI) {
1174 BitVector ClearRegs(16, true);
1175 (void)determineFPRegsToClear(*MBBI, ClearRegs);
1176
1177 if (STI->hasV8_1MMainlineOps())
1178 return CMSEClearFPRegsV81(MBB, MBBI, ClearRegs);
1179 else
1180 return CMSEClearFPRegsV8(MBB, MBBI, ClearRegs);
1181 }
1182
1183 // Clear the FP registers for v8.0-M, by copying over the content
1184 // of LR. Uses R12 as a scratch register.
1185 MachineBasicBlock &
CMSEClearFPRegsV8(MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI,const BitVector & ClearRegs)1186 ARMExpandPseudo::CMSEClearFPRegsV8(MachineBasicBlock &MBB,
1187 MachineBasicBlock::iterator MBBI,
1188 const BitVector &ClearRegs) {
1189 if (!STI->hasFPRegs())
1190 return MBB;
1191
1192 auto &RetI = *MBBI;
1193 const DebugLoc &DL = RetI.getDebugLoc();
1194
1195 // If optimising for minimum size, clear FP registers unconditionally.
1196 // Otherwise, check the CONTROL.SFPA (Secure Floating-Point Active) bit and
1197 // don't clear them if they belong to the non-secure state.
1198 MachineBasicBlock *ClearBB, *DoneBB;
1199 if (STI->hasMinSize()) {
1200 ClearBB = DoneBB = &MBB;
1201 } else {
1202 MachineFunction *MF = MBB.getParent();
1203 ClearBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1204 DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1205
1206 MF->insert(++MBB.getIterator(), ClearBB);
1207 MF->insert(++ClearBB->getIterator(), DoneBB);
1208
1209 DoneBB->splice(DoneBB->end(), &MBB, MBBI, MBB.end());
1210 DoneBB->transferSuccessors(&MBB);
1211 MBB.addSuccessor(ClearBB);
1212 MBB.addSuccessor(DoneBB);
1213 ClearBB->addSuccessor(DoneBB);
1214
1215 // At the new basic blocks we need to have live-in the registers, used
1216 // for the return value as well as LR, used to clear registers.
1217 for (const MachineOperand &Op : RetI.operands()) {
1218 if (!Op.isReg())
1219 continue;
1220 Register Reg = Op.getReg();
1221 if (Reg == ARM::NoRegister || Reg == ARM::LR)
1222 continue;
1223 assert(Reg.isPhysical() && "Unallocated register");
1224 ClearBB->addLiveIn(Reg);
1225 DoneBB->addLiveIn(Reg);
1226 }
1227 ClearBB->addLiveIn(ARM::LR);
1228 DoneBB->addLiveIn(ARM::LR);
1229
1230 // Read the CONTROL register.
1231 BuildMI(MBB, MBB.end(), DL, TII->get(ARM::t2MRS_M), ARM::R12)
1232 .addImm(20)
1233 .add(predOps(ARMCC::AL));
1234 // Check bit 3 (SFPA).
1235 BuildMI(MBB, MBB.end(), DL, TII->get(ARM::t2TSTri))
1236 .addReg(ARM::R12)
1237 .addImm(8)
1238 .add(predOps(ARMCC::AL));
1239 // If SFPA is clear, jump over ClearBB to DoneBB.
1240 BuildMI(MBB, MBB.end(), DL, TII->get(ARM::tBcc))
1241 .addMBB(DoneBB)
1242 .addImm(ARMCC::EQ)
1243 .addReg(ARM::CPSR, RegState::Kill);
1244 }
1245
1246 // Emit the clearing sequence
1247 for (unsigned D = 0; D < 8; D++) {
1248 // Attempt to clear as double
1249 if (ClearRegs[D * 2 + 0] && ClearRegs[D * 2 + 1]) {
1250 unsigned Reg = ARM::D0 + D;
1251 BuildMI(ClearBB, DL, TII->get(ARM::VMOVDRR), Reg)
1252 .addReg(ARM::LR)
1253 .addReg(ARM::LR)
1254 .add(predOps(ARMCC::AL));
1255 } else {
1256 // Clear first part as single
1257 if (ClearRegs[D * 2 + 0]) {
1258 unsigned Reg = ARM::S0 + D * 2;
1259 BuildMI(ClearBB, DL, TII->get(ARM::VMOVSR), Reg)
1260 .addReg(ARM::LR)
1261 .add(predOps(ARMCC::AL));
1262 }
1263 // Clear second part as single
1264 if (ClearRegs[D * 2 + 1]) {
1265 unsigned Reg = ARM::S0 + D * 2 + 1;
1266 BuildMI(ClearBB, DL, TII->get(ARM::VMOVSR), Reg)
1267 .addReg(ARM::LR)
1268 .add(predOps(ARMCC::AL));
1269 }
1270 }
1271 }
1272
1273 // Clear FPSCR bits 0-4, 7, 28-31
1274 // The other bits are program global according to the AAPCS
1275 BuildMI(ClearBB, DL, TII->get(ARM::VMRS), ARM::R12)
1276 .add(predOps(ARMCC::AL));
1277 BuildMI(ClearBB, DL, TII->get(ARM::t2BICri), ARM::R12)
1278 .addReg(ARM::R12)
1279 .addImm(0x0000009F)
1280 .add(predOps(ARMCC::AL))
1281 .add(condCodeOp());
1282 BuildMI(ClearBB, DL, TII->get(ARM::t2BICri), ARM::R12)
1283 .addReg(ARM::R12)
1284 .addImm(0xF0000000)
1285 .add(predOps(ARMCC::AL))
1286 .add(condCodeOp());
1287 BuildMI(ClearBB, DL, TII->get(ARM::VMSR))
1288 .addReg(ARM::R12)
1289 .add(predOps(ARMCC::AL));
1290
1291 return *DoneBB;
1292 }
1293
1294 MachineBasicBlock &
CMSEClearFPRegsV81(MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI,const BitVector & ClearRegs)1295 ARMExpandPseudo::CMSEClearFPRegsV81(MachineBasicBlock &MBB,
1296 MachineBasicBlock::iterator MBBI,
1297 const BitVector &ClearRegs) {
1298 auto &RetI = *MBBI;
1299
1300 // Emit a sequence of VSCCLRM <sreglist> instructions, one instruction for
1301 // each contiguous sequence of S-registers.
1302 int Start = -1, End = -1;
1303 for (int S = 0, E = ClearRegs.size(); S != E; ++S) {
1304 if (ClearRegs[S] && S == End + 1) {
1305 End = S; // extend range
1306 continue;
1307 }
1308 // Emit current range.
1309 if (Start < End) {
1310 MachineInstrBuilder VSCCLRM =
1311 BuildMI(MBB, MBBI, RetI.getDebugLoc(), TII->get(ARM::VSCCLRMS))
1312 .add(predOps(ARMCC::AL));
1313 while (++Start <= End)
1314 VSCCLRM.addReg(ARM::S0 + Start, RegState::Define);
1315 VSCCLRM.addReg(ARM::VPR, RegState::Define);
1316 }
1317 Start = End = S;
1318 }
1319 // Emit last range.
1320 if (Start < End) {
1321 MachineInstrBuilder VSCCLRM =
1322 BuildMI(MBB, MBBI, RetI.getDebugLoc(), TII->get(ARM::VSCCLRMS))
1323 .add(predOps(ARMCC::AL));
1324 while (++Start <= End)
1325 VSCCLRM.addReg(ARM::S0 + Start, RegState::Define);
1326 VSCCLRM.addReg(ARM::VPR, RegState::Define);
1327 }
1328
1329 return MBB;
1330 }
1331
CMSESaveClearFPRegs(MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI,DebugLoc & DL,const LivePhysRegs & LiveRegs,SmallVectorImpl<unsigned> & ScratchRegs)1332 void ARMExpandPseudo::CMSESaveClearFPRegs(
1333 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc &DL,
1334 const LivePhysRegs &LiveRegs, SmallVectorImpl<unsigned> &ScratchRegs) {
1335 if (STI->hasV8_1MMainlineOps())
1336 CMSESaveClearFPRegsV81(MBB, MBBI, DL, LiveRegs);
1337 else if (STI->hasV8MMainlineOps())
1338 CMSESaveClearFPRegsV8(MBB, MBBI, DL, LiveRegs, ScratchRegs);
1339 }
1340
1341 // Save and clear FP registers if present
CMSESaveClearFPRegsV8(MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI,DebugLoc & DL,const LivePhysRegs & LiveRegs,SmallVectorImpl<unsigned> & ScratchRegs)1342 void ARMExpandPseudo::CMSESaveClearFPRegsV8(
1343 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc &DL,
1344 const LivePhysRegs &LiveRegs, SmallVectorImpl<unsigned> &ScratchRegs) {
1345
1346 // Store an available register for FPSCR clearing
1347 assert(!ScratchRegs.empty());
1348 unsigned SpareReg = ScratchRegs.front();
1349
1350 // save space on stack for VLSTM
1351 BuildMI(MBB, MBBI, DL, TII->get(ARM::tSUBspi), ARM::SP)
1352 .addReg(ARM::SP)
1353 .addImm(CMSE_FP_SAVE_SIZE >> 2)
1354 .add(predOps(ARMCC::AL));
1355
1356 // Use ScratchRegs to store the fp regs
1357 std::vector<std::tuple<unsigned, unsigned, unsigned>> ClearedFPRegs;
1358 std::vector<unsigned> NonclearedFPRegs;
1359 for (const MachineOperand &Op : MBBI->operands()) {
1360 if (Op.isReg() && Op.isUse()) {
1361 Register Reg = Op.getReg();
1362 assert(!ARM::DPRRegClass.contains(Reg) ||
1363 ARM::DPR_VFP2RegClass.contains(Reg));
1364 assert(!ARM::QPRRegClass.contains(Reg));
1365 if (ARM::DPR_VFP2RegClass.contains(Reg)) {
1366 if (ScratchRegs.size() >= 2) {
1367 unsigned SaveReg2 = ScratchRegs.pop_back_val();
1368 unsigned SaveReg1 = ScratchRegs.pop_back_val();
1369 ClearedFPRegs.emplace_back(Reg, SaveReg1, SaveReg2);
1370
1371 // Save the fp register to the normal registers
1372 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVRRD))
1373 .addReg(SaveReg1, RegState::Define)
1374 .addReg(SaveReg2, RegState::Define)
1375 .addReg(Reg)
1376 .add(predOps(ARMCC::AL));
1377 } else {
1378 NonclearedFPRegs.push_back(Reg);
1379 }
1380 } else if (ARM::SPRRegClass.contains(Reg)) {
1381 if (ScratchRegs.size() >= 1) {
1382 unsigned SaveReg = ScratchRegs.pop_back_val();
1383 ClearedFPRegs.emplace_back(Reg, SaveReg, 0);
1384
1385 // Save the fp register to the normal registers
1386 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVRS), SaveReg)
1387 .addReg(Reg)
1388 .add(predOps(ARMCC::AL));
1389 } else {
1390 NonclearedFPRegs.push_back(Reg);
1391 }
1392 }
1393 }
1394 }
1395
1396 bool passesFPReg = (!NonclearedFPRegs.empty() || !ClearedFPRegs.empty());
1397
1398 if (passesFPReg)
1399 assert(STI->hasFPRegs() && "Subtarget needs fpregs");
1400
1401 // Lazy store all fp registers to the stack.
1402 // This executes as NOP in the absence of floating-point support.
1403 MachineInstrBuilder VLSTM = BuildMI(MBB, MBBI, DL, TII->get(ARM::VLSTM))
1404 .addReg(ARM::SP)
1405 .add(predOps(ARMCC::AL));
1406 for (auto R : {ARM::VPR, ARM::FPSCR, ARM::FPSCR_NZCV, ARM::Q0, ARM::Q1,
1407 ARM::Q2, ARM::Q3, ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7})
1408 VLSTM.addReg(R, RegState::Implicit |
1409 (LiveRegs.contains(R) ? 0 : RegState::Undef));
1410
1411 // Restore all arguments
1412 for (const auto &Regs : ClearedFPRegs) {
1413 unsigned Reg, SaveReg1, SaveReg2;
1414 std::tie(Reg, SaveReg1, SaveReg2) = Regs;
1415 if (ARM::DPR_VFP2RegClass.contains(Reg))
1416 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVDRR), Reg)
1417 .addReg(SaveReg1)
1418 .addReg(SaveReg2)
1419 .add(predOps(ARMCC::AL));
1420 else if (ARM::SPRRegClass.contains(Reg))
1421 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVSR), Reg)
1422 .addReg(SaveReg1)
1423 .add(predOps(ARMCC::AL));
1424 }
1425
1426 for (unsigned Reg : NonclearedFPRegs) {
1427 if (ARM::DPR_VFP2RegClass.contains(Reg)) {
1428 if (STI->isLittle()) {
1429 BuildMI(MBB, MBBI, DL, TII->get(ARM::VLDRD), Reg)
1430 .addReg(ARM::SP)
1431 .addImm((Reg - ARM::D0) * 2)
1432 .add(predOps(ARMCC::AL));
1433 } else {
1434 // For big-endian targets we need to load the two subregisters of Reg
1435 // manually because VLDRD would load them in wrong order
1436 unsigned SReg0 = TRI->getSubReg(Reg, ARM::ssub_0);
1437 BuildMI(MBB, MBBI, DL, TII->get(ARM::VLDRS), SReg0)
1438 .addReg(ARM::SP)
1439 .addImm((Reg - ARM::D0) * 2)
1440 .add(predOps(ARMCC::AL));
1441 BuildMI(MBB, MBBI, DL, TII->get(ARM::VLDRS), SReg0 + 1)
1442 .addReg(ARM::SP)
1443 .addImm((Reg - ARM::D0) * 2 + 1)
1444 .add(predOps(ARMCC::AL));
1445 }
1446 } else if (ARM::SPRRegClass.contains(Reg)) {
1447 BuildMI(MBB, MBBI, DL, TII->get(ARM::VLDRS), Reg)
1448 .addReg(ARM::SP)
1449 .addImm(Reg - ARM::S0)
1450 .add(predOps(ARMCC::AL));
1451 }
1452 }
1453 // restore FPSCR from stack and clear bits 0-4, 7, 28-31
1454 // The other bits are program global according to the AAPCS
1455 if (passesFPReg) {
1456 BuildMI(MBB, MBBI, DL, TII->get(ARM::tLDRspi), SpareReg)
1457 .addReg(ARM::SP)
1458 .addImm(0x10)
1459 .add(predOps(ARMCC::AL));
1460 BuildMI(MBB, MBBI, DL, TII->get(ARM::t2BICri), SpareReg)
1461 .addReg(SpareReg)
1462 .addImm(0x0000009F)
1463 .add(predOps(ARMCC::AL))
1464 .add(condCodeOp());
1465 BuildMI(MBB, MBBI, DL, TII->get(ARM::t2BICri), SpareReg)
1466 .addReg(SpareReg)
1467 .addImm(0xF0000000)
1468 .add(predOps(ARMCC::AL))
1469 .add(condCodeOp());
1470 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMSR))
1471 .addReg(SpareReg)
1472 .add(predOps(ARMCC::AL));
1473 // The ldr must happen after a floating point instruction. To prevent the
1474 // post-ra scheduler to mess with the order, we create a bundle.
1475 finalizeBundle(MBB, VLSTM->getIterator(), MBBI->getIterator());
1476 }
1477 }
1478
CMSESaveClearFPRegsV81(MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI,DebugLoc & DL,const LivePhysRegs & LiveRegs)1479 void ARMExpandPseudo::CMSESaveClearFPRegsV81(MachineBasicBlock &MBB,
1480 MachineBasicBlock::iterator MBBI,
1481 DebugLoc &DL,
1482 const LivePhysRegs &LiveRegs) {
1483 BitVector ClearRegs(32, true);
1484 bool DefFP = determineFPRegsToClear(*MBBI, ClearRegs);
1485
1486 // If the instruction does not write to a FP register and no elements were
1487 // removed from the set, then no FP registers were used to pass
1488 // arguments/returns.
1489 if (!DefFP && ClearRegs.count() == ClearRegs.size()) {
1490 // save space on stack for VLSTM
1491 BuildMI(MBB, MBBI, DL, TII->get(ARM::tSUBspi), ARM::SP)
1492 .addReg(ARM::SP)
1493 .addImm(CMSE_FP_SAVE_SIZE >> 2)
1494 .add(predOps(ARMCC::AL));
1495
1496 // Lazy store all FP registers to the stack
1497 MachineInstrBuilder VLSTM = BuildMI(MBB, MBBI, DL, TII->get(ARM::VLSTM))
1498 .addReg(ARM::SP)
1499 .add(predOps(ARMCC::AL));
1500 for (auto R : {ARM::VPR, ARM::FPSCR, ARM::FPSCR_NZCV, ARM::Q0, ARM::Q1,
1501 ARM::Q2, ARM::Q3, ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7})
1502 VLSTM.addReg(R, RegState::Implicit |
1503 (LiveRegs.contains(R) ? 0 : RegState::Undef));
1504 } else {
1505 // Push all the callee-saved registers (s16-s31).
1506 MachineInstrBuilder VPUSH =
1507 BuildMI(MBB, MBBI, DL, TII->get(ARM::VSTMSDB_UPD), ARM::SP)
1508 .addReg(ARM::SP)
1509 .add(predOps(ARMCC::AL));
1510 for (int Reg = ARM::S16; Reg <= ARM::S31; ++Reg)
1511 VPUSH.addReg(Reg);
1512
1513 // Clear FP registers with a VSCCLRM.
1514 (void)CMSEClearFPRegsV81(MBB, MBBI, ClearRegs);
1515
1516 // Save floating-point context.
1517 BuildMI(MBB, MBBI, DL, TII->get(ARM::VSTR_FPCXTS_pre), ARM::SP)
1518 .addReg(ARM::SP)
1519 .addImm(-8)
1520 .add(predOps(ARMCC::AL));
1521 }
1522 }
1523
1524 // Restore FP registers if present
CMSERestoreFPRegs(MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI,DebugLoc & DL,SmallVectorImpl<unsigned> & AvailableRegs)1525 void ARMExpandPseudo::CMSERestoreFPRegs(
1526 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc &DL,
1527 SmallVectorImpl<unsigned> &AvailableRegs) {
1528 if (STI->hasV8_1MMainlineOps())
1529 CMSERestoreFPRegsV81(MBB, MBBI, DL, AvailableRegs);
1530 else if (STI->hasV8MMainlineOps())
1531 CMSERestoreFPRegsV8(MBB, MBBI, DL, AvailableRegs);
1532 }
1533
CMSERestoreFPRegsV8(MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI,DebugLoc & DL,SmallVectorImpl<unsigned> & AvailableRegs)1534 void ARMExpandPseudo::CMSERestoreFPRegsV8(
1535 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc &DL,
1536 SmallVectorImpl<unsigned> &AvailableRegs) {
1537
1538 // Keep a scratch register for the mitigation sequence.
1539 unsigned ScratchReg = ARM::NoRegister;
1540 if (STI->fixCMSE_CVE_2021_35465())
1541 ScratchReg = AvailableRegs.pop_back_val();
1542
1543 // Use AvailableRegs to store the fp regs
1544 std::vector<std::tuple<unsigned, unsigned, unsigned>> ClearedFPRegs;
1545 std::vector<unsigned> NonclearedFPRegs;
1546 for (const MachineOperand &Op : MBBI->operands()) {
1547 if (Op.isReg() && Op.isDef()) {
1548 Register Reg = Op.getReg();
1549 assert(!ARM::DPRRegClass.contains(Reg) ||
1550 ARM::DPR_VFP2RegClass.contains(Reg));
1551 assert(!ARM::QPRRegClass.contains(Reg));
1552 if (ARM::DPR_VFP2RegClass.contains(Reg)) {
1553 if (AvailableRegs.size() >= 2) {
1554 unsigned SaveReg2 = AvailableRegs.pop_back_val();
1555 unsigned SaveReg1 = AvailableRegs.pop_back_val();
1556 ClearedFPRegs.emplace_back(Reg, SaveReg1, SaveReg2);
1557
1558 // Save the fp register to the normal registers
1559 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVRRD))
1560 .addReg(SaveReg1, RegState::Define)
1561 .addReg(SaveReg2, RegState::Define)
1562 .addReg(Reg)
1563 .add(predOps(ARMCC::AL));
1564 } else {
1565 NonclearedFPRegs.push_back(Reg);
1566 }
1567 } else if (ARM::SPRRegClass.contains(Reg)) {
1568 if (AvailableRegs.size() >= 1) {
1569 unsigned SaveReg = AvailableRegs.pop_back_val();
1570 ClearedFPRegs.emplace_back(Reg, SaveReg, 0);
1571
1572 // Save the fp register to the normal registers
1573 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVRS), SaveReg)
1574 .addReg(Reg)
1575 .add(predOps(ARMCC::AL));
1576 } else {
1577 NonclearedFPRegs.push_back(Reg);
1578 }
1579 }
1580 }
1581 }
1582
1583 bool returnsFPReg = (!NonclearedFPRegs.empty() || !ClearedFPRegs.empty());
1584
1585 if (returnsFPReg)
1586 assert(STI->hasFPRegs() && "Subtarget needs fpregs");
1587
1588 // Push FP regs that cannot be restored via normal registers on the stack
1589 for (unsigned Reg : NonclearedFPRegs) {
1590 if (ARM::DPR_VFP2RegClass.contains(Reg))
1591 BuildMI(MBB, MBBI, DL, TII->get(ARM::VSTRD))
1592 .addReg(Reg)
1593 .addReg(ARM::SP)
1594 .addImm((Reg - ARM::D0) * 2)
1595 .add(predOps(ARMCC::AL));
1596 else if (ARM::SPRRegClass.contains(Reg))
1597 BuildMI(MBB, MBBI, DL, TII->get(ARM::VSTRS))
1598 .addReg(Reg)
1599 .addReg(ARM::SP)
1600 .addImm(Reg - ARM::S0)
1601 .add(predOps(ARMCC::AL));
1602 }
1603
1604 // Lazy load fp regs from stack.
1605 // This executes as NOP in the absence of floating-point support.
1606 MachineInstrBuilder VLLDM = BuildMI(MBB, MBBI, DL, TII->get(ARM::VLLDM))
1607 .addReg(ARM::SP)
1608 .add(predOps(ARMCC::AL));
1609
1610 if (STI->fixCMSE_CVE_2021_35465()) {
1611 auto Bundler = MIBundleBuilder(MBB, VLLDM);
1612 // Read the CONTROL register.
1613 Bundler.append(BuildMI(*MBB.getParent(), DL, TII->get(ARM::t2MRS_M))
1614 .addReg(ScratchReg, RegState::Define)
1615 .addImm(20)
1616 .add(predOps(ARMCC::AL)));
1617 // Check bit 3 (SFPA).
1618 Bundler.append(BuildMI(*MBB.getParent(), DL, TII->get(ARM::t2TSTri))
1619 .addReg(ScratchReg)
1620 .addImm(8)
1621 .add(predOps(ARMCC::AL)));
1622 // Emit the IT block.
1623 Bundler.append(BuildMI(*MBB.getParent(), DL, TII->get(ARM::t2IT))
1624 .addImm(ARMCC::NE)
1625 .addImm(8));
1626 // If SFPA is clear jump over to VLLDM, otherwise execute an instruction
1627 // which has no functional effect apart from causing context creation:
1628 // vmovne s0, s0. In the absence of FPU we emit .inst.w 0xeeb00a40,
1629 // which is defined as NOP if not executed.
1630 if (STI->hasFPRegs())
1631 Bundler.append(BuildMI(*MBB.getParent(), DL, TII->get(ARM::VMOVS))
1632 .addReg(ARM::S0, RegState::Define)
1633 .addReg(ARM::S0, RegState::Undef)
1634 .add(predOps(ARMCC::NE)));
1635 else
1636 Bundler.append(BuildMI(*MBB.getParent(), DL, TII->get(ARM::INLINEASM))
1637 .addExternalSymbol(".inst.w 0xeeb00a40")
1638 .addImm(InlineAsm::Extra_HasSideEffects));
1639 finalizeBundle(MBB, Bundler.begin(), Bundler.end());
1640 }
1641
1642 // Restore all FP registers via normal registers
1643 for (const auto &Regs : ClearedFPRegs) {
1644 unsigned Reg, SaveReg1, SaveReg2;
1645 std::tie(Reg, SaveReg1, SaveReg2) = Regs;
1646 if (ARM::DPR_VFP2RegClass.contains(Reg))
1647 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVDRR), Reg)
1648 .addReg(SaveReg1)
1649 .addReg(SaveReg2)
1650 .add(predOps(ARMCC::AL));
1651 else if (ARM::SPRRegClass.contains(Reg))
1652 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVSR), Reg)
1653 .addReg(SaveReg1)
1654 .add(predOps(ARMCC::AL));
1655 }
1656
1657 // Pop the stack space
1658 BuildMI(MBB, MBBI, DL, TII->get(ARM::tADDspi), ARM::SP)
1659 .addReg(ARM::SP)
1660 .addImm(CMSE_FP_SAVE_SIZE >> 2)
1661 .add(predOps(ARMCC::AL));
1662 }
1663
definesOrUsesFPReg(const MachineInstr & MI)1664 static bool definesOrUsesFPReg(const MachineInstr &MI) {
1665 for (const MachineOperand &Op : MI.operands()) {
1666 if (!Op.isReg())
1667 continue;
1668 Register Reg = Op.getReg();
1669 if ((Reg >= ARM::Q0 && Reg <= ARM::Q7) ||
1670 (Reg >= ARM::D0 && Reg <= ARM::D15) ||
1671 (Reg >= ARM::S0 && Reg <= ARM::S31))
1672 return true;
1673 }
1674 return false;
1675 }
1676
CMSERestoreFPRegsV81(MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI,DebugLoc & DL,SmallVectorImpl<unsigned> & AvailableRegs)1677 void ARMExpandPseudo::CMSERestoreFPRegsV81(
1678 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc &DL,
1679 SmallVectorImpl<unsigned> &AvailableRegs) {
1680 if (!definesOrUsesFPReg(*MBBI)) {
1681 if (STI->fixCMSE_CVE_2021_35465()) {
1682 BuildMI(MBB, MBBI, DL, TII->get(ARM::VSCCLRMS))
1683 .add(predOps(ARMCC::AL))
1684 .addReg(ARM::VPR, RegState::Define);
1685 }
1686
1687 // Load FP registers from stack.
1688 BuildMI(MBB, MBBI, DL, TII->get(ARM::VLLDM))
1689 .addReg(ARM::SP)
1690 .add(predOps(ARMCC::AL));
1691
1692 // Pop the stack space
1693 BuildMI(MBB, MBBI, DL, TII->get(ARM::tADDspi), ARM::SP)
1694 .addReg(ARM::SP)
1695 .addImm(CMSE_FP_SAVE_SIZE >> 2)
1696 .add(predOps(ARMCC::AL));
1697 } else {
1698 // Restore the floating point context.
1699 BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(ARM::VLDR_FPCXTS_post),
1700 ARM::SP)
1701 .addReg(ARM::SP)
1702 .addImm(8)
1703 .add(predOps(ARMCC::AL));
1704
1705 // Pop all the callee-saved registers (s16-s31).
1706 MachineInstrBuilder VPOP =
1707 BuildMI(MBB, MBBI, DL, TII->get(ARM::VLDMSIA_UPD), ARM::SP)
1708 .addReg(ARM::SP)
1709 .add(predOps(ARMCC::AL));
1710 for (int Reg = ARM::S16; Reg <= ARM::S31; ++Reg)
1711 VPOP.addReg(Reg, RegState::Define);
1712 }
1713 }
1714
1715 /// Expand a CMP_SWAP pseudo-inst to an ldrex/strex loop as simply as
1716 /// possible. This only gets used at -O0 so we don't care about efficiency of
1717 /// the generated code.
ExpandCMP_SWAP(MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI,unsigned LdrexOp,unsigned StrexOp,unsigned UxtOp,MachineBasicBlock::iterator & NextMBBI)1718 bool ARMExpandPseudo::ExpandCMP_SWAP(MachineBasicBlock &MBB,
1719 MachineBasicBlock::iterator MBBI,
1720 unsigned LdrexOp, unsigned StrexOp,
1721 unsigned UxtOp,
1722 MachineBasicBlock::iterator &NextMBBI) {
1723 bool IsThumb = STI->isThumb();
1724 bool IsThumb1Only = STI->isThumb1Only();
1725 MachineInstr &MI = *MBBI;
1726 DebugLoc DL = MI.getDebugLoc();
1727 const MachineOperand &Dest = MI.getOperand(0);
1728 Register TempReg = MI.getOperand(1).getReg();
1729 // Duplicating undef operands into 2 instructions does not guarantee the same
1730 // value on both; However undef should be replaced by xzr anyway.
1731 assert(!MI.getOperand(2).isUndef() && "cannot handle undef");
1732 Register AddrReg = MI.getOperand(2).getReg();
1733 Register DesiredReg = MI.getOperand(3).getReg();
1734 Register NewReg = MI.getOperand(4).getReg();
1735
1736 if (IsThumb) {
1737 assert(STI->hasV8MBaselineOps() &&
1738 "CMP_SWAP not expected to be custom expanded for Thumb1");
1739 assert((UxtOp == 0 || UxtOp == ARM::tUXTB || UxtOp == ARM::tUXTH) &&
1740 "ARMv8-M.baseline does not have t2UXTB/t2UXTH");
1741 assert((UxtOp == 0 || ARM::tGPRRegClass.contains(DesiredReg)) &&
1742 "DesiredReg used for UXT op must be tGPR");
1743 }
1744
1745 MachineFunction *MF = MBB.getParent();
1746 auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1747 auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1748 auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1749
1750 MF->insert(++MBB.getIterator(), LoadCmpBB);
1751 MF->insert(++LoadCmpBB->getIterator(), StoreBB);
1752 MF->insert(++StoreBB->getIterator(), DoneBB);
1753
1754 if (UxtOp) {
1755 MachineInstrBuilder MIB =
1756 BuildMI(MBB, MBBI, DL, TII->get(UxtOp), DesiredReg)
1757 .addReg(DesiredReg, RegState::Kill);
1758 if (!IsThumb)
1759 MIB.addImm(0);
1760 MIB.add(predOps(ARMCC::AL));
1761 }
1762
1763 // .Lloadcmp:
1764 // ldrex rDest, [rAddr]
1765 // cmp rDest, rDesired
1766 // bne .Ldone
1767
1768 MachineInstrBuilder MIB;
1769 MIB = BuildMI(LoadCmpBB, DL, TII->get(LdrexOp), Dest.getReg());
1770 MIB.addReg(AddrReg);
1771 if (LdrexOp == ARM::t2LDREX)
1772 MIB.addImm(0); // a 32-bit Thumb ldrex (only) allows an offset.
1773 MIB.add(predOps(ARMCC::AL));
1774
1775 unsigned CMPrr = IsThumb ? ARM::tCMPhir : ARM::CMPrr;
1776 BuildMI(LoadCmpBB, DL, TII->get(CMPrr))
1777 .addReg(Dest.getReg(), getKillRegState(Dest.isDead()))
1778 .addReg(DesiredReg)
1779 .add(predOps(ARMCC::AL));
1780 unsigned Bcc = IsThumb ? ARM::tBcc : ARM::Bcc;
1781 BuildMI(LoadCmpBB, DL, TII->get(Bcc))
1782 .addMBB(DoneBB)
1783 .addImm(ARMCC::NE)
1784 .addReg(ARM::CPSR, RegState::Kill);
1785 LoadCmpBB->addSuccessor(DoneBB);
1786 LoadCmpBB->addSuccessor(StoreBB);
1787
1788 // .Lstore:
1789 // strex rTempReg, rNew, [rAddr]
1790 // cmp rTempReg, #0
1791 // bne .Lloadcmp
1792 MIB = BuildMI(StoreBB, DL, TII->get(StrexOp), TempReg)
1793 .addReg(NewReg)
1794 .addReg(AddrReg);
1795 if (StrexOp == ARM::t2STREX)
1796 MIB.addImm(0); // a 32-bit Thumb strex (only) allows an offset.
1797 MIB.add(predOps(ARMCC::AL));
1798
1799 unsigned CMPri =
1800 IsThumb ? (IsThumb1Only ? ARM::tCMPi8 : ARM::t2CMPri) : ARM::CMPri;
1801 BuildMI(StoreBB, DL, TII->get(CMPri))
1802 .addReg(TempReg, RegState::Kill)
1803 .addImm(0)
1804 .add(predOps(ARMCC::AL));
1805 BuildMI(StoreBB, DL, TII->get(Bcc))
1806 .addMBB(LoadCmpBB)
1807 .addImm(ARMCC::NE)
1808 .addReg(ARM::CPSR, RegState::Kill);
1809 StoreBB->addSuccessor(LoadCmpBB);
1810 StoreBB->addSuccessor(DoneBB);
1811
1812 DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
1813 DoneBB->transferSuccessors(&MBB);
1814
1815 MBB.addSuccessor(LoadCmpBB);
1816
1817 NextMBBI = MBB.end();
1818 MI.eraseFromParent();
1819
1820 // Recompute livein lists.
1821 LivePhysRegs LiveRegs;
1822 computeAndAddLiveIns(LiveRegs, *DoneBB);
1823 computeAndAddLiveIns(LiveRegs, *StoreBB);
1824 computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
1825 // Do an extra pass around the loop to get loop carried registers right.
1826 StoreBB->clearLiveIns();
1827 computeAndAddLiveIns(LiveRegs, *StoreBB);
1828 LoadCmpBB->clearLiveIns();
1829 computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
1830
1831 return true;
1832 }
1833
1834 /// ARM's ldrexd/strexd take a consecutive register pair (represented as a
1835 /// single GPRPair register), Thumb's take two separate registers so we need to
1836 /// extract the subregs from the pair.
addExclusiveRegPair(MachineInstrBuilder & MIB,MachineOperand & Reg,unsigned Flags,bool IsThumb,const TargetRegisterInfo * TRI)1837 static void addExclusiveRegPair(MachineInstrBuilder &MIB, MachineOperand &Reg,
1838 unsigned Flags, bool IsThumb,
1839 const TargetRegisterInfo *TRI) {
1840 if (IsThumb) {
1841 Register RegLo = TRI->getSubReg(Reg.getReg(), ARM::gsub_0);
1842 Register RegHi = TRI->getSubReg(Reg.getReg(), ARM::gsub_1);
1843 MIB.addReg(RegLo, Flags);
1844 MIB.addReg(RegHi, Flags);
1845 } else
1846 MIB.addReg(Reg.getReg(), Flags);
1847 }
1848
1849 /// Expand a 64-bit CMP_SWAP to an ldrexd/strexd loop.
ExpandCMP_SWAP_64(MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI,MachineBasicBlock::iterator & NextMBBI)1850 bool ARMExpandPseudo::ExpandCMP_SWAP_64(MachineBasicBlock &MBB,
1851 MachineBasicBlock::iterator MBBI,
1852 MachineBasicBlock::iterator &NextMBBI) {
1853 bool IsThumb = STI->isThumb();
1854 assert(!STI->isThumb1Only() && "CMP_SWAP_64 unsupported under Thumb1!");
1855 MachineInstr &MI = *MBBI;
1856 DebugLoc DL = MI.getDebugLoc();
1857 MachineOperand &Dest = MI.getOperand(0);
1858 Register TempReg = MI.getOperand(1).getReg();
1859 // Duplicating undef operands into 2 instructions does not guarantee the same
1860 // value on both; However undef should be replaced by xzr anyway.
1861 assert(!MI.getOperand(2).isUndef() && "cannot handle undef");
1862 Register AddrReg = MI.getOperand(2).getReg();
1863 Register DesiredReg = MI.getOperand(3).getReg();
1864 MachineOperand New = MI.getOperand(4);
1865 New.setIsKill(false);
1866
1867 Register DestLo = TRI->getSubReg(Dest.getReg(), ARM::gsub_0);
1868 Register DestHi = TRI->getSubReg(Dest.getReg(), ARM::gsub_1);
1869 Register DesiredLo = TRI->getSubReg(DesiredReg, ARM::gsub_0);
1870 Register DesiredHi = TRI->getSubReg(DesiredReg, ARM::gsub_1);
1871
1872 MachineFunction *MF = MBB.getParent();
1873 auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1874 auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1875 auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1876
1877 MF->insert(++MBB.getIterator(), LoadCmpBB);
1878 MF->insert(++LoadCmpBB->getIterator(), StoreBB);
1879 MF->insert(++StoreBB->getIterator(), DoneBB);
1880
1881 // .Lloadcmp:
1882 // ldrexd rDestLo, rDestHi, [rAddr]
1883 // cmp rDestLo, rDesiredLo
1884 // sbcs dead rTempReg, rDestHi, rDesiredHi
1885 // bne .Ldone
1886 unsigned LDREXD = IsThumb ? ARM::t2LDREXD : ARM::LDREXD;
1887 MachineInstrBuilder MIB;
1888 MIB = BuildMI(LoadCmpBB, DL, TII->get(LDREXD));
1889 addExclusiveRegPair(MIB, Dest, RegState::Define, IsThumb, TRI);
1890 MIB.addReg(AddrReg).add(predOps(ARMCC::AL));
1891
1892 unsigned CMPrr = IsThumb ? ARM::tCMPhir : ARM::CMPrr;
1893 BuildMI(LoadCmpBB, DL, TII->get(CMPrr))
1894 .addReg(DestLo, getKillRegState(Dest.isDead()))
1895 .addReg(DesiredLo)
1896 .add(predOps(ARMCC::AL));
1897
1898 BuildMI(LoadCmpBB, DL, TII->get(CMPrr))
1899 .addReg(DestHi, getKillRegState(Dest.isDead()))
1900 .addReg(DesiredHi)
1901 .addImm(ARMCC::EQ).addReg(ARM::CPSR, RegState::Kill);
1902
1903 unsigned Bcc = IsThumb ? ARM::tBcc : ARM::Bcc;
1904 BuildMI(LoadCmpBB, DL, TII->get(Bcc))
1905 .addMBB(DoneBB)
1906 .addImm(ARMCC::NE)
1907 .addReg(ARM::CPSR, RegState::Kill);
1908 LoadCmpBB->addSuccessor(DoneBB);
1909 LoadCmpBB->addSuccessor(StoreBB);
1910
1911 // .Lstore:
1912 // strexd rTempReg, rNewLo, rNewHi, [rAddr]
1913 // cmp rTempReg, #0
1914 // bne .Lloadcmp
1915 unsigned STREXD = IsThumb ? ARM::t2STREXD : ARM::STREXD;
1916 MIB = BuildMI(StoreBB, DL, TII->get(STREXD), TempReg);
1917 unsigned Flags = getKillRegState(New.isDead());
1918 addExclusiveRegPair(MIB, New, Flags, IsThumb, TRI);
1919 MIB.addReg(AddrReg).add(predOps(ARMCC::AL));
1920
1921 unsigned CMPri = IsThumb ? ARM::t2CMPri : ARM::CMPri;
1922 BuildMI(StoreBB, DL, TII->get(CMPri))
1923 .addReg(TempReg, RegState::Kill)
1924 .addImm(0)
1925 .add(predOps(ARMCC::AL));
1926 BuildMI(StoreBB, DL, TII->get(Bcc))
1927 .addMBB(LoadCmpBB)
1928 .addImm(ARMCC::NE)
1929 .addReg(ARM::CPSR, RegState::Kill);
1930 StoreBB->addSuccessor(LoadCmpBB);
1931 StoreBB->addSuccessor(DoneBB);
1932
1933 DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
1934 DoneBB->transferSuccessors(&MBB);
1935
1936 MBB.addSuccessor(LoadCmpBB);
1937
1938 NextMBBI = MBB.end();
1939 MI.eraseFromParent();
1940
1941 // Recompute livein lists.
1942 LivePhysRegs LiveRegs;
1943 computeAndAddLiveIns(LiveRegs, *DoneBB);
1944 computeAndAddLiveIns(LiveRegs, *StoreBB);
1945 computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
1946 // Do an extra pass around the loop to get loop carried registers right.
1947 StoreBB->clearLiveIns();
1948 computeAndAddLiveIns(LiveRegs, *StoreBB);
1949 LoadCmpBB->clearLiveIns();
1950 computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
1951
1952 return true;
1953 }
1954
CMSEPushCalleeSaves(const TargetInstrInfo & TII,MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI,int JumpReg,const LivePhysRegs & LiveRegs,bool Thumb1Only)1955 static void CMSEPushCalleeSaves(const TargetInstrInfo &TII,
1956 MachineBasicBlock &MBB,
1957 MachineBasicBlock::iterator MBBI, int JumpReg,
1958 const LivePhysRegs &LiveRegs, bool Thumb1Only) {
1959 const DebugLoc &DL = MBBI->getDebugLoc();
1960 if (Thumb1Only) { // push Lo and Hi regs separately
1961 MachineInstrBuilder PushMIB =
1962 BuildMI(MBB, MBBI, DL, TII.get(ARM::tPUSH)).add(predOps(ARMCC::AL));
1963 for (int Reg = ARM::R4; Reg < ARM::R8; ++Reg) {
1964 PushMIB.addReg(
1965 Reg, Reg == JumpReg || LiveRegs.contains(Reg) ? 0 : RegState::Undef);
1966 }
1967
1968 // Thumb1 can only tPUSH low regs, so we copy the high regs to the low
1969 // regs that we just saved and push the low regs again, taking care to
1970 // not clobber JumpReg. If JumpReg is one of the low registers, push first
1971 // the values of r9-r11, and then r8. That would leave them ordered in
1972 // memory, and allow us to later pop them with a single instructions.
1973 // FIXME: Could also use any of r0-r3 that are free (including in the
1974 // first PUSH above).
1975 for (int LoReg = ARM::R7, HiReg = ARM::R11; LoReg >= ARM::R4; --LoReg) {
1976 if (JumpReg == LoReg)
1977 continue;
1978 BuildMI(MBB, MBBI, DL, TII.get(ARM::tMOVr), LoReg)
1979 .addReg(HiReg, LiveRegs.contains(HiReg) ? 0 : RegState::Undef)
1980 .add(predOps(ARMCC::AL));
1981 --HiReg;
1982 }
1983 MachineInstrBuilder PushMIB2 =
1984 BuildMI(MBB, MBBI, DL, TII.get(ARM::tPUSH)).add(predOps(ARMCC::AL));
1985 for (int Reg = ARM::R4; Reg < ARM::R8; ++Reg) {
1986 if (Reg == JumpReg)
1987 continue;
1988 PushMIB2.addReg(Reg, RegState::Kill);
1989 }
1990
1991 // If we couldn't use a low register for temporary storage (because it was
1992 // the JumpReg), use r4 or r5, whichever is not JumpReg. It has already been
1993 // saved.
1994 if (JumpReg >= ARM::R4 && JumpReg <= ARM::R7) {
1995 int LoReg = JumpReg == ARM::R4 ? ARM::R5 : ARM::R4;
1996 BuildMI(MBB, MBBI, DL, TII.get(ARM::tMOVr), LoReg)
1997 .addReg(ARM::R8, LiveRegs.contains(ARM::R8) ? 0 : RegState::Undef)
1998 .add(predOps(ARMCC::AL));
1999 BuildMI(MBB, MBBI, DL, TII.get(ARM::tPUSH))
2000 .add(predOps(ARMCC::AL))
2001 .addReg(LoReg, RegState::Kill);
2002 }
2003 } else { // push Lo and Hi registers with a single instruction
2004 MachineInstrBuilder PushMIB =
2005 BuildMI(MBB, MBBI, DL, TII.get(ARM::t2STMDB_UPD), ARM::SP)
2006 .addReg(ARM::SP)
2007 .add(predOps(ARMCC::AL));
2008 for (int Reg = ARM::R4; Reg < ARM::R12; ++Reg) {
2009 PushMIB.addReg(
2010 Reg, Reg == JumpReg || LiveRegs.contains(Reg) ? 0 : RegState::Undef);
2011 }
2012 }
2013 }
2014
CMSEPopCalleeSaves(const TargetInstrInfo & TII,MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI,int JumpReg,bool Thumb1Only)2015 static void CMSEPopCalleeSaves(const TargetInstrInfo &TII,
2016 MachineBasicBlock &MBB,
2017 MachineBasicBlock::iterator MBBI, int JumpReg,
2018 bool Thumb1Only) {
2019 const DebugLoc &DL = MBBI->getDebugLoc();
2020 if (Thumb1Only) {
2021 MachineInstrBuilder PopMIB =
2022 BuildMI(MBB, MBBI, DL, TII.get(ARM::tPOP)).add(predOps(ARMCC::AL));
2023 for (int R = 0; R < 4; ++R) {
2024 PopMIB.addReg(ARM::R4 + R, RegState::Define);
2025 BuildMI(MBB, MBBI, DL, TII.get(ARM::tMOVr), ARM::R8 + R)
2026 .addReg(ARM::R4 + R, RegState::Kill)
2027 .add(predOps(ARMCC::AL));
2028 }
2029 MachineInstrBuilder PopMIB2 =
2030 BuildMI(MBB, MBBI, DL, TII.get(ARM::tPOP)).add(predOps(ARMCC::AL));
2031 for (int R = 0; R < 4; ++R)
2032 PopMIB2.addReg(ARM::R4 + R, RegState::Define);
2033 } else { // pop Lo and Hi registers with a single instruction
2034 MachineInstrBuilder PopMIB =
2035 BuildMI(MBB, MBBI, DL, TII.get(ARM::t2LDMIA_UPD), ARM::SP)
2036 .addReg(ARM::SP)
2037 .add(predOps(ARMCC::AL));
2038 for (int Reg = ARM::R4; Reg < ARM::R12; ++Reg)
2039 PopMIB.addReg(Reg, RegState::Define);
2040 }
2041 }
2042
ExpandMI(MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI,MachineBasicBlock::iterator & NextMBBI)2043 bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
2044 MachineBasicBlock::iterator MBBI,
2045 MachineBasicBlock::iterator &NextMBBI) {
2046 MachineInstr &MI = *MBBI;
2047 unsigned Opcode = MI.getOpcode();
2048 switch (Opcode) {
2049 default:
2050 return false;
2051
2052 case ARM::VBSPd:
2053 case ARM::VBSPq: {
2054 Register DstReg = MI.getOperand(0).getReg();
2055 if (DstReg == MI.getOperand(3).getReg()) {
2056 // Expand to VBIT
2057 unsigned NewOpc = Opcode == ARM::VBSPd ? ARM::VBITd : ARM::VBITq;
2058 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc))
2059 .add(MI.getOperand(0))
2060 .add(MI.getOperand(3))
2061 .add(MI.getOperand(2))
2062 .add(MI.getOperand(1))
2063 .addImm(MI.getOperand(4).getImm())
2064 .add(MI.getOperand(5));
2065 } else if (DstReg == MI.getOperand(2).getReg()) {
2066 // Expand to VBIF
2067 unsigned NewOpc = Opcode == ARM::VBSPd ? ARM::VBIFd : ARM::VBIFq;
2068 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc))
2069 .add(MI.getOperand(0))
2070 .add(MI.getOperand(2))
2071 .add(MI.getOperand(3))
2072 .add(MI.getOperand(1))
2073 .addImm(MI.getOperand(4).getImm())
2074 .add(MI.getOperand(5));
2075 } else {
2076 // Expand to VBSL
2077 unsigned NewOpc = Opcode == ARM::VBSPd ? ARM::VBSLd : ARM::VBSLq;
2078 if (DstReg == MI.getOperand(1).getReg()) {
2079 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc))
2080 .add(MI.getOperand(0))
2081 .add(MI.getOperand(1))
2082 .add(MI.getOperand(2))
2083 .add(MI.getOperand(3))
2084 .addImm(MI.getOperand(4).getImm())
2085 .add(MI.getOperand(5));
2086 } else {
2087 // Use move to satisfy constraints
2088 unsigned MoveOpc = Opcode == ARM::VBSPd ? ARM::VORRd : ARM::VORRq;
2089 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(MoveOpc))
2090 .addReg(DstReg,
2091 RegState::Define |
2092 getRenamableRegState(MI.getOperand(0).isRenamable()))
2093 .add(MI.getOperand(1))
2094 .add(MI.getOperand(1))
2095 .addImm(MI.getOperand(4).getImm())
2096 .add(MI.getOperand(5));
2097 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc))
2098 .add(MI.getOperand(0))
2099 .addReg(DstReg,
2100 RegState::Kill |
2101 getRenamableRegState(MI.getOperand(0).isRenamable()))
2102 .add(MI.getOperand(2))
2103 .add(MI.getOperand(3))
2104 .addImm(MI.getOperand(4).getImm())
2105 .add(MI.getOperand(5));
2106 }
2107 }
2108 MI.eraseFromParent();
2109 return true;
2110 }
2111
2112 case ARM::TCRETURNdi:
2113 case ARM::TCRETURNri: {
2114 MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
2115 if (MBBI->getOpcode() == ARM::SEH_EpilogEnd)
2116 MBBI--;
2117 if (MBBI->getOpcode() == ARM::SEH_Nop_Ret)
2118 MBBI--;
2119 assert(MBBI->isReturn() &&
2120 "Can only insert epilog into returning blocks");
2121 unsigned RetOpcode = MBBI->getOpcode();
2122 DebugLoc dl = MBBI->getDebugLoc();
2123 const ARMBaseInstrInfo &TII = *static_cast<const ARMBaseInstrInfo *>(
2124 MBB.getParent()->getSubtarget().getInstrInfo());
2125
2126 // Tail call return: adjust the stack pointer and jump to callee.
2127 MBBI = MBB.getLastNonDebugInstr();
2128 if (MBBI->getOpcode() == ARM::SEH_EpilogEnd)
2129 MBBI--;
2130 if (MBBI->getOpcode() == ARM::SEH_Nop_Ret)
2131 MBBI--;
2132 MachineOperand &JumpTarget = MBBI->getOperand(0);
2133
2134 // Jump to label or value in register.
2135 if (RetOpcode == ARM::TCRETURNdi) {
2136 MachineFunction *MF = MBB.getParent();
2137 bool NeedsWinCFI = MF->getTarget().getMCAsmInfo()->usesWindowsCFI() &&
2138 MF->getFunction().needsUnwindTableEntry();
2139 unsigned TCOpcode =
2140 STI->isThumb()
2141 ? ((STI->isTargetMachO() || NeedsWinCFI) ? ARM::tTAILJMPd
2142 : ARM::tTAILJMPdND)
2143 : ARM::TAILJMPd;
2144 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(TCOpcode));
2145 if (JumpTarget.isGlobal())
2146 MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
2147 JumpTarget.getTargetFlags());
2148 else {
2149 assert(JumpTarget.isSymbol());
2150 MIB.addExternalSymbol(JumpTarget.getSymbolName(),
2151 JumpTarget.getTargetFlags());
2152 }
2153
2154 // Add the default predicate in Thumb mode.
2155 if (STI->isThumb())
2156 MIB.add(predOps(ARMCC::AL));
2157 } else if (RetOpcode == ARM::TCRETURNri) {
2158 unsigned Opcode =
2159 STI->isThumb() ? ARM::tTAILJMPr
2160 : (STI->hasV4TOps() ? ARM::TAILJMPr : ARM::TAILJMPr4);
2161 BuildMI(MBB, MBBI, dl,
2162 TII.get(Opcode))
2163 .addReg(JumpTarget.getReg(), RegState::Kill);
2164 }
2165
2166 auto NewMI = std::prev(MBBI);
2167 for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
2168 NewMI->addOperand(MBBI->getOperand(i));
2169
2170
2171 // Update call site info and delete the pseudo instruction TCRETURN.
2172 if (MI.isCandidateForCallSiteEntry())
2173 MI.getMF()->moveCallSiteInfo(&MI, &*NewMI);
2174 MBB.erase(MBBI);
2175
2176 MBBI = NewMI;
2177 return true;
2178 }
2179 case ARM::tBXNS_RET: {
2180 // For v8.0-M.Main we need to authenticate LR before clearing FPRs, which
2181 // uses R12 as a scratch register.
2182 if (!STI->hasV8_1MMainlineOps() && AFI->shouldSignReturnAddress())
2183 BuildMI(MBB, MBBI, DebugLoc(), TII->get(ARM::t2AUT));
2184
2185 MachineBasicBlock &AfterBB = CMSEClearFPRegs(MBB, MBBI);
2186
2187 if (STI->hasV8_1MMainlineOps()) {
2188 // Restore the non-secure floating point context.
2189 BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
2190 TII->get(ARM::VLDR_FPCXTNS_post), ARM::SP)
2191 .addReg(ARM::SP)
2192 .addImm(4)
2193 .add(predOps(ARMCC::AL));
2194
2195 if (AFI->shouldSignReturnAddress())
2196 BuildMI(AfterBB, AfterBB.end(), DebugLoc(), TII->get(ARM::t2AUT));
2197 }
2198
2199 // Clear all GPR that are not a use of the return instruction.
2200 assert(llvm::all_of(MBBI->operands(), [](const MachineOperand &Op) {
2201 return !Op.isReg() || Op.getReg() != ARM::R12;
2202 }));
2203 SmallVector<unsigned, 5> ClearRegs;
2204 determineGPRegsToClear(
2205 *MBBI, {ARM::R0, ARM::R1, ARM::R2, ARM::R3, ARM::R12}, ClearRegs);
2206 CMSEClearGPRegs(AfterBB, AfterBB.end(), MBBI->getDebugLoc(), ClearRegs,
2207 ARM::LR);
2208
2209 MachineInstrBuilder NewMI =
2210 BuildMI(AfterBB, AfterBB.end(), MBBI->getDebugLoc(),
2211 TII->get(ARM::tBXNS))
2212 .addReg(ARM::LR)
2213 .add(predOps(ARMCC::AL));
2214 for (const MachineOperand &Op : MI.operands())
2215 NewMI->addOperand(Op);
2216 MI.eraseFromParent();
2217 return true;
2218 }
2219 case ARM::tBLXNS_CALL: {
2220 DebugLoc DL = MBBI->getDebugLoc();
2221 Register JumpReg = MBBI->getOperand(0).getReg();
2222
2223 // Figure out which registers are live at the point immediately before the
2224 // call. When we indiscriminately push a set of registers, the live
2225 // registers are added as ordinary use operands, whereas dead registers
2226 // are "undef".
2227 LivePhysRegs LiveRegs(*TRI);
2228 LiveRegs.addLiveOuts(MBB);
2229 for (const MachineInstr &MI : make_range(MBB.rbegin(), MBBI.getReverse()))
2230 LiveRegs.stepBackward(MI);
2231 LiveRegs.stepBackward(*MBBI);
2232
2233 CMSEPushCalleeSaves(*TII, MBB, MBBI, JumpReg, LiveRegs,
2234 AFI->isThumb1OnlyFunction());
2235
2236 SmallVector<unsigned, 16> ClearRegs;
2237 determineGPRegsToClear(*MBBI,
2238 {ARM::R0, ARM::R1, ARM::R2, ARM::R3, ARM::R4,
2239 ARM::R5, ARM::R6, ARM::R7, ARM::R8, ARM::R9,
2240 ARM::R10, ARM::R11, ARM::R12},
2241 ClearRegs);
2242 auto OriginalClearRegs = ClearRegs;
2243
2244 // Get the first cleared register as a scratch (to use later with tBIC).
2245 // We need to use the first so we can ensure it is a low register.
2246 unsigned ScratchReg = ClearRegs.front();
2247
2248 // Clear LSB of JumpReg
2249 if (AFI->isThumb2Function()) {
2250 BuildMI(MBB, MBBI, DL, TII->get(ARM::t2BICri), JumpReg)
2251 .addReg(JumpReg)
2252 .addImm(1)
2253 .add(predOps(ARMCC::AL))
2254 .add(condCodeOp());
2255 } else {
2256 // We need to use an extra register to cope with 8M Baseline,
2257 // since we have saved all of the registers we are ok to trash a non
2258 // argument register here.
2259 BuildMI(MBB, MBBI, DL, TII->get(ARM::tMOVi8), ScratchReg)
2260 .add(condCodeOp())
2261 .addImm(1)
2262 .add(predOps(ARMCC::AL));
2263 BuildMI(MBB, MBBI, DL, TII->get(ARM::tBIC), JumpReg)
2264 .addReg(ARM::CPSR, RegState::Define)
2265 .addReg(JumpReg)
2266 .addReg(ScratchReg)
2267 .add(predOps(ARMCC::AL));
2268 }
2269
2270 CMSESaveClearFPRegs(MBB, MBBI, DL, LiveRegs,
2271 ClearRegs); // save+clear FP regs with ClearRegs
2272 CMSEClearGPRegs(MBB, MBBI, DL, ClearRegs, JumpReg);
2273
2274 const MachineInstrBuilder NewCall =
2275 BuildMI(MBB, MBBI, DL, TII->get(ARM::tBLXNSr))
2276 .add(predOps(ARMCC::AL))
2277 .addReg(JumpReg, RegState::Kill);
2278
2279 for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
2280 NewCall->addOperand(MO);
2281 if (MI.isCandidateForCallSiteEntry())
2282 MI.getMF()->moveCallSiteInfo(&MI, NewCall.getInstr());
2283
2284 CMSERestoreFPRegs(MBB, MBBI, DL, OriginalClearRegs); // restore FP registers
2285
2286 CMSEPopCalleeSaves(*TII, MBB, MBBI, JumpReg, AFI->isThumb1OnlyFunction());
2287
2288 MI.eraseFromParent();
2289 return true;
2290 }
2291 case ARM::VMOVHcc:
2292 case ARM::VMOVScc:
2293 case ARM::VMOVDcc: {
2294 unsigned newOpc = Opcode != ARM::VMOVDcc ? ARM::VMOVS : ARM::VMOVD;
2295 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(newOpc),
2296 MI.getOperand(1).getReg())
2297 .add(MI.getOperand(2))
2298 .addImm(MI.getOperand(3).getImm()) // 'pred'
2299 .add(MI.getOperand(4))
2300 .add(makeImplicit(MI.getOperand(1)));
2301
2302 MI.eraseFromParent();
2303 return true;
2304 }
2305 case ARM::t2MOVCCr:
2306 case ARM::MOVCCr: {
2307 unsigned Opc = AFI->isThumbFunction() ? ARM::t2MOVr : ARM::MOVr;
2308 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc),
2309 MI.getOperand(1).getReg())
2310 .add(MI.getOperand(2))
2311 .addImm(MI.getOperand(3).getImm()) // 'pred'
2312 .add(MI.getOperand(4))
2313 .add(condCodeOp()) // 's' bit
2314 .add(makeImplicit(MI.getOperand(1)));
2315
2316 MI.eraseFromParent();
2317 return true;
2318 }
2319 case ARM::MOVCCsi: {
2320 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi),
2321 (MI.getOperand(1).getReg()))
2322 .add(MI.getOperand(2))
2323 .addImm(MI.getOperand(3).getImm())
2324 .addImm(MI.getOperand(4).getImm()) // 'pred'
2325 .add(MI.getOperand(5))
2326 .add(condCodeOp()) // 's' bit
2327 .add(makeImplicit(MI.getOperand(1)));
2328
2329 MI.eraseFromParent();
2330 return true;
2331 }
2332 case ARM::MOVCCsr: {
2333 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsr),
2334 (MI.getOperand(1).getReg()))
2335 .add(MI.getOperand(2))
2336 .add(MI.getOperand(3))
2337 .addImm(MI.getOperand(4).getImm())
2338 .addImm(MI.getOperand(5).getImm()) // 'pred'
2339 .add(MI.getOperand(6))
2340 .add(condCodeOp()) // 's' bit
2341 .add(makeImplicit(MI.getOperand(1)));
2342
2343 MI.eraseFromParent();
2344 return true;
2345 }
2346 case ARM::t2MOVCCi16:
2347 case ARM::MOVCCi16: {
2348 unsigned NewOpc = AFI->isThumbFunction() ? ARM::t2MOVi16 : ARM::MOVi16;
2349 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc),
2350 MI.getOperand(1).getReg())
2351 .addImm(MI.getOperand(2).getImm())
2352 .addImm(MI.getOperand(3).getImm()) // 'pred'
2353 .add(MI.getOperand(4))
2354 .add(makeImplicit(MI.getOperand(1)));
2355 MI.eraseFromParent();
2356 return true;
2357 }
2358 case ARM::t2MOVCCi:
2359 case ARM::MOVCCi: {
2360 unsigned Opc = AFI->isThumbFunction() ? ARM::t2MOVi : ARM::MOVi;
2361 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc),
2362 MI.getOperand(1).getReg())
2363 .addImm(MI.getOperand(2).getImm())
2364 .addImm(MI.getOperand(3).getImm()) // 'pred'
2365 .add(MI.getOperand(4))
2366 .add(condCodeOp()) // 's' bit
2367 .add(makeImplicit(MI.getOperand(1)));
2368
2369 MI.eraseFromParent();
2370 return true;
2371 }
2372 case ARM::t2MVNCCi:
2373 case ARM::MVNCCi: {
2374 unsigned Opc = AFI->isThumbFunction() ? ARM::t2MVNi : ARM::MVNi;
2375 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc),
2376 MI.getOperand(1).getReg())
2377 .addImm(MI.getOperand(2).getImm())
2378 .addImm(MI.getOperand(3).getImm()) // 'pred'
2379 .add(MI.getOperand(4))
2380 .add(condCodeOp()) // 's' bit
2381 .add(makeImplicit(MI.getOperand(1)));
2382
2383 MI.eraseFromParent();
2384 return true;
2385 }
2386 case ARM::t2MOVCClsl:
2387 case ARM::t2MOVCClsr:
2388 case ARM::t2MOVCCasr:
2389 case ARM::t2MOVCCror: {
2390 unsigned NewOpc;
2391 switch (Opcode) {
2392 case ARM::t2MOVCClsl: NewOpc = ARM::t2LSLri; break;
2393 case ARM::t2MOVCClsr: NewOpc = ARM::t2LSRri; break;
2394 case ARM::t2MOVCCasr: NewOpc = ARM::t2ASRri; break;
2395 case ARM::t2MOVCCror: NewOpc = ARM::t2RORri; break;
2396 default: llvm_unreachable("unexpeced conditional move");
2397 }
2398 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc),
2399 MI.getOperand(1).getReg())
2400 .add(MI.getOperand(2))
2401 .addImm(MI.getOperand(3).getImm())
2402 .addImm(MI.getOperand(4).getImm()) // 'pred'
2403 .add(MI.getOperand(5))
2404 .add(condCodeOp()) // 's' bit
2405 .add(makeImplicit(MI.getOperand(1)));
2406 MI.eraseFromParent();
2407 return true;
2408 }
2409 case ARM::Int_eh_sjlj_dispatchsetup: {
2410 MachineFunction &MF = *MI.getParent()->getParent();
2411 const ARMBaseInstrInfo *AII =
2412 static_cast<const ARMBaseInstrInfo*>(TII);
2413 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
2414 // For functions using a base pointer, we rematerialize it (via the frame
2415 // pointer) here since eh.sjlj.setjmp and eh.sjlj.longjmp don't do it
2416 // for us. Otherwise, expand to nothing.
2417 if (RI.hasBasePointer(MF)) {
2418 int32_t NumBytes = AFI->getFramePtrSpillOffset();
2419 Register FramePtr = RI.getFrameRegister(MF);
2420 assert(MF.getSubtarget().getFrameLowering()->hasFP(MF) &&
2421 "base pointer without frame pointer?");
2422
2423 if (AFI->isThumb2Function()) {
2424 emitT2RegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
2425 FramePtr, -NumBytes, ARMCC::AL, 0, *TII);
2426 } else if (AFI->isThumbFunction()) {
2427 emitThumbRegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
2428 FramePtr, -NumBytes, *TII, RI);
2429 } else {
2430 emitARMRegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
2431 FramePtr, -NumBytes, ARMCC::AL, 0,
2432 *TII);
2433 }
2434 // If there's dynamic realignment, adjust for it.
2435 if (RI.hasStackRealignment(MF)) {
2436 MachineFrameInfo &MFI = MF.getFrameInfo();
2437 Align MaxAlign = MFI.getMaxAlign();
2438 assert (!AFI->isThumb1OnlyFunction());
2439 // Emit bic r6, r6, MaxAlign
2440 assert(MaxAlign <= Align(256) &&
2441 "The BIC instruction cannot encode "
2442 "immediates larger than 256 with all lower "
2443 "bits set.");
2444 unsigned bicOpc = AFI->isThumbFunction() ?
2445 ARM::t2BICri : ARM::BICri;
2446 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(bicOpc), ARM::R6)
2447 .addReg(ARM::R6, RegState::Kill)
2448 .addImm(MaxAlign.value() - 1)
2449 .add(predOps(ARMCC::AL))
2450 .add(condCodeOp());
2451 }
2452 }
2453 MI.eraseFromParent();
2454 return true;
2455 }
2456
2457 case ARM::MOVsrl_flag:
2458 case ARM::MOVsra_flag: {
2459 // These are just fancy MOVs instructions.
2460 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi),
2461 MI.getOperand(0).getReg())
2462 .add(MI.getOperand(1))
2463 .addImm(ARM_AM::getSORegOpc(
2464 (Opcode == ARM::MOVsrl_flag ? ARM_AM::lsr : ARM_AM::asr), 1))
2465 .add(predOps(ARMCC::AL))
2466 .addReg(ARM::CPSR, RegState::Define);
2467 MI.eraseFromParent();
2468 return true;
2469 }
2470 case ARM::RRX: {
2471 // This encodes as "MOVs Rd, Rm, rrx
2472 MachineInstrBuilder MIB =
2473 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi),
2474 MI.getOperand(0).getReg())
2475 .add(MI.getOperand(1))
2476 .addImm(ARM_AM::getSORegOpc(ARM_AM::rrx, 0))
2477 .add(predOps(ARMCC::AL))
2478 .add(condCodeOp());
2479 TransferImpOps(MI, MIB, MIB);
2480 MI.eraseFromParent();
2481 return true;
2482 }
2483 case ARM::tTPsoft:
2484 case ARM::TPsoft: {
2485 const bool Thumb = Opcode == ARM::tTPsoft;
2486
2487 MachineInstrBuilder MIB;
2488 MachineFunction *MF = MBB.getParent();
2489 if (STI->genLongCalls()) {
2490 MachineConstantPool *MCP = MF->getConstantPool();
2491 unsigned PCLabelID = AFI->createPICLabelUId();
2492 MachineConstantPoolValue *CPV =
2493 ARMConstantPoolSymbol::Create(MF->getFunction().getContext(),
2494 "__aeabi_read_tp", PCLabelID, 0);
2495 Register Reg = MI.getOperand(0).getReg();
2496 MIB =
2497 BuildMI(MBB, MBBI, MI.getDebugLoc(),
2498 TII->get(Thumb ? ARM::tLDRpci : ARM::LDRi12), Reg)
2499 .addConstantPoolIndex(MCP->getConstantPoolIndex(CPV, Align(4)));
2500 if (!Thumb)
2501 MIB.addImm(0);
2502 MIB.add(predOps(ARMCC::AL));
2503
2504 MIB =
2505 BuildMI(MBB, MBBI, MI.getDebugLoc(),
2506 TII->get(Thumb ? gettBLXrOpcode(*MF) : getBLXOpcode(*MF)));
2507 if (Thumb)
2508 MIB.add(predOps(ARMCC::AL));
2509 MIB.addReg(Reg, RegState::Kill);
2510 } else {
2511 MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
2512 TII->get(Thumb ? ARM::tBL : ARM::BL));
2513 if (Thumb)
2514 MIB.add(predOps(ARMCC::AL));
2515 MIB.addExternalSymbol("__aeabi_read_tp", 0);
2516 }
2517
2518 MIB.cloneMemRefs(MI);
2519 TransferImpOps(MI, MIB, MIB);
2520 // Update the call site info.
2521 if (MI.isCandidateForCallSiteEntry())
2522 MF->moveCallSiteInfo(&MI, &*MIB);
2523 MI.eraseFromParent();
2524 return true;
2525 }
2526 case ARM::tLDRpci_pic:
2527 case ARM::t2LDRpci_pic: {
2528 unsigned NewLdOpc = (Opcode == ARM::tLDRpci_pic)
2529 ? ARM::tLDRpci : ARM::t2LDRpci;
2530 Register DstReg = MI.getOperand(0).getReg();
2531 bool DstIsDead = MI.getOperand(0).isDead();
2532 MachineInstrBuilder MIB1 =
2533 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewLdOpc), DstReg)
2534 .add(MI.getOperand(1))
2535 .add(predOps(ARMCC::AL));
2536 MIB1.cloneMemRefs(MI);
2537 MachineInstrBuilder MIB2 =
2538 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tPICADD))
2539 .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
2540 .addReg(DstReg)
2541 .add(MI.getOperand(2));
2542 TransferImpOps(MI, MIB1, MIB2);
2543 MI.eraseFromParent();
2544 return true;
2545 }
2546
2547 case ARM::LDRLIT_ga_abs:
2548 case ARM::LDRLIT_ga_pcrel:
2549 case ARM::LDRLIT_ga_pcrel_ldr:
2550 case ARM::tLDRLIT_ga_abs:
2551 case ARM::t2LDRLIT_ga_pcrel:
2552 case ARM::tLDRLIT_ga_pcrel: {
2553 Register DstReg = MI.getOperand(0).getReg();
2554 bool DstIsDead = MI.getOperand(0).isDead();
2555 const MachineOperand &MO1 = MI.getOperand(1);
2556 auto Flags = MO1.getTargetFlags();
2557 const GlobalValue *GV = MO1.getGlobal();
2558 bool IsARM = Opcode != ARM::tLDRLIT_ga_pcrel &&
2559 Opcode != ARM::tLDRLIT_ga_abs &&
2560 Opcode != ARM::t2LDRLIT_ga_pcrel;
2561 bool IsPIC =
2562 Opcode != ARM::LDRLIT_ga_abs && Opcode != ARM::tLDRLIT_ga_abs;
2563 unsigned LDRLITOpc = IsARM ? ARM::LDRi12 : ARM::tLDRpci;
2564 if (Opcode == ARM::t2LDRLIT_ga_pcrel)
2565 LDRLITOpc = ARM::t2LDRpci;
2566 unsigned PICAddOpc =
2567 IsARM
2568 ? (Opcode == ARM::LDRLIT_ga_pcrel_ldr ? ARM::PICLDR : ARM::PICADD)
2569 : ARM::tPICADD;
2570
2571 // We need a new const-pool entry to load from.
2572 MachineConstantPool *MCP = MBB.getParent()->getConstantPool();
2573 unsigned ARMPCLabelIndex = 0;
2574 MachineConstantPoolValue *CPV;
2575
2576 if (IsPIC) {
2577 unsigned PCAdj = IsARM ? 8 : 4;
2578 auto Modifier = (Flags & ARMII::MO_GOT)
2579 ? ARMCP::GOT_PREL
2580 : ARMCP::no_modifier;
2581 ARMPCLabelIndex = AFI->createPICLabelUId();
2582 CPV = ARMConstantPoolConstant::Create(
2583 GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj, Modifier,
2584 /*AddCurrentAddr*/ Modifier == ARMCP::GOT_PREL);
2585 } else
2586 CPV = ARMConstantPoolConstant::Create(GV, ARMCP::no_modifier);
2587
2588 MachineInstrBuilder MIB =
2589 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(LDRLITOpc), DstReg)
2590 .addConstantPoolIndex(MCP->getConstantPoolIndex(CPV, Align(4)));
2591 if (IsARM)
2592 MIB.addImm(0);
2593 MIB.add(predOps(ARMCC::AL));
2594
2595 if (IsPIC) {
2596 MachineInstrBuilder MIB =
2597 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(PICAddOpc))
2598 .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
2599 .addReg(DstReg)
2600 .addImm(ARMPCLabelIndex);
2601
2602 if (IsARM)
2603 MIB.add(predOps(ARMCC::AL));
2604 }
2605
2606 MI.eraseFromParent();
2607 return true;
2608 }
2609 case ARM::MOV_ga_pcrel:
2610 case ARM::MOV_ga_pcrel_ldr:
2611 case ARM::t2MOV_ga_pcrel: {
2612 // Expand into movw + movw. Also "add pc" / ldr [pc] in PIC mode.
2613 unsigned LabelId = AFI->createPICLabelUId();
2614 Register DstReg = MI.getOperand(0).getReg();
2615 bool DstIsDead = MI.getOperand(0).isDead();
2616 const MachineOperand &MO1 = MI.getOperand(1);
2617 const GlobalValue *GV = MO1.getGlobal();
2618 unsigned TF = MO1.getTargetFlags();
2619 bool isARM = Opcode != ARM::t2MOV_ga_pcrel;
2620 unsigned LO16Opc = isARM ? ARM::MOVi16_ga_pcrel : ARM::t2MOVi16_ga_pcrel;
2621 unsigned HI16Opc = isARM ? ARM::MOVTi16_ga_pcrel :ARM::t2MOVTi16_ga_pcrel;
2622 unsigned LO16TF = TF | ARMII::MO_LO16;
2623 unsigned HI16TF = TF | ARMII::MO_HI16;
2624 unsigned PICAddOpc = isARM
2625 ? (Opcode == ARM::MOV_ga_pcrel_ldr ? ARM::PICLDR : ARM::PICADD)
2626 : ARM::tPICADD;
2627 MachineInstrBuilder MIB1 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
2628 TII->get(LO16Opc), DstReg)
2629 .addGlobalAddress(GV, MO1.getOffset(), TF | LO16TF)
2630 .addImm(LabelId);
2631
2632 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(HI16Opc), DstReg)
2633 .addReg(DstReg)
2634 .addGlobalAddress(GV, MO1.getOffset(), TF | HI16TF)
2635 .addImm(LabelId);
2636
2637 MachineInstrBuilder MIB3 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
2638 TII->get(PICAddOpc))
2639 .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
2640 .addReg(DstReg).addImm(LabelId);
2641 if (isARM) {
2642 MIB3.add(predOps(ARMCC::AL));
2643 if (Opcode == ARM::MOV_ga_pcrel_ldr)
2644 MIB3.cloneMemRefs(MI);
2645 }
2646 TransferImpOps(MI, MIB1, MIB3);
2647 MI.eraseFromParent();
2648 return true;
2649 }
2650
2651 case ARM::MOVi32imm:
2652 case ARM::MOVCCi32imm:
2653 case ARM::t2MOVi32imm:
2654 case ARM::t2MOVCCi32imm:
2655 ExpandMOV32BitImm(MBB, MBBI);
2656 return true;
2657
2658 case ARM::SUBS_PC_LR: {
2659 MachineInstrBuilder MIB =
2660 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::SUBri), ARM::PC)
2661 .addReg(ARM::LR)
2662 .add(MI.getOperand(0))
2663 .add(MI.getOperand(1))
2664 .add(MI.getOperand(2))
2665 .addReg(ARM::CPSR, RegState::Undef);
2666 TransferImpOps(MI, MIB, MIB);
2667 MI.eraseFromParent();
2668 return true;
2669 }
2670 case ARM::VLDMQIA: {
2671 unsigned NewOpc = ARM::VLDMDIA;
2672 MachineInstrBuilder MIB =
2673 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc));
2674 unsigned OpIdx = 0;
2675
2676 // Grab the Q register destination.
2677 bool DstIsDead = MI.getOperand(OpIdx).isDead();
2678 Register DstReg = MI.getOperand(OpIdx++).getReg();
2679
2680 // Copy the source register.
2681 MIB.add(MI.getOperand(OpIdx++));
2682
2683 // Copy the predicate operands.
2684 MIB.add(MI.getOperand(OpIdx++));
2685 MIB.add(MI.getOperand(OpIdx++));
2686
2687 // Add the destination operands (D subregs).
2688 Register D0 = TRI->getSubReg(DstReg, ARM::dsub_0);
2689 Register D1 = TRI->getSubReg(DstReg, ARM::dsub_1);
2690 MIB.addReg(D0, RegState::Define | getDeadRegState(DstIsDead))
2691 .addReg(D1, RegState::Define | getDeadRegState(DstIsDead));
2692
2693 // Add an implicit def for the super-register.
2694 MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
2695 TransferImpOps(MI, MIB, MIB);
2696 MIB.cloneMemRefs(MI);
2697 MI.eraseFromParent();
2698 return true;
2699 }
2700
2701 case ARM::VSTMQIA: {
2702 unsigned NewOpc = ARM::VSTMDIA;
2703 MachineInstrBuilder MIB =
2704 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc));
2705 unsigned OpIdx = 0;
2706
2707 // Grab the Q register source.
2708 bool SrcIsKill = MI.getOperand(OpIdx).isKill();
2709 Register SrcReg = MI.getOperand(OpIdx++).getReg();
2710
2711 // Copy the destination register.
2712 MachineOperand Dst(MI.getOperand(OpIdx++));
2713 MIB.add(Dst);
2714
2715 // Copy the predicate operands.
2716 MIB.add(MI.getOperand(OpIdx++));
2717 MIB.add(MI.getOperand(OpIdx++));
2718
2719 // Add the source operands (D subregs).
2720 Register D0 = TRI->getSubReg(SrcReg, ARM::dsub_0);
2721 Register D1 = TRI->getSubReg(SrcReg, ARM::dsub_1);
2722 MIB.addReg(D0, SrcIsKill ? RegState::Kill : 0)
2723 .addReg(D1, SrcIsKill ? RegState::Kill : 0);
2724
2725 if (SrcIsKill) // Add an implicit kill for the Q register.
2726 MIB->addRegisterKilled(SrcReg, TRI, true);
2727
2728 TransferImpOps(MI, MIB, MIB);
2729 MIB.cloneMemRefs(MI);
2730 MI.eraseFromParent();
2731 return true;
2732 }
2733
2734 case ARM::VLD2q8Pseudo:
2735 case ARM::VLD2q16Pseudo:
2736 case ARM::VLD2q32Pseudo:
2737 case ARM::VLD2q8PseudoWB_fixed:
2738 case ARM::VLD2q16PseudoWB_fixed:
2739 case ARM::VLD2q32PseudoWB_fixed:
2740 case ARM::VLD2q8PseudoWB_register:
2741 case ARM::VLD2q16PseudoWB_register:
2742 case ARM::VLD2q32PseudoWB_register:
2743 case ARM::VLD3d8Pseudo:
2744 case ARM::VLD3d16Pseudo:
2745 case ARM::VLD3d32Pseudo:
2746 case ARM::VLD1d8TPseudo:
2747 case ARM::VLD1d8TPseudoWB_fixed:
2748 case ARM::VLD1d8TPseudoWB_register:
2749 case ARM::VLD1d16TPseudo:
2750 case ARM::VLD1d16TPseudoWB_fixed:
2751 case ARM::VLD1d16TPseudoWB_register:
2752 case ARM::VLD1d32TPseudo:
2753 case ARM::VLD1d32TPseudoWB_fixed:
2754 case ARM::VLD1d32TPseudoWB_register:
2755 case ARM::VLD1d64TPseudo:
2756 case ARM::VLD1d64TPseudoWB_fixed:
2757 case ARM::VLD1d64TPseudoWB_register:
2758 case ARM::VLD3d8Pseudo_UPD:
2759 case ARM::VLD3d16Pseudo_UPD:
2760 case ARM::VLD3d32Pseudo_UPD:
2761 case ARM::VLD3q8Pseudo_UPD:
2762 case ARM::VLD3q16Pseudo_UPD:
2763 case ARM::VLD3q32Pseudo_UPD:
2764 case ARM::VLD3q8oddPseudo:
2765 case ARM::VLD3q16oddPseudo:
2766 case ARM::VLD3q32oddPseudo:
2767 case ARM::VLD3q8oddPseudo_UPD:
2768 case ARM::VLD3q16oddPseudo_UPD:
2769 case ARM::VLD3q32oddPseudo_UPD:
2770 case ARM::VLD4d8Pseudo:
2771 case ARM::VLD4d16Pseudo:
2772 case ARM::VLD4d32Pseudo:
2773 case ARM::VLD1d8QPseudo:
2774 case ARM::VLD1d8QPseudoWB_fixed:
2775 case ARM::VLD1d8QPseudoWB_register:
2776 case ARM::VLD1d16QPseudo:
2777 case ARM::VLD1d16QPseudoWB_fixed:
2778 case ARM::VLD1d16QPseudoWB_register:
2779 case ARM::VLD1d32QPseudo:
2780 case ARM::VLD1d32QPseudoWB_fixed:
2781 case ARM::VLD1d32QPseudoWB_register:
2782 case ARM::VLD1d64QPseudo:
2783 case ARM::VLD1d64QPseudoWB_fixed:
2784 case ARM::VLD1d64QPseudoWB_register:
2785 case ARM::VLD1q8HighQPseudo:
2786 case ARM::VLD1q8HighQPseudo_UPD:
2787 case ARM::VLD1q8LowQPseudo_UPD:
2788 case ARM::VLD1q8HighTPseudo:
2789 case ARM::VLD1q8HighTPseudo_UPD:
2790 case ARM::VLD1q8LowTPseudo_UPD:
2791 case ARM::VLD1q16HighQPseudo:
2792 case ARM::VLD1q16HighQPseudo_UPD:
2793 case ARM::VLD1q16LowQPseudo_UPD:
2794 case ARM::VLD1q16HighTPseudo:
2795 case ARM::VLD1q16HighTPseudo_UPD:
2796 case ARM::VLD1q16LowTPseudo_UPD:
2797 case ARM::VLD1q32HighQPseudo:
2798 case ARM::VLD1q32HighQPseudo_UPD:
2799 case ARM::VLD1q32LowQPseudo_UPD:
2800 case ARM::VLD1q32HighTPseudo:
2801 case ARM::VLD1q32HighTPseudo_UPD:
2802 case ARM::VLD1q32LowTPseudo_UPD:
2803 case ARM::VLD1q64HighQPseudo:
2804 case ARM::VLD1q64HighQPseudo_UPD:
2805 case ARM::VLD1q64LowQPseudo_UPD:
2806 case ARM::VLD1q64HighTPseudo:
2807 case ARM::VLD1q64HighTPseudo_UPD:
2808 case ARM::VLD1q64LowTPseudo_UPD:
2809 case ARM::VLD4d8Pseudo_UPD:
2810 case ARM::VLD4d16Pseudo_UPD:
2811 case ARM::VLD4d32Pseudo_UPD:
2812 case ARM::VLD4q8Pseudo_UPD:
2813 case ARM::VLD4q16Pseudo_UPD:
2814 case ARM::VLD4q32Pseudo_UPD:
2815 case ARM::VLD4q8oddPseudo:
2816 case ARM::VLD4q16oddPseudo:
2817 case ARM::VLD4q32oddPseudo:
2818 case ARM::VLD4q8oddPseudo_UPD:
2819 case ARM::VLD4q16oddPseudo_UPD:
2820 case ARM::VLD4q32oddPseudo_UPD:
2821 case ARM::VLD3DUPd8Pseudo:
2822 case ARM::VLD3DUPd16Pseudo:
2823 case ARM::VLD3DUPd32Pseudo:
2824 case ARM::VLD3DUPd8Pseudo_UPD:
2825 case ARM::VLD3DUPd16Pseudo_UPD:
2826 case ARM::VLD3DUPd32Pseudo_UPD:
2827 case ARM::VLD4DUPd8Pseudo:
2828 case ARM::VLD4DUPd16Pseudo:
2829 case ARM::VLD4DUPd32Pseudo:
2830 case ARM::VLD4DUPd8Pseudo_UPD:
2831 case ARM::VLD4DUPd16Pseudo_UPD:
2832 case ARM::VLD4DUPd32Pseudo_UPD:
2833 case ARM::VLD2DUPq8EvenPseudo:
2834 case ARM::VLD2DUPq8OddPseudo:
2835 case ARM::VLD2DUPq16EvenPseudo:
2836 case ARM::VLD2DUPq16OddPseudo:
2837 case ARM::VLD2DUPq32EvenPseudo:
2838 case ARM::VLD2DUPq32OddPseudo:
2839 case ARM::VLD2DUPq8OddPseudoWB_fixed:
2840 case ARM::VLD2DUPq8OddPseudoWB_register:
2841 case ARM::VLD2DUPq16OddPseudoWB_fixed:
2842 case ARM::VLD2DUPq16OddPseudoWB_register:
2843 case ARM::VLD2DUPq32OddPseudoWB_fixed:
2844 case ARM::VLD2DUPq32OddPseudoWB_register:
2845 case ARM::VLD3DUPq8EvenPseudo:
2846 case ARM::VLD3DUPq8OddPseudo:
2847 case ARM::VLD3DUPq16EvenPseudo:
2848 case ARM::VLD3DUPq16OddPseudo:
2849 case ARM::VLD3DUPq32EvenPseudo:
2850 case ARM::VLD3DUPq32OddPseudo:
2851 case ARM::VLD3DUPq8OddPseudo_UPD:
2852 case ARM::VLD3DUPq16OddPseudo_UPD:
2853 case ARM::VLD3DUPq32OddPseudo_UPD:
2854 case ARM::VLD4DUPq8EvenPseudo:
2855 case ARM::VLD4DUPq8OddPseudo:
2856 case ARM::VLD4DUPq16EvenPseudo:
2857 case ARM::VLD4DUPq16OddPseudo:
2858 case ARM::VLD4DUPq32EvenPseudo:
2859 case ARM::VLD4DUPq32OddPseudo:
2860 case ARM::VLD4DUPq8OddPseudo_UPD:
2861 case ARM::VLD4DUPq16OddPseudo_UPD:
2862 case ARM::VLD4DUPq32OddPseudo_UPD:
2863 ExpandVLD(MBBI);
2864 return true;
2865
2866 case ARM::VST2q8Pseudo:
2867 case ARM::VST2q16Pseudo:
2868 case ARM::VST2q32Pseudo:
2869 case ARM::VST2q8PseudoWB_fixed:
2870 case ARM::VST2q16PseudoWB_fixed:
2871 case ARM::VST2q32PseudoWB_fixed:
2872 case ARM::VST2q8PseudoWB_register:
2873 case ARM::VST2q16PseudoWB_register:
2874 case ARM::VST2q32PseudoWB_register:
2875 case ARM::VST3d8Pseudo:
2876 case ARM::VST3d16Pseudo:
2877 case ARM::VST3d32Pseudo:
2878 case ARM::VST1d8TPseudo:
2879 case ARM::VST1d8TPseudoWB_fixed:
2880 case ARM::VST1d8TPseudoWB_register:
2881 case ARM::VST1d16TPseudo:
2882 case ARM::VST1d16TPseudoWB_fixed:
2883 case ARM::VST1d16TPseudoWB_register:
2884 case ARM::VST1d32TPseudo:
2885 case ARM::VST1d32TPseudoWB_fixed:
2886 case ARM::VST1d32TPseudoWB_register:
2887 case ARM::VST1d64TPseudo:
2888 case ARM::VST1d64TPseudoWB_fixed:
2889 case ARM::VST1d64TPseudoWB_register:
2890 case ARM::VST3d8Pseudo_UPD:
2891 case ARM::VST3d16Pseudo_UPD:
2892 case ARM::VST3d32Pseudo_UPD:
2893 case ARM::VST3q8Pseudo_UPD:
2894 case ARM::VST3q16Pseudo_UPD:
2895 case ARM::VST3q32Pseudo_UPD:
2896 case ARM::VST3q8oddPseudo:
2897 case ARM::VST3q16oddPseudo:
2898 case ARM::VST3q32oddPseudo:
2899 case ARM::VST3q8oddPseudo_UPD:
2900 case ARM::VST3q16oddPseudo_UPD:
2901 case ARM::VST3q32oddPseudo_UPD:
2902 case ARM::VST4d8Pseudo:
2903 case ARM::VST4d16Pseudo:
2904 case ARM::VST4d32Pseudo:
2905 case ARM::VST1d8QPseudo:
2906 case ARM::VST1d8QPseudoWB_fixed:
2907 case ARM::VST1d8QPseudoWB_register:
2908 case ARM::VST1d16QPseudo:
2909 case ARM::VST1d16QPseudoWB_fixed:
2910 case ARM::VST1d16QPseudoWB_register:
2911 case ARM::VST1d32QPseudo:
2912 case ARM::VST1d32QPseudoWB_fixed:
2913 case ARM::VST1d32QPseudoWB_register:
2914 case ARM::VST1d64QPseudo:
2915 case ARM::VST1d64QPseudoWB_fixed:
2916 case ARM::VST1d64QPseudoWB_register:
2917 case ARM::VST4d8Pseudo_UPD:
2918 case ARM::VST4d16Pseudo_UPD:
2919 case ARM::VST4d32Pseudo_UPD:
2920 case ARM::VST1q8HighQPseudo:
2921 case ARM::VST1q8LowQPseudo_UPD:
2922 case ARM::VST1q8HighTPseudo:
2923 case ARM::VST1q8LowTPseudo_UPD:
2924 case ARM::VST1q16HighQPseudo:
2925 case ARM::VST1q16LowQPseudo_UPD:
2926 case ARM::VST1q16HighTPseudo:
2927 case ARM::VST1q16LowTPseudo_UPD:
2928 case ARM::VST1q32HighQPseudo:
2929 case ARM::VST1q32LowQPseudo_UPD:
2930 case ARM::VST1q32HighTPseudo:
2931 case ARM::VST1q32LowTPseudo_UPD:
2932 case ARM::VST1q64HighQPseudo:
2933 case ARM::VST1q64LowQPseudo_UPD:
2934 case ARM::VST1q64HighTPseudo:
2935 case ARM::VST1q64LowTPseudo_UPD:
2936 case ARM::VST1q8HighTPseudo_UPD:
2937 case ARM::VST1q16HighTPseudo_UPD:
2938 case ARM::VST1q32HighTPseudo_UPD:
2939 case ARM::VST1q64HighTPseudo_UPD:
2940 case ARM::VST1q8HighQPseudo_UPD:
2941 case ARM::VST1q16HighQPseudo_UPD:
2942 case ARM::VST1q32HighQPseudo_UPD:
2943 case ARM::VST1q64HighQPseudo_UPD:
2944 case ARM::VST4q8Pseudo_UPD:
2945 case ARM::VST4q16Pseudo_UPD:
2946 case ARM::VST4q32Pseudo_UPD:
2947 case ARM::VST4q8oddPseudo:
2948 case ARM::VST4q16oddPseudo:
2949 case ARM::VST4q32oddPseudo:
2950 case ARM::VST4q8oddPseudo_UPD:
2951 case ARM::VST4q16oddPseudo_UPD:
2952 case ARM::VST4q32oddPseudo_UPD:
2953 ExpandVST(MBBI);
2954 return true;
2955
2956 case ARM::VLD1LNq8Pseudo:
2957 case ARM::VLD1LNq16Pseudo:
2958 case ARM::VLD1LNq32Pseudo:
2959 case ARM::VLD1LNq8Pseudo_UPD:
2960 case ARM::VLD1LNq16Pseudo_UPD:
2961 case ARM::VLD1LNq32Pseudo_UPD:
2962 case ARM::VLD2LNd8Pseudo:
2963 case ARM::VLD2LNd16Pseudo:
2964 case ARM::VLD2LNd32Pseudo:
2965 case ARM::VLD2LNq16Pseudo:
2966 case ARM::VLD2LNq32Pseudo:
2967 case ARM::VLD2LNd8Pseudo_UPD:
2968 case ARM::VLD2LNd16Pseudo_UPD:
2969 case ARM::VLD2LNd32Pseudo_UPD:
2970 case ARM::VLD2LNq16Pseudo_UPD:
2971 case ARM::VLD2LNq32Pseudo_UPD:
2972 case ARM::VLD3LNd8Pseudo:
2973 case ARM::VLD3LNd16Pseudo:
2974 case ARM::VLD3LNd32Pseudo:
2975 case ARM::VLD3LNq16Pseudo:
2976 case ARM::VLD3LNq32Pseudo:
2977 case ARM::VLD3LNd8Pseudo_UPD:
2978 case ARM::VLD3LNd16Pseudo_UPD:
2979 case ARM::VLD3LNd32Pseudo_UPD:
2980 case ARM::VLD3LNq16Pseudo_UPD:
2981 case ARM::VLD3LNq32Pseudo_UPD:
2982 case ARM::VLD4LNd8Pseudo:
2983 case ARM::VLD4LNd16Pseudo:
2984 case ARM::VLD4LNd32Pseudo:
2985 case ARM::VLD4LNq16Pseudo:
2986 case ARM::VLD4LNq32Pseudo:
2987 case ARM::VLD4LNd8Pseudo_UPD:
2988 case ARM::VLD4LNd16Pseudo_UPD:
2989 case ARM::VLD4LNd32Pseudo_UPD:
2990 case ARM::VLD4LNq16Pseudo_UPD:
2991 case ARM::VLD4LNq32Pseudo_UPD:
2992 case ARM::VST1LNq8Pseudo:
2993 case ARM::VST1LNq16Pseudo:
2994 case ARM::VST1LNq32Pseudo:
2995 case ARM::VST1LNq8Pseudo_UPD:
2996 case ARM::VST1LNq16Pseudo_UPD:
2997 case ARM::VST1LNq32Pseudo_UPD:
2998 case ARM::VST2LNd8Pseudo:
2999 case ARM::VST2LNd16Pseudo:
3000 case ARM::VST2LNd32Pseudo:
3001 case ARM::VST2LNq16Pseudo:
3002 case ARM::VST2LNq32Pseudo:
3003 case ARM::VST2LNd8Pseudo_UPD:
3004 case ARM::VST2LNd16Pseudo_UPD:
3005 case ARM::VST2LNd32Pseudo_UPD:
3006 case ARM::VST2LNq16Pseudo_UPD:
3007 case ARM::VST2LNq32Pseudo_UPD:
3008 case ARM::VST3LNd8Pseudo:
3009 case ARM::VST3LNd16Pseudo:
3010 case ARM::VST3LNd32Pseudo:
3011 case ARM::VST3LNq16Pseudo:
3012 case ARM::VST3LNq32Pseudo:
3013 case ARM::VST3LNd8Pseudo_UPD:
3014 case ARM::VST3LNd16Pseudo_UPD:
3015 case ARM::VST3LNd32Pseudo_UPD:
3016 case ARM::VST3LNq16Pseudo_UPD:
3017 case ARM::VST3LNq32Pseudo_UPD:
3018 case ARM::VST4LNd8Pseudo:
3019 case ARM::VST4LNd16Pseudo:
3020 case ARM::VST4LNd32Pseudo:
3021 case ARM::VST4LNq16Pseudo:
3022 case ARM::VST4LNq32Pseudo:
3023 case ARM::VST4LNd8Pseudo_UPD:
3024 case ARM::VST4LNd16Pseudo_UPD:
3025 case ARM::VST4LNd32Pseudo_UPD:
3026 case ARM::VST4LNq16Pseudo_UPD:
3027 case ARM::VST4LNq32Pseudo_UPD:
3028 ExpandLaneOp(MBBI);
3029 return true;
3030
3031 case ARM::VTBL3Pseudo: ExpandVTBL(MBBI, ARM::VTBL3, false); return true;
3032 case ARM::VTBL4Pseudo: ExpandVTBL(MBBI, ARM::VTBL4, false); return true;
3033 case ARM::VTBX3Pseudo: ExpandVTBL(MBBI, ARM::VTBX3, true); return true;
3034 case ARM::VTBX4Pseudo: ExpandVTBL(MBBI, ARM::VTBX4, true); return true;
3035
3036 case ARM::MQQPRLoad:
3037 case ARM::MQQPRStore:
3038 case ARM::MQQQQPRLoad:
3039 case ARM::MQQQQPRStore:
3040 ExpandMQQPRLoadStore(MBBI);
3041 return true;
3042
3043 case ARM::tCMP_SWAP_8:
3044 assert(STI->isThumb());
3045 return ExpandCMP_SWAP(MBB, MBBI, ARM::t2LDREXB, ARM::t2STREXB, ARM::tUXTB,
3046 NextMBBI);
3047 case ARM::tCMP_SWAP_16:
3048 assert(STI->isThumb());
3049 return ExpandCMP_SWAP(MBB, MBBI, ARM::t2LDREXH, ARM::t2STREXH, ARM::tUXTH,
3050 NextMBBI);
3051 case ARM::tCMP_SWAP_32:
3052 assert(STI->isThumb());
3053 return ExpandCMP_SWAP(MBB, MBBI, ARM::t2LDREX, ARM::t2STREX, 0, NextMBBI);
3054
3055 case ARM::CMP_SWAP_8:
3056 assert(!STI->isThumb());
3057 return ExpandCMP_SWAP(MBB, MBBI, ARM::LDREXB, ARM::STREXB, ARM::UXTB,
3058 NextMBBI);
3059 case ARM::CMP_SWAP_16:
3060 assert(!STI->isThumb());
3061 return ExpandCMP_SWAP(MBB, MBBI, ARM::LDREXH, ARM::STREXH, ARM::UXTH,
3062 NextMBBI);
3063 case ARM::CMP_SWAP_32:
3064 assert(!STI->isThumb());
3065 return ExpandCMP_SWAP(MBB, MBBI, ARM::LDREX, ARM::STREX, 0, NextMBBI);
3066
3067 case ARM::CMP_SWAP_64:
3068 return ExpandCMP_SWAP_64(MBB, MBBI, NextMBBI);
3069
3070 case ARM::tBL_PUSHLR:
3071 case ARM::BL_PUSHLR: {
3072 const bool Thumb = Opcode == ARM::tBL_PUSHLR;
3073 Register Reg = MI.getOperand(0).getReg();
3074 assert(Reg == ARM::LR && "expect LR register!");
3075 MachineInstrBuilder MIB;
3076 if (Thumb) {
3077 // push {lr}
3078 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tPUSH))
3079 .add(predOps(ARMCC::AL))
3080 .addReg(Reg);
3081
3082 // bl __gnu_mcount_nc
3083 MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tBL));
3084 } else {
3085 // stmdb sp!, {lr}
3086 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::STMDB_UPD))
3087 .addReg(ARM::SP, RegState::Define)
3088 .addReg(ARM::SP)
3089 .add(predOps(ARMCC::AL))
3090 .addReg(Reg);
3091
3092 // bl __gnu_mcount_nc
3093 MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::BL));
3094 }
3095 MIB.cloneMemRefs(MI);
3096 for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
3097 MIB.add(MO);
3098 MI.eraseFromParent();
3099 return true;
3100 }
3101 case ARM::t2CALL_BTI: {
3102 MachineFunction &MF = *MI.getMF();
3103 MachineInstrBuilder MIB =
3104 BuildMI(MF, MI.getDebugLoc(), TII->get(ARM::tBL));
3105 MIB.cloneMemRefs(MI);
3106 for (unsigned i = 0; i < MI.getNumOperands(); ++i)
3107 MIB.add(MI.getOperand(i));
3108 if (MI.isCandidateForCallSiteEntry())
3109 MF.moveCallSiteInfo(&MI, MIB.getInstr());
3110 MIBundleBuilder Bundler(MBB, MI);
3111 Bundler.append(MIB);
3112 Bundler.append(BuildMI(MF, MI.getDebugLoc(), TII->get(ARM::t2BTI)));
3113 finalizeBundle(MBB, Bundler.begin(), Bundler.end());
3114 MI.eraseFromParent();
3115 return true;
3116 }
3117 case ARM::LOADDUAL:
3118 case ARM::STOREDUAL: {
3119 Register PairReg = MI.getOperand(0).getReg();
3120
3121 MachineInstrBuilder MIB =
3122 BuildMI(MBB, MBBI, MI.getDebugLoc(),
3123 TII->get(Opcode == ARM::LOADDUAL ? ARM::LDRD : ARM::STRD))
3124 .addReg(TRI->getSubReg(PairReg, ARM::gsub_0),
3125 Opcode == ARM::LOADDUAL ? RegState::Define : 0)
3126 .addReg(TRI->getSubReg(PairReg, ARM::gsub_1),
3127 Opcode == ARM::LOADDUAL ? RegState::Define : 0);
3128 for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
3129 MIB.add(MO);
3130 MIB.add(predOps(ARMCC::AL));
3131 MIB.cloneMemRefs(MI);
3132 MI.eraseFromParent();
3133 return true;
3134 }
3135 }
3136 }
3137
ExpandMBB(MachineBasicBlock & MBB)3138 bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
3139 bool Modified = false;
3140
3141 MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
3142 while (MBBI != E) {
3143 MachineBasicBlock::iterator NMBBI = std::next(MBBI);
3144 Modified |= ExpandMI(MBB, MBBI, NMBBI);
3145 MBBI = NMBBI;
3146 }
3147
3148 return Modified;
3149 }
3150
runOnMachineFunction(MachineFunction & MF)3151 bool ARMExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
3152 STI = &MF.getSubtarget<ARMSubtarget>();
3153 TII = STI->getInstrInfo();
3154 TRI = STI->getRegisterInfo();
3155 AFI = MF.getInfo<ARMFunctionInfo>();
3156
3157 LLVM_DEBUG(dbgs() << "********** ARM EXPAND PSEUDO INSTRUCTIONS **********\n"
3158 << "********** Function: " << MF.getName() << '\n');
3159
3160 bool Modified = false;
3161 for (MachineBasicBlock &MBB : MF)
3162 Modified |= ExpandMBB(MBB);
3163 if (VerifyARMPseudo)
3164 MF.verify(this, "After expanding ARM pseudo instructions.");
3165
3166 LLVM_DEBUG(dbgs() << "***************************************************\n");
3167 return Modified;
3168 }
3169
3170 /// createARMExpandPseudoPass - returns an instance of the pseudo instruction
3171 /// expansion pass.
createARMExpandPseudoPass()3172 FunctionPass *llvm::createARMExpandPseudoPass() {
3173 return new ARMExpandPseudo();
3174 }
3175