1 //===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that ARM uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "ARMISelLowering.h"
15 #include "ARMBaseInstrInfo.h"
16 #include "ARMBaseRegisterInfo.h"
17 #include "ARMCallingConv.h"
18 #include "ARMConstantPoolValue.h"
19 #include "ARMMachineFunctionInfo.h"
20 #include "ARMPerfectShuffle.h"
21 #include "ARMRegisterInfo.h"
22 #include "ARMSelectionDAGInfo.h"
23 #include "ARMSubtarget.h"
24 #include "ARMTargetTransformInfo.h"
25 #include "MCTargetDesc/ARMAddressingModes.h"
26 #include "MCTargetDesc/ARMBaseInfo.h"
27 #include "Utils/ARMBaseInfo.h"
28 #include "llvm/ADT/APFloat.h"
29 #include "llvm/ADT/APInt.h"
30 #include "llvm/ADT/ArrayRef.h"
31 #include "llvm/ADT/BitVector.h"
32 #include "llvm/ADT/DenseMap.h"
33 #include "llvm/ADT/STLExtras.h"
34 #include "llvm/ADT/SmallPtrSet.h"
35 #include "llvm/ADT/SmallVector.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/ADT/StringExtras.h"
38 #include "llvm/ADT/StringRef.h"
39 #include "llvm/ADT/StringSwitch.h"
40 #include "llvm/ADT/Twine.h"
41 #include "llvm/Analysis/VectorUtils.h"
42 #include "llvm/CodeGen/CallingConvLower.h"
43 #include "llvm/CodeGen/ComplexDeinterleavingPass.h"
44 #include "llvm/CodeGen/ISDOpcodes.h"
45 #include "llvm/CodeGen/IntrinsicLowering.h"
46 #include "llvm/CodeGen/MachineBasicBlock.h"
47 #include "llvm/CodeGen/MachineConstantPool.h"
48 #include "llvm/CodeGen/MachineFrameInfo.h"
49 #include "llvm/CodeGen/MachineFunction.h"
50 #include "llvm/CodeGen/MachineInstr.h"
51 #include "llvm/CodeGen/MachineInstrBuilder.h"
52 #include "llvm/CodeGen/MachineJumpTableInfo.h"
53 #include "llvm/CodeGen/MachineMemOperand.h"
54 #include "llvm/CodeGen/MachineOperand.h"
55 #include "llvm/CodeGen/MachineRegisterInfo.h"
56 #include "llvm/CodeGen/MachineValueType.h"
57 #include "llvm/CodeGen/RuntimeLibcalls.h"
58 #include "llvm/CodeGen/SelectionDAG.h"
59 #include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
60 #include "llvm/CodeGen/SelectionDAGNodes.h"
61 #include "llvm/CodeGen/TargetInstrInfo.h"
62 #include "llvm/CodeGen/TargetLowering.h"
63 #include "llvm/CodeGen/TargetOpcodes.h"
64 #include "llvm/CodeGen/TargetRegisterInfo.h"
65 #include "llvm/CodeGen/TargetSubtargetInfo.h"
66 #include "llvm/CodeGen/ValueTypes.h"
67 #include "llvm/IR/Attributes.h"
68 #include "llvm/IR/CallingConv.h"
69 #include "llvm/IR/Constant.h"
70 #include "llvm/IR/Constants.h"
71 #include "llvm/IR/DataLayout.h"
72 #include "llvm/IR/DebugLoc.h"
73 #include "llvm/IR/DerivedTypes.h"
74 #include "llvm/IR/Function.h"
75 #include "llvm/IR/GlobalAlias.h"
76 #include "llvm/IR/GlobalValue.h"
77 #include "llvm/IR/GlobalVariable.h"
78 #include "llvm/IR/IRBuilder.h"
79 #include "llvm/IR/InlineAsm.h"
80 #include "llvm/IR/Instruction.h"
81 #include "llvm/IR/Instructions.h"
82 #include "llvm/IR/IntrinsicInst.h"
83 #include "llvm/IR/Intrinsics.h"
84 #include "llvm/IR/IntrinsicsARM.h"
85 #include "llvm/IR/Module.h"
86 #include "llvm/IR/PatternMatch.h"
87 #include "llvm/IR/Type.h"
88 #include "llvm/IR/User.h"
89 #include "llvm/IR/Value.h"
90 #include "llvm/MC/MCInstrDesc.h"
91 #include "llvm/MC/MCInstrItineraries.h"
92 #include "llvm/MC/MCRegisterInfo.h"
93 #include "llvm/MC/MCSchedule.h"
94 #include "llvm/Support/AtomicOrdering.h"
95 #include "llvm/Support/BranchProbability.h"
96 #include "llvm/Support/Casting.h"
97 #include "llvm/Support/CodeGen.h"
98 #include "llvm/Support/CommandLine.h"
99 #include "llvm/Support/Compiler.h"
100 #include "llvm/Support/Debug.h"
101 #include "llvm/Support/ErrorHandling.h"
102 #include "llvm/Support/KnownBits.h"
103 #include "llvm/Support/MathExtras.h"
104 #include "llvm/Support/raw_ostream.h"
105 #include "llvm/Target/TargetMachine.h"
106 #include "llvm/Target/TargetOptions.h"
107 #include "llvm/TargetParser/Triple.h"
108 #include <algorithm>
109 #include <cassert>
110 #include <cstdint>
111 #include <cstdlib>
112 #include <iterator>
113 #include <limits>
114 #include <optional>
115 #include <tuple>
116 #include <utility>
117 #include <vector>
118 
119 using namespace llvm;
120 using namespace llvm::PatternMatch;
121 
122 #define DEBUG_TYPE "arm-isel"
123 
124 STATISTIC(NumTailCalls, "Number of tail calls");
125 STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
126 STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
127 STATISTIC(NumConstpoolPromoted,
128   "Number of constants with their storage promoted into constant pools");
129 
130 static cl::opt<bool>
131 ARMInterworking("arm-interworking", cl::Hidden,
132   cl::desc("Enable / disable ARM interworking (for debugging only)"),
133   cl::init(true));
134 
135 static cl::opt<bool> EnableConstpoolPromotion(
136     "arm-promote-constant", cl::Hidden,
137     cl::desc("Enable / disable promotion of unnamed_addr constants into "
138              "constant pools"),
139     cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
140 static cl::opt<unsigned> ConstpoolPromotionMaxSize(
141     "arm-promote-constant-max-size", cl::Hidden,
142     cl::desc("Maximum size of constant to promote into a constant pool"),
143     cl::init(64));
144 static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
145     "arm-promote-constant-max-total", cl::Hidden,
146     cl::desc("Maximum size of ALL constants to promote into a constant pool"),
147     cl::init(128));
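
// Note: the three promotion options above are hidden command-line flags (for
// tools such as llc). An illustrative, hypothetical invocation for
// experimenting with them would be:
//   llc -mtriple=armv7a-none-eabi -arm-promote-constant
//       -arm-promote-constant-max-size=64 input.ll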
148 
149 cl::opt<unsigned>
150 MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
151   cl::desc("Maximum interleave factor for MVE VLDn to generate."),
152   cl::init(2));
153 
154 // The APCS parameter registers.
155 static const MCPhysReg GPRArgRegs[] = {
156   ARM::R0, ARM::R1, ARM::R2, ARM::R3
157 };
158 
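// Configure the SelectionDAG operation actions for the NEON vector type VT.
// If VT differs from PromotedLdStVT, its loads and stores are promoted to that
// type. Lane insert/extract, BUILD_VECTOR and VECTOR_SHUFFLE get custom
// lowering, while operations NEON has no instructions for (divide, remainder,
// select, ...) are expanded.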
159 void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT) {
160   if (VT != PromotedLdStVT) {
161     setOperationAction(ISD::LOAD, VT, Promote);
162     AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);
163 
164     setOperationAction(ISD::STORE, VT, Promote);
165     AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
166   }
167 
168   MVT ElemTy = VT.getVectorElementType();
169   if (ElemTy != MVT::f64)
170     setOperationAction(ISD::SETCC, VT, Custom);
171   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
172   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
173   if (ElemTy == MVT::i32) {
174     setOperationAction(ISD::SINT_TO_FP, VT, Custom);
175     setOperationAction(ISD::UINT_TO_FP, VT, Custom);
176     setOperationAction(ISD::FP_TO_SINT, VT, Custom);
177     setOperationAction(ISD::FP_TO_UINT, VT, Custom);
178   } else {
179     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
180     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
181     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
182     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
183   }
184   setOperationAction(ISD::BUILD_VECTOR,      VT, Custom);
185   setOperationAction(ISD::VECTOR_SHUFFLE,    VT, Custom);
186   setOperationAction(ISD::CONCAT_VECTORS,    VT, Legal);
187   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
188   setOperationAction(ISD::SELECT,            VT, Expand);
189   setOperationAction(ISD::SELECT_CC,         VT, Expand);
190   setOperationAction(ISD::VSELECT,           VT, Expand);
191   setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
192   if (VT.isInteger()) {
193     setOperationAction(ISD::SHL, VT, Custom);
194     setOperationAction(ISD::SRA, VT, Custom);
195     setOperationAction(ISD::SRL, VT, Custom);
196   }
197 
198   // Neon does not support vector divide/remainder operations.
199   setOperationAction(ISD::SDIV, VT, Expand);
200   setOperationAction(ISD::UDIV, VT, Expand);
201   setOperationAction(ISD::FDIV, VT, Expand);
202   setOperationAction(ISD::SREM, VT, Expand);
203   setOperationAction(ISD::UREM, VT, Expand);
204   setOperationAction(ISD::FREM, VT, Expand);
205   setOperationAction(ISD::SDIVREM, VT, Expand);
206   setOperationAction(ISD::UDIVREM, VT, Expand);
207 
208   if (!VT.isFloatingPoint() &&
209       VT != MVT::v2i64 && VT != MVT::v1i64)
210     for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
211       setOperationAction(Opcode, VT, Legal);
212   if (!VT.isFloatingPoint())
213     for (auto Opcode : {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT})
214       setOperationAction(Opcode, VT, Legal);
215 }
216 
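// Register VT as a 64-bit NEON vector type living in the D registers; its
// loads and stores are handled as f64.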
217 void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
218   addRegisterClass(VT, &ARM::DPRRegClass);
219   addTypeForNEON(VT, MVT::f64);
220 }
221 
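// Register VT as a 128-bit NEON vector type living in D-register pairs; its
// loads and stores are handled as v2f64.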
222 void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
223   addRegisterClass(VT, &ARM::DPairRegClass);
224   addTypeForNEON(VT, MVT::v2f64);
225 }
226 
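// Mark every standard operation on VT as Expand, then re-mark the handful of
// operations that work for any type with a register class (bitcast, load,
// store, undef) as Legal.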
227 void ARMTargetLowering::setAllExpand(MVT VT) {
228   for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
229     setOperationAction(Opc, VT, Expand);
230 
231   // We support these really simple operations even on types where all
232   // the actual arithmetic has to be broken down into simpler
233   // operations or turned into library calls.
234   setOperationAction(ISD::BITCAST, VT, Legal);
235   setOperationAction(ISD::LOAD, VT, Legal);
236   setOperationAction(ISD::STORE, VT, Legal);
237   setOperationAction(ISD::UNDEF, VT, Legal);
238 }
239 
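// Apply Action to all three extending-load flavours (any-, zero- and
// sign-extend) with result type From and memory type To. For example,
//   addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);
// marks every extending load of a v4i8 into a v4i32 as legal, as is done for
// MVE further down.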
240 void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
241                                        LegalizeAction Action) {
242   setLoadExtAction(ISD::EXTLOAD,  From, To, Action);
243   setLoadExtAction(ISD::ZEXTLOAD, From, To, Action);
244   setLoadExtAction(ISD::SEXTLOAD, From, To, Action);
245 }
246 
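// Set up the 128-bit MVE vector types: the integer vectors, the f16/f32
// vectors, the v2i64/v2f64 types that are only supported up to the
// bitcast/load/store level, and the predicate (VCCR) vectors. HasMVEFP
// selects whether the floating-point half of the extension is available.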
247 void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
248   const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 };
249 
250   for (auto VT : IntTypes) {
251     addRegisterClass(VT, &ARM::MQPRRegClass);
252     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
253     setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
254     setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
255     setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
256     setOperationAction(ISD::SHL, VT, Custom);
257     setOperationAction(ISD::SRA, VT, Custom);
258     setOperationAction(ISD::SRL, VT, Custom);
259     setOperationAction(ISD::SMIN, VT, Legal);
260     setOperationAction(ISD::SMAX, VT, Legal);
261     setOperationAction(ISD::UMIN, VT, Legal);
262     setOperationAction(ISD::UMAX, VT, Legal);
263     setOperationAction(ISD::ABS, VT, Legal);
264     setOperationAction(ISD::SETCC, VT, Custom);
265     setOperationAction(ISD::MLOAD, VT, Custom);
266     setOperationAction(ISD::MSTORE, VT, Legal);
267     setOperationAction(ISD::CTLZ, VT, Legal);
268     setOperationAction(ISD::CTTZ, VT, Custom);
269     setOperationAction(ISD::BITREVERSE, VT, Legal);
270     setOperationAction(ISD::BSWAP, VT, Legal);
271     setOperationAction(ISD::SADDSAT, VT, Legal);
272     setOperationAction(ISD::UADDSAT, VT, Legal);
273     setOperationAction(ISD::SSUBSAT, VT, Legal);
274     setOperationAction(ISD::USUBSAT, VT, Legal);
275     setOperationAction(ISD::ABDS, VT, Legal);
276     setOperationAction(ISD::ABDU, VT, Legal);
277     setOperationAction(ISD::AVGFLOORS, VT, Legal);
278     setOperationAction(ISD::AVGFLOORU, VT, Legal);
279     setOperationAction(ISD::AVGCEILS, VT, Legal);
280     setOperationAction(ISD::AVGCEILU, VT, Legal);
281 
282     // No native support for these.
283     setOperationAction(ISD::UDIV, VT, Expand);
284     setOperationAction(ISD::SDIV, VT, Expand);
285     setOperationAction(ISD::UREM, VT, Expand);
286     setOperationAction(ISD::SREM, VT, Expand);
287     setOperationAction(ISD::UDIVREM, VT, Expand);
288     setOperationAction(ISD::SDIVREM, VT, Expand);
289     setOperationAction(ISD::CTPOP, VT, Expand);
290     setOperationAction(ISD::SELECT, VT, Expand);
291     setOperationAction(ISD::SELECT_CC, VT, Expand);
292 
293     // Vector reductions
294     setOperationAction(ISD::VECREDUCE_ADD, VT, Legal);
295     setOperationAction(ISD::VECREDUCE_SMAX, VT, Legal);
296     setOperationAction(ISD::VECREDUCE_UMAX, VT, Legal);
297     setOperationAction(ISD::VECREDUCE_SMIN, VT, Legal);
298     setOperationAction(ISD::VECREDUCE_UMIN, VT, Legal);
299     setOperationAction(ISD::VECREDUCE_MUL, VT, Custom);
300     setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
301     setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
302     setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
303 
304     if (!HasMVEFP) {
305       setOperationAction(ISD::SINT_TO_FP, VT, Expand);
306       setOperationAction(ISD::UINT_TO_FP, VT, Expand);
307       setOperationAction(ISD::FP_TO_SINT, VT, Expand);
308       setOperationAction(ISD::FP_TO_UINT, VT, Expand);
309     } else {
310       setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom);
311       setOperationAction(ISD::FP_TO_UINT_SAT, VT, Custom);
312     }
313 
314     // Pre and Post inc are supported on loads and stores
315     for (unsigned im = (unsigned)ISD::PRE_INC;
316          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
317       setIndexedLoadAction(im, VT, Legal);
318       setIndexedStoreAction(im, VT, Legal);
319       setIndexedMaskedLoadAction(im, VT, Legal);
320       setIndexedMaskedStoreAction(im, VT, Legal);
321     }
322   }
323 
324   const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 };
325   for (auto VT : FloatTypes) {
326     addRegisterClass(VT, &ARM::MQPRRegClass);
327     if (!HasMVEFP)
328       setAllExpand(VT);
329 
330     // These are legal or custom whether we have MVE.fp or not
331     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
332     setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
333     setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getVectorElementType(), Custom);
334     setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
335     setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
336     setOperationAction(ISD::BUILD_VECTOR, VT.getVectorElementType(), Custom);
337     setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
338     setOperationAction(ISD::SETCC, VT, Custom);
339     setOperationAction(ISD::MLOAD, VT, Custom);
340     setOperationAction(ISD::MSTORE, VT, Legal);
341     setOperationAction(ISD::SELECT, VT, Expand);
342     setOperationAction(ISD::SELECT_CC, VT, Expand);
343 
344     // Pre and Post inc are supported on loads and stores
345     for (unsigned im = (unsigned)ISD::PRE_INC;
346          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
347       setIndexedLoadAction(im, VT, Legal);
348       setIndexedStoreAction(im, VT, Legal);
349       setIndexedMaskedLoadAction(im, VT, Legal);
350       setIndexedMaskedStoreAction(im, VT, Legal);
351     }
352 
353     if (HasMVEFP) {
354       setOperationAction(ISD::FMINNUM, VT, Legal);
355       setOperationAction(ISD::FMAXNUM, VT, Legal);
356       setOperationAction(ISD::FROUND, VT, Legal);
357       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
358       setOperationAction(ISD::VECREDUCE_FMUL, VT, Custom);
359       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
360       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
361 
362       // No native support for these.
363       setOperationAction(ISD::FDIV, VT, Expand);
364       setOperationAction(ISD::FREM, VT, Expand);
365       setOperationAction(ISD::FSQRT, VT, Expand);
366       setOperationAction(ISD::FSIN, VT, Expand);
367       setOperationAction(ISD::FCOS, VT, Expand);
368       setOperationAction(ISD::FPOW, VT, Expand);
369       setOperationAction(ISD::FLOG, VT, Expand);
370       setOperationAction(ISD::FLOG2, VT, Expand);
371       setOperationAction(ISD::FLOG10, VT, Expand);
372       setOperationAction(ISD::FEXP, VT, Expand);
373       setOperationAction(ISD::FEXP2, VT, Expand);
374       setOperationAction(ISD::FEXP10, VT, Expand);
375       setOperationAction(ISD::FNEARBYINT, VT, Expand);
376     }
377   }
378 
379   // Custom expand vector reductions that are smaller than legal to prevent
380   // spurious zero elements from being added.
381   setOperationAction(ISD::VECREDUCE_FADD, MVT::v4f16, Custom);
382   setOperationAction(ISD::VECREDUCE_FMUL, MVT::v4f16, Custom);
383   setOperationAction(ISD::VECREDUCE_FMIN, MVT::v4f16, Custom);
384   setOperationAction(ISD::VECREDUCE_FMAX, MVT::v4f16, Custom);
385   setOperationAction(ISD::VECREDUCE_FADD, MVT::v2f16, Custom);
386   setOperationAction(ISD::VECREDUCE_FMUL, MVT::v2f16, Custom);
387   setOperationAction(ISD::VECREDUCE_FMIN, MVT::v2f16, Custom);
388   setOperationAction(ISD::VECREDUCE_FMAX, MVT::v2f16, Custom);
389 
390   // We 'support' these types up to bitcast/load/store level, regardless of
391   // MVE integer-only / float support. Only FP data processing on the FP
392   // vector types is inhibited when only integer MVE is available.
393   const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 };
394   for (auto VT : LongTypes) {
395     addRegisterClass(VT, &ARM::MQPRRegClass);
396     setAllExpand(VT);
397     setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
398     setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
399     setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
400     setOperationAction(ISD::VSELECT, VT, Legal);
401     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
402   }
403   setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
404 
405   // We can do bitwise operations on v2i64 vectors
406   setOperationAction(ISD::AND, MVT::v2i64, Legal);
407   setOperationAction(ISD::OR, MVT::v2i64, Legal);
408   setOperationAction(ISD::XOR, MVT::v2i64, Legal);
409 
410   // It is legal to extload from v8i8 to v8i16 and from v4i8/v4i16 to v4i32.
411   addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal);
412   addAllExtLoads(MVT::v4i32, MVT::v4i16, Legal);
413   addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);
414 
415   // It is legal to sign extend from v4i8/v4i16 to v4i32 or v8i8 to v8i16.
416   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8,  Legal);
417   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
418   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
419   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i8,  Legal);
420   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i16, Legal);
421 
422   // Some truncating stores are legal too.
423   setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
424   setTruncStoreAction(MVT::v4i32, MVT::v4i8,  Legal);
425   setTruncStoreAction(MVT::v8i16, MVT::v8i8,  Legal);
426 
427   // Pre and Post inc on these are legal, given the correct extends
428   for (unsigned im = (unsigned)ISD::PRE_INC;
429        im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
430     for (auto VT : {MVT::v8i8, MVT::v4i8, MVT::v4i16}) {
431       setIndexedLoadAction(im, VT, Legal);
432       setIndexedStoreAction(im, VT, Legal);
433       setIndexedMaskedLoadAction(im, VT, Legal);
434       setIndexedMaskedStoreAction(im, VT, Legal);
435     }
436   }
437 
438   // Predicate types
439   const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1, MVT::v2i1};
440   for (auto VT : pTypes) {
441     addRegisterClass(VT, &ARM::VCCRRegClass);
442     setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
443     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
444     setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
445     setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
446     setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
447     setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
448     setOperationAction(ISD::SETCC, VT, Custom);
449     setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
450     setOperationAction(ISD::LOAD, VT, Custom);
451     setOperationAction(ISD::STORE, VT, Custom);
452     setOperationAction(ISD::TRUNCATE, VT, Custom);
453     setOperationAction(ISD::VSELECT, VT, Expand);
454     setOperationAction(ISD::SELECT, VT, Expand);
455     setOperationAction(ISD::SELECT_CC, VT, Expand);
456 
457     if (!HasMVEFP) {
458       setOperationAction(ISD::SINT_TO_FP, VT, Expand);
459       setOperationAction(ISD::UINT_TO_FP, VT, Expand);
460       setOperationAction(ISD::FP_TO_SINT, VT, Expand);
461       setOperationAction(ISD::FP_TO_UINT, VT, Expand);
462     }
463   }
464   setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
465   setOperationAction(ISD::TRUNCATE, MVT::v2i1, Expand);
466   setOperationAction(ISD::AND, MVT::v2i1, Expand);
467   setOperationAction(ISD::OR, MVT::v2i1, Expand);
468   setOperationAction(ISD::XOR, MVT::v2i1, Expand);
469   setOperationAction(ISD::SINT_TO_FP, MVT::v2i1, Expand);
470   setOperationAction(ISD::UINT_TO_FP, MVT::v2i1, Expand);
471   setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Expand);
472   setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Expand);
473 
474   setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
475   setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
476   setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
477   setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
478   setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
479   setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
480   setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
481   setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
482 }
483 
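// The constructor wires up the target: it selects calling conventions for the
// runtime library calls, registers the legal register classes and vector
// types for this subtarget, and records which ISD operations need custom
// lowering, expansion or libcalls.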
484 ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
485                                      const ARMSubtarget &STI)
486     : TargetLowering(TM), Subtarget(&STI) {
487   RegInfo = Subtarget->getRegisterInfo();
488   Itins = Subtarget->getInstrItineraryData();
489 
490   setBooleanContents(ZeroOrOneBooleanContent);
491   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
492 
493   if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() &&
494       !Subtarget->isTargetWatchOS() && !Subtarget->isTargetDriverKit()) {
495     bool IsHFTarget = TM.Options.FloatABIType == FloatABI::Hard;
496     for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
497       setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID),
498                             IsHFTarget ? CallingConv::ARM_AAPCS_VFP
499                                        : CallingConv::ARM_AAPCS);
500   }
501 
502   if (Subtarget->isTargetMachO()) {
503     // Uses VFP for Thumb libfuncs if available.
504     if (Subtarget->isThumb() && Subtarget->hasVFP2Base() &&
505         Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
506       static const struct {
507         const RTLIB::Libcall Op;
508         const char * const Name;
509         const ISD::CondCode Cond;
510       } LibraryCalls[] = {
511         // Single-precision floating-point arithmetic.
512         { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
513         { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
514         { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
515         { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },
516 
517         // Double-precision floating-point arithmetic.
518         { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
519         { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
520         { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
521         { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },
522 
523         // Single-precision comparisons.
524         { RTLIB::OEQ_F32, "__eqsf2vfp",    ISD::SETNE },
525         { RTLIB::UNE_F32, "__nesf2vfp",    ISD::SETNE },
526         { RTLIB::OLT_F32, "__ltsf2vfp",    ISD::SETNE },
527         { RTLIB::OLE_F32, "__lesf2vfp",    ISD::SETNE },
528         { RTLIB::OGE_F32, "__gesf2vfp",    ISD::SETNE },
529         { RTLIB::OGT_F32, "__gtsf2vfp",    ISD::SETNE },
530         { RTLIB::UO_F32,  "__unordsf2vfp", ISD::SETNE },
531 
532         // Double-precision comparisons.
533         { RTLIB::OEQ_F64, "__eqdf2vfp",    ISD::SETNE },
534         { RTLIB::UNE_F64, "__nedf2vfp",    ISD::SETNE },
535         { RTLIB::OLT_F64, "__ltdf2vfp",    ISD::SETNE },
536         { RTLIB::OLE_F64, "__ledf2vfp",    ISD::SETNE },
537         { RTLIB::OGE_F64, "__gedf2vfp",    ISD::SETNE },
538         { RTLIB::OGT_F64, "__gtdf2vfp",    ISD::SETNE },
539         { RTLIB::UO_F64,  "__unorddf2vfp", ISD::SETNE },
540 
541         // Floating-point to integer conversions.
542         // i64 conversions are done via library routines even when generating VFP
543         // instructions, so use the same ones.
544         { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp",    ISD::SETCC_INVALID },
545         { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
546         { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp",    ISD::SETCC_INVALID },
547         { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },
548 
549         // Conversions between floating types.
550         { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp",  ISD::SETCC_INVALID },
551         { RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp", ISD::SETCC_INVALID },
552 
553         // Integer to floating-point conversions.
554         // i64 conversions are done via library routines even when generating VFP
555         // instructions, so use the same ones.
556         // FIXME: There appears to be some naming inconsistency in ARM libgcc:
557         // e.g., __floatunsidf vs. __floatunssidfvfp.
558         { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp",    ISD::SETCC_INVALID },
559         { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
560         { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp",    ISD::SETCC_INVALID },
561         { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
562       };
563 
564       for (const auto &LC : LibraryCalls) {
565         setLibcallName(LC.Op, LC.Name);
566         if (LC.Cond != ISD::SETCC_INVALID)
567           setCmpLibcallCC(LC.Op, LC.Cond);
568       }
569     }
570   }
571 
572   // These libcalls are not available on 32-bit targets.
573   setLibcallName(RTLIB::SHL_I128, nullptr);
574   setLibcallName(RTLIB::SRL_I128, nullptr);
575   setLibcallName(RTLIB::SRA_I128, nullptr);
576   setLibcallName(RTLIB::MUL_I128, nullptr);
577   setLibcallName(RTLIB::MULO_I64, nullptr);
578   setLibcallName(RTLIB::MULO_I128, nullptr);
579 
580   // RTLIB
581   if (Subtarget->isAAPCS_ABI() &&
582       (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
583        Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
584     static const struct {
585       const RTLIB::Libcall Op;
586       const char * const Name;
587       const CallingConv::ID CC;
588       const ISD::CondCode Cond;
589     } LibraryCalls[] = {
590       // Double-precision floating-point arithmetic helper functions
591       // RTABI chapter 4.1.2, Table 2
592       { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
593       { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
594       { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
595       { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
596 
597       // Double-precision floating-point comparison helper functions
598       // RTABI chapter 4.1.2, Table 3
599       { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
600       { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
601       { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
602       { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
603       { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
604       { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
605       { RTLIB::UO_F64,  "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
606 
607       // Single-precision floating-point arithmetic helper functions
608       // RTABI chapter 4.1.2, Table 4
609       { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
610       { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
611       { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
612       { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
613 
614       // Single-precision floating-point comparison helper functions
615       // RTABI chapter 4.1.2, Table 5
616       { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
617       { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
618       { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
619       { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
620       { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
621       { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
622       { RTLIB::UO_F32,  "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
623 
624       // Floating-point to integer conversions.
625       // RTABI chapter 4.1.2, Table 6
626       { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
627       { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
628       { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
629       { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
630       { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
631       { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
632       { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
633       { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
634 
635       // Conversions between floating types.
636       // RTABI chapter 4.1.2, Table 7
637       { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
638       { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
639       { RTLIB::FPEXT_F32_F64,   "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
640 
641       // Integer to floating-point conversions.
642       // RTABI chapter 4.1.2, Table 8
643       { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
644       { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
645       { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
646       { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
647       { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
648       { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
649       { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
650       { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
651 
652       // Long long helper functions
653       // RTABI chapter 4.2, Table 9
654       { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
655       { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
656       { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
657       { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
658 
659       // Integer division functions
660       // RTABI chapter 4.3.1
661       { RTLIB::SDIV_I8,  "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
662       { RTLIB::SDIV_I16, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
663       { RTLIB::SDIV_I32, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
664       { RTLIB::SDIV_I64, "__aeabi_ldivmod",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
665       { RTLIB::UDIV_I8,  "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
666       { RTLIB::UDIV_I16, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
667       { RTLIB::UDIV_I32, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
668       { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
669     };
670 
671     for (const auto &LC : LibraryCalls) {
672       setLibcallName(LC.Op, LC.Name);
673       setLibcallCallingConv(LC.Op, LC.CC);
674       if (LC.Cond != ISD::SETCC_INVALID)
675         setCmpLibcallCC(LC.Op, LC.Cond);
676     }
677 
678     // EABI dependent RTLIB
679     if (TM.Options.EABIVersion == EABI::EABI4 ||
680         TM.Options.EABIVersion == EABI::EABI5) {
681       static const struct {
682         const RTLIB::Libcall Op;
683         const char *const Name;
684         const CallingConv::ID CC;
685         const ISD::CondCode Cond;
686       } MemOpsLibraryCalls[] = {
687         // Memory operations
688         // RTABI chapter 4.3.4
689         { RTLIB::MEMCPY,  "__aeabi_memcpy",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
690         { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
691         { RTLIB::MEMSET,  "__aeabi_memset",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
692       };
693 
694       for (const auto &LC : MemOpsLibraryCalls) {
695         setLibcallName(LC.Op, LC.Name);
696         setLibcallCallingConv(LC.Op, LC.CC);
697         if (LC.Cond != ISD::SETCC_INVALID)
698           setCmpLibcallCC(LC.Op, LC.Cond);
699       }
700     }
701   }
702 
703   if (Subtarget->isTargetWindows()) {
704     static const struct {
705       const RTLIB::Libcall Op;
706       const char * const Name;
707       const CallingConv::ID CC;
708     } LibraryCalls[] = {
709       { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
710       { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
711       { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
712       { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
713       { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
714       { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
715       { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
716       { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
717     };
718 
719     for (const auto &LC : LibraryCalls) {
720       setLibcallName(LC.Op, LC.Name);
721       setLibcallCallingConv(LC.Op, LC.CC);
722     }
723   }
724 
725   // Use divmod compiler-rt calls for iOS 5.0 and later.
726   if (Subtarget->isTargetMachO() &&
727       !(Subtarget->isTargetIOS() &&
728         Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
729     setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
730     setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
731   }
732 
733   // The half <-> float conversion functions are always soft-float on
734   // non-watchOS platforms, but are needed for some targets which use a
735   // hard-float calling convention by default.
736   if (!Subtarget->isTargetWatchABI()) {
737     if (Subtarget->isAAPCS_ABI()) {
738       setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
739       setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
740       setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
741     } else {
742       setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
743       setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
744       setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
745     }
746   }
747 
748   // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
749   // a __gnu_ prefix (which is the default).
750   if (Subtarget->isTargetAEABI()) {
751     static const struct {
752       const RTLIB::Libcall Op;
753       const char * const Name;
754       const CallingConv::ID CC;
755     } LibraryCalls[] = {
756       { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS },
757       { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS },
758       { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS },
759     };
760 
761     for (const auto &LC : LibraryCalls) {
762       setLibcallName(LC.Op, LC.Name);
763       setLibcallCallingConv(LC.Op, LC.CC);
764     }
765   }
766 
767   if (Subtarget->isThumb1Only())
768     addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
769   else
770     addRegisterClass(MVT::i32, &ARM::GPRRegClass);
771 
772   if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() &&
773       Subtarget->hasFPRegs()) {
774     addRegisterClass(MVT::f32, &ARM::SPRRegClass);
775     addRegisterClass(MVT::f64, &ARM::DPRRegClass);
776 
777     setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Custom);
778     setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i32, Custom);
779     setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
780     setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
781 
782     if (!Subtarget->hasVFP2Base())
783       setAllExpand(MVT::f32);
784     if (!Subtarget->hasFP64())
785       setAllExpand(MVT::f64);
786   }
787 
788   if (Subtarget->hasFullFP16()) {
789     addRegisterClass(MVT::f16, &ARM::HPRRegClass);
790     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
791     setOperationAction(ISD::BITCAST, MVT::f16, Custom);
792 
793     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
794     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
795   }
796 
797   if (Subtarget->hasBF16()) {
798     addRegisterClass(MVT::bf16, &ARM::HPRRegClass);
799     setAllExpand(MVT::bf16);
800     if (!Subtarget->hasFullFP16())
801       setOperationAction(ISD::BITCAST, MVT::bf16, Custom);
802   }
803 
804   for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
805     for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
806       setTruncStoreAction(VT, InnerVT, Expand);
807       addAllExtLoads(VT, InnerVT, Expand);
808     }
809 
810     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
811     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
812 
813     setOperationAction(ISD::BSWAP, VT, Expand);
814   }
815 
816   setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
817   setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
818 
819   setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
820   setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);
821 
822   if (Subtarget->hasMVEIntegerOps())
823     addMVEVectorTypes(Subtarget->hasMVEFloatOps());
824 
825   // Combine low-overhead loop intrinsics so that we can lower i1 types.
826   if (Subtarget->hasLOB()) {
827     setTargetDAGCombine({ISD::BRCOND, ISD::BR_CC});
828   }
829 
830   if (Subtarget->hasNEON()) {
831     addDRTypeForNEON(MVT::v2f32);
832     addDRTypeForNEON(MVT::v8i8);
833     addDRTypeForNEON(MVT::v4i16);
834     addDRTypeForNEON(MVT::v2i32);
835     addDRTypeForNEON(MVT::v1i64);
836 
837     addQRTypeForNEON(MVT::v4f32);
838     addQRTypeForNEON(MVT::v2f64);
839     addQRTypeForNEON(MVT::v16i8);
840     addQRTypeForNEON(MVT::v8i16);
841     addQRTypeForNEON(MVT::v4i32);
842     addQRTypeForNEON(MVT::v2i64);
843 
844     if (Subtarget->hasFullFP16()) {
845       addQRTypeForNEON(MVT::v8f16);
846       addDRTypeForNEON(MVT::v4f16);
847     }
848 
849     if (Subtarget->hasBF16()) {
850       addQRTypeForNEON(MVT::v8bf16);
851       addDRTypeForNEON(MVT::v4bf16);
852     }
853   }
854 
855   if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) {
856     // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
857     // none of Neon, MVE or VFP supports any arithmetic operations on it.
858     setOperationAction(ISD::FADD, MVT::v2f64, Expand);
859     setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
860     setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
861     // FIXME: Code duplication: FDIV and FREM are always expanded; see the
862     // ARMTargetLowering::addTypeForNEON method for details.
863     setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
864     setOperationAction(ISD::FREM, MVT::v2f64, Expand);
865     // FIXME: Create unittest.
866     // In other words, find a case where "copysign" appears in the DAG with
867     // vector operands.
868     setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
869     // FIXME: Code duplication: SETCC has a custom operation action; see the
870     // ARMTargetLowering::addTypeForNEON method for details.
871     setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
872     // FIXME: Create unittest for FNEG and for FABS.
873     setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
874     setOperationAction(ISD::FABS, MVT::v2f64, Expand);
875     setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
876     setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
877     setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
878     setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
879     setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
880     setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
881     setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
882     setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
883     setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
884     setOperationAction(ISD::FEXP10, MVT::v2f64, Expand);
885     // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
886     setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
887     setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
888     setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
889     setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
890     setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
891     setOperationAction(ISD::FMA, MVT::v2f64, Expand);
892   }
893 
894   if (Subtarget->hasNEON()) {
895     // The same applies to v4f32, but keep in mind that vadd, vsub and vmul are
896     // natively supported for v4f32.
897     setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
898     setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
899     setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
900     setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
901     setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
902     setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
903     setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
904     setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
905     setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
906     setOperationAction(ISD::FEXP10, MVT::v4f32, Expand);
907     setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
908     setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
909     setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
910     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
911     setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);
912 
913     // Mark v2f32 intrinsics.
914     setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
915     setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
916     setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
917     setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
918     setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
919     setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
920     setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
921     setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
922     setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
923     setOperationAction(ISD::FEXP10, MVT::v2f32, Expand);
924     setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
925     setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
926     setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
927     setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
928     setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);
929 
930     // Neon does not support some operations on v1i64 and v2i64 types.
931     setOperationAction(ISD::MUL, MVT::v1i64, Expand);
932     // Custom handling for some quad-vector types to detect VMULL.
933     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
934     setOperationAction(ISD::MUL, MVT::v4i32, Custom);
935     setOperationAction(ISD::MUL, MVT::v2i64, Custom);
936     // Custom handling for some vector types to avoid expensive expansions
937     setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
938     setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
939     setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
940     setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
941     // Neon does not have single-instruction SINT_TO_FP and UINT_TO_FP with a
942     // destination type that is wider than the source, nor does it have an
943     // FP_TO_[SU]INT instruction with a narrower destination than the
944     // source.
945     setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
946     setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
947     setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
948     setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
949     setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
950     setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
951     setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
952     setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
953 
954     setOperationAction(ISD::FP_ROUND,   MVT::v2f32, Expand);
955     setOperationAction(ISD::FP_EXTEND,  MVT::v2f64, Expand);
956 
957     // NEON does not have a single-instruction CTPOP for vectors with element
958     // types wider than 8 bits. However, custom lowering can leverage the
959     // v8i8/v16i8 vcnt instruction.
960     setOperationAction(ISD::CTPOP,      MVT::v2i32, Custom);
961     setOperationAction(ISD::CTPOP,      MVT::v4i32, Custom);
962     setOperationAction(ISD::CTPOP,      MVT::v4i16, Custom);
963     setOperationAction(ISD::CTPOP,      MVT::v8i16, Custom);
964     setOperationAction(ISD::CTPOP,      MVT::v1i64, Custom);
965     setOperationAction(ISD::CTPOP,      MVT::v2i64, Custom);
966 
967     setOperationAction(ISD::CTLZ,       MVT::v1i64, Expand);
968     setOperationAction(ISD::CTLZ,       MVT::v2i64, Expand);
969 
970     // NEON does not have single instruction CTTZ for vectors.
971     setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
972     setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
973     setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
974     setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);
975 
976     setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
977     setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
978     setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
979     setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);
980 
981     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
982     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
983     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
984     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);
985 
986     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
987     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
988     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
989     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
990 
991     for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
992       setOperationAction(ISD::MULHS, VT, Expand);
993       setOperationAction(ISD::MULHU, VT, Expand);
994     }
995 
996     // NEON only has FMA instructions as of VFP4.
997     if (!Subtarget->hasVFP4Base()) {
998       setOperationAction(ISD::FMA, MVT::v2f32, Expand);
999       setOperationAction(ISD::FMA, MVT::v4f32, Expand);
1000     }
1001 
1002     setTargetDAGCombine({ISD::SHL, ISD::SRL, ISD::SRA, ISD::FP_TO_SINT,
1003                          ISD::FP_TO_UINT, ISD::FDIV, ISD::LOAD});
1004 
1005     // Extending loads from these narrow vector types are legal.
1006     for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
1007                    MVT::v2i32}) {
1008       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
1009         setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
1010         setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
1011         setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
1012       }
1013     }
1014 
1015     for (auto VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16,
1016                     MVT::v4i32}) {
1017       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
1018       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
1019       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
1020       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
1021     }
1022   }
1023 
1024   if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
1025     setTargetDAGCombine(
1026         {ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE, ISD::INSERT_SUBVECTOR,
1027          ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
1028          ISD::SIGN_EXTEND_INREG, ISD::STORE, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND,
1029          ISD::ANY_EXTEND, ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN,
1030          ISD::INTRINSIC_VOID, ISD::VECREDUCE_ADD, ISD::ADD, ISD::BITCAST});
1031   }
1032   if (Subtarget->hasMVEIntegerOps()) {
1033     setTargetDAGCombine({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX,
1034                          ISD::FP_EXTEND, ISD::SELECT, ISD::SELECT_CC,
1035                          ISD::SETCC});
1036   }
1037   if (Subtarget->hasMVEFloatOps()) {
1038     setTargetDAGCombine(ISD::FADD);
1039   }
1040 
1041   if (!Subtarget->hasFP64()) {
1042     // When targeting a floating-point unit with only single-precision
1043     // operations, f64 is legal for the few double-precision instructions which
1044     // are present. However, no double-precision operations other than moves,
1045     // loads and stores are provided by the hardware.
1046     setOperationAction(ISD::FADD,       MVT::f64, Expand);
1047     setOperationAction(ISD::FSUB,       MVT::f64, Expand);
1048     setOperationAction(ISD::FMUL,       MVT::f64, Expand);
1049     setOperationAction(ISD::FMA,        MVT::f64, Expand);
1050     setOperationAction(ISD::FDIV,       MVT::f64, Expand);
1051     setOperationAction(ISD::FREM,       MVT::f64, Expand);
1052     setOperationAction(ISD::FCOPYSIGN,  MVT::f64, Expand);
1053     setOperationAction(ISD::FGETSIGN,   MVT::f64, Expand);
1054     setOperationAction(ISD::FNEG,       MVT::f64, Expand);
1055     setOperationAction(ISD::FABS,       MVT::f64, Expand);
1056     setOperationAction(ISD::FSQRT,      MVT::f64, Expand);
1057     setOperationAction(ISD::FSIN,       MVT::f64, Expand);
1058     setOperationAction(ISD::FCOS,       MVT::f64, Expand);
1059     setOperationAction(ISD::FPOW,       MVT::f64, Expand);
1060     setOperationAction(ISD::FLOG,       MVT::f64, Expand);
1061     setOperationAction(ISD::FLOG2,      MVT::f64, Expand);
1062     setOperationAction(ISD::FLOG10,     MVT::f64, Expand);
1063     setOperationAction(ISD::FEXP,       MVT::f64, Expand);
1064     setOperationAction(ISD::FEXP2,      MVT::f64, Expand);
1065     setOperationAction(ISD::FEXP10,      MVT::f64, Expand);
1066     setOperationAction(ISD::FCEIL,      MVT::f64, Expand);
1067     setOperationAction(ISD::FTRUNC,     MVT::f64, Expand);
1068     setOperationAction(ISD::FRINT,      MVT::f64, Expand);
1069     setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
1070     setOperationAction(ISD::FFLOOR,     MVT::f64, Expand);
1071     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
1072     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
1073     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
1074     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
1075     setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
1076     setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
1077     setOperationAction(ISD::FP_ROUND,   MVT::f32, Custom);
1078     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
1079     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
1080     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::f64, Custom);
1081     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::f64, Custom);
1082     setOperationAction(ISD::STRICT_FP_ROUND,   MVT::f32, Custom);
1083   }
1084 
1085   if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) {
1086     setOperationAction(ISD::FP_EXTEND,  MVT::f64, Custom);
1087     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
1088     if (Subtarget->hasFullFP16()) {
1089       setOperationAction(ISD::FP_ROUND,  MVT::f16, Custom);
1090       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
1091     }
1092   }
1093 
1094   if (!Subtarget->hasFP16()) {
1095     setOperationAction(ISD::FP_EXTEND,  MVT::f32, Custom);
1096     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
1097   }
1098 
1099   computeRegisterProperties(Subtarget->getRegisterInfo());
1100 
1101   // ARM does not have floating-point extending loads.
1102   for (MVT VT : MVT::fp_valuetypes()) {
1103     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1104     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
1105   }
1106 
1107   // ... or truncating stores
1108   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1109   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
1110   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
1111 
1112   // ARM does not have i1 sign-extending loads.
1113   for (MVT VT : MVT::integer_valuetypes())
1114     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
1115 
1116   // ARM supports all 4 flavors of integer indexed load / store.
1117   if (!Subtarget->isThumb1Only()) {
1118     for (unsigned im = (unsigned)ISD::PRE_INC;
1119          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
1120       setIndexedLoadAction(im,  MVT::i1,  Legal);
1121       setIndexedLoadAction(im,  MVT::i8,  Legal);
1122       setIndexedLoadAction(im,  MVT::i16, Legal);
1123       setIndexedLoadAction(im,  MVT::i32, Legal);
1124       setIndexedStoreAction(im, MVT::i1,  Legal);
1125       setIndexedStoreAction(im, MVT::i8,  Legal);
1126       setIndexedStoreAction(im, MVT::i16, Legal);
1127       setIndexedStoreAction(im, MVT::i32, Legal);
1128     }
1129   } else {
1130     // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
1131     setIndexedLoadAction(ISD::POST_INC, MVT::i32,  Legal);
1132     setIndexedStoreAction(ISD::POST_INC, MVT::i32,  Legal);
1133   }
1134 
1135   setOperationAction(ISD::SADDO, MVT::i32, Custom);
1136   setOperationAction(ISD::UADDO, MVT::i32, Custom);
1137   setOperationAction(ISD::SSUBO, MVT::i32, Custom);
1138   setOperationAction(ISD::USUBO, MVT::i32, Custom);
1139 
1140   setOperationAction(ISD::UADDO_CARRY, MVT::i32, Custom);
1141   setOperationAction(ISD::USUBO_CARRY, MVT::i32, Custom);
1142   if (Subtarget->hasDSP()) {
1143     setOperationAction(ISD::SADDSAT, MVT::i8, Custom);
1144     setOperationAction(ISD::SSUBSAT, MVT::i8, Custom);
1145     setOperationAction(ISD::SADDSAT, MVT::i16, Custom);
1146     setOperationAction(ISD::SSUBSAT, MVT::i16, Custom);
1147     setOperationAction(ISD::UADDSAT, MVT::i8, Custom);
1148     setOperationAction(ISD::USUBSAT, MVT::i8, Custom);
1149     setOperationAction(ISD::UADDSAT, MVT::i16, Custom);
1150     setOperationAction(ISD::USUBSAT, MVT::i16, Custom);
1151   }
1152   if (Subtarget->hasBaseDSP()) {
1153     setOperationAction(ISD::SADDSAT, MVT::i32, Legal);
1154     setOperationAction(ISD::SSUBSAT, MVT::i32, Legal);
1155   }
1156 
1157   // i64 operation support.
1158   setOperationAction(ISD::MUL,     MVT::i64, Expand);
1159   setOperationAction(ISD::MULHU,   MVT::i32, Expand);
1160   if (Subtarget->isThumb1Only()) {
1161     setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
1162     setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
1163   }
1164   if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
1165       || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
1166     setOperationAction(ISD::MULHS, MVT::i32, Expand);
1167 
1168   setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
1169   setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
1170   setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
1171   setOperationAction(ISD::SRL,       MVT::i64, Custom);
1172   setOperationAction(ISD::SRA,       MVT::i64, Custom);
1173   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1174   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
1175   setOperationAction(ISD::LOAD, MVT::i64, Custom);
1176   setOperationAction(ISD::STORE, MVT::i64, Custom);
1177 
  // MVE lowers 64-bit shifts to lsll and lsrl,
  // assuming that ISD::SRL and SRA of i64 are already marked custom.
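  // For example (illustrative only), a variable 64-bit shift such as
  //   %r = shl i64 %x, %amt
  // can then be selected to a single lsll on the low/high register pair
  // instead of an __aeabi_llsl libcall.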
1180   if (Subtarget->hasMVEIntegerOps())
1181     setOperationAction(ISD::SHL, MVT::i64, Custom);
1182 
1183   // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1.
1184   if (Subtarget->isThumb1Only()) {
1185     setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
1186     setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
1187     setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
1188   }
1189 
1190   if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())
1191     setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
1192 
1193   // ARM does not have ROTL.
1194   setOperationAction(ISD::ROTL, MVT::i32, Expand);
1195   for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
1196     setOperationAction(ISD::ROTL, VT, Expand);
1197     setOperationAction(ISD::ROTR, VT, Expand);
1198   }
1199   setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
1200   setOperationAction(ISD::CTPOP, MVT::i32, Expand);
1201   if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) {
1202     setOperationAction(ISD::CTLZ, MVT::i32, Expand);
1203     setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, LibCall);
1204   }
1205 
1206   // @llvm.readcyclecounter requires the Performance Monitors extension.
1207   // Default to the 0 expansion on unsupported platforms.
1208   // FIXME: Technically there are older ARM CPUs that have
1209   // implementation-specific ways of obtaining this information.
1210   if (Subtarget->hasPerfMon())
1211     setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
1212 
1213   // Only ARMv6 has BSWAP.
1214   if (!Subtarget->hasV6Ops())
1215     setOperationAction(ISD::BSWAP, MVT::i32, Expand);
1216 
1217   bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
1218                                         : Subtarget->hasDivideInARMMode();
1219   if (!hasDivide) {
    // These are expanded into libcalls if the CPU doesn't have a hardware
    // divider.
1221     setOperationAction(ISD::SDIV,  MVT::i32, LibCall);
1222     setOperationAction(ISD::UDIV,  MVT::i32, LibCall);
1223   }
1224 
1225   if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) {
1226     setOperationAction(ISD::SDIV, MVT::i32, Custom);
1227     setOperationAction(ISD::UDIV, MVT::i32, Custom);
1228 
1229     setOperationAction(ISD::SDIV, MVT::i64, Custom);
1230     setOperationAction(ISD::UDIV, MVT::i64, Custom);
1231   }
1232 
1233   setOperationAction(ISD::SREM,  MVT::i32, Expand);
1234   setOperationAction(ISD::UREM,  MVT::i32, Expand);
1235 
1236   // Register based DivRem for AEABI (RTABI 4.2)
1237   if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
1238       Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
1239       Subtarget->isTargetWindows()) {
1240     setOperationAction(ISD::SREM, MVT::i64, Custom);
1241     setOperationAction(ISD::UREM, MVT::i64, Custom);
1242     HasStandaloneRem = false;
1243 
1244     if (Subtarget->isTargetWindows()) {
1245       const struct {
1246         const RTLIB::Libcall Op;
1247         const char * const Name;
1248         const CallingConv::ID CC;
1249       } LibraryCalls[] = {
1250         { RTLIB::SDIVREM_I8, "__rt_sdiv", CallingConv::ARM_AAPCS },
1251         { RTLIB::SDIVREM_I16, "__rt_sdiv", CallingConv::ARM_AAPCS },
1252         { RTLIB::SDIVREM_I32, "__rt_sdiv", CallingConv::ARM_AAPCS },
1253         { RTLIB::SDIVREM_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS },
1254 
1255         { RTLIB::UDIVREM_I8, "__rt_udiv", CallingConv::ARM_AAPCS },
1256         { RTLIB::UDIVREM_I16, "__rt_udiv", CallingConv::ARM_AAPCS },
1257         { RTLIB::UDIVREM_I32, "__rt_udiv", CallingConv::ARM_AAPCS },
1258         { RTLIB::UDIVREM_I64, "__rt_udiv64", CallingConv::ARM_AAPCS },
1259       };
1260 
1261       for (const auto &LC : LibraryCalls) {
1262         setLibcallName(LC.Op, LC.Name);
1263         setLibcallCallingConv(LC.Op, LC.CC);
1264       }
1265     } else {
1266       const struct {
1267         const RTLIB::Libcall Op;
1268         const char * const Name;
1269         const CallingConv::ID CC;
1270       } LibraryCalls[] = {
1271         { RTLIB::SDIVREM_I8, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
1272         { RTLIB::SDIVREM_I16, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
1273         { RTLIB::SDIVREM_I32, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
1274         { RTLIB::SDIVREM_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS },
1275 
1276         { RTLIB::UDIVREM_I8, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1277         { RTLIB::UDIVREM_I16, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1278         { RTLIB::UDIVREM_I32, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1279         { RTLIB::UDIVREM_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS },
1280       };
1281 
1282       for (const auto &LC : LibraryCalls) {
1283         setLibcallName(LC.Op, LC.Name);
1284         setLibcallCallingConv(LC.Op, LC.CC);
1285       }
1286     }
1287 
1288     setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
1289     setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
1290     setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
1291     setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
1292   } else {
1293     setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1294     setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1295   }
1296 
1297   if (Subtarget->getTargetTriple().isOSMSVCRT()) {
1298     // MSVCRT doesn't have powi; fall back to pow
1299     setLibcallName(RTLIB::POWI_F32, nullptr);
1300     setLibcallName(RTLIB::POWI_F64, nullptr);
1301   }
1302 
1303   setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
1304   setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
1305   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
1306   setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
1307 
1308   setOperationAction(ISD::TRAP, MVT::Other, Legal);
1309   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
1310 
1311   // Use the default implementation.
1312   setOperationAction(ISD::VASTART,            MVT::Other, Custom);
1313   setOperationAction(ISD::VAARG,              MVT::Other, Expand);
1314   setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
1315   setOperationAction(ISD::VAEND,              MVT::Other, Expand);
1316   setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
1317   setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
1318 
1319   if (Subtarget->isTargetWindows())
1320     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
1321   else
1322     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
1323 
1324   // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
1325   // the default expansion.
1326   InsertFencesForAtomic = false;
1327   if (Subtarget->hasAnyDataBarrier() &&
1328       (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
1329     // ATOMIC_FENCE needs custom lowering; the others should have been expanded
1330     // to ldrex/strex loops already.
1331     setOperationAction(ISD::ATOMIC_FENCE,     MVT::Other, Custom);
1332     if (!Subtarget->isThumb() || !Subtarget->isMClass())
1333       setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i64, Custom);
1334 
1335     // On v8, we have particularly efficient implementations of atomic fences
1336     // if they can be combined with nearby atomic loads and stores.
1337     if (!Subtarget->hasAcquireRelease() ||
1338         getTargetMachine().getOptLevel() == CodeGenOptLevel::None) {
1339       // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
1340       InsertFencesForAtomic = true;
1341     }
1342   } else {
1343     // If there's anything we can use as a barrier, go through custom lowering
1344     // for ATOMIC_FENCE.
    // If the target has DMB in Thumb mode, fences can be inserted.
1346     if (Subtarget->hasDataBarrier())
1347       InsertFencesForAtomic = true;
1348 
1349     setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other,
1350                        Subtarget->hasAnyDataBarrier() ? Custom : Expand);
1351 
    // Set them all to LibCall, which will force libcalls.
1353     setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, LibCall);
1354     setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, LibCall);
1355     setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, LibCall);
1356     setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, LibCall);
1357     setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, LibCall);
1358     setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, LibCall);
1359     setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, LibCall);
1360     setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, LibCall);
1361     setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, LibCall);
1362     setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, LibCall);
1363     setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, LibCall);
1364     setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, LibCall);
1365     // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
1366     // Unordered/Monotonic case.
1367     if (!InsertFencesForAtomic) {
1368       setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1369       setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1370     }
1371   }
1372 
1373   // Compute supported atomic widths.
1374   if (Subtarget->isTargetLinux() ||
1375       (!Subtarget->isMClass() && Subtarget->hasV6Ops())) {
1376     // For targets where __sync_* routines are reliably available, we use them
1377     // if necessary.
1378     //
1379     // ARM Linux always supports 64-bit atomics through kernel-assisted atomic
1380     // routines (kernel 3.1 or later). FIXME: Not with compiler-rt?
1381     //
1382     // ARMv6 targets have native instructions in ARM mode. For Thumb mode,
1383     // such targets should provide __sync_* routines, which use the ARM mode
1384     // instructions. (ARMv6 doesn't have dmb, but it has an equivalent
1385     // encoding; see ARMISD::MEMBARRIER_MCR.)
1386     setMaxAtomicSizeInBitsSupported(64);
1387   } else if ((Subtarget->isMClass() && Subtarget->hasV8MBaselineOps()) ||
1388              Subtarget->hasForced32BitAtomics()) {
    // Cortex-M cores (besides Cortex-M0) have 32-bit atomics.
1390     setMaxAtomicSizeInBitsSupported(32);
1391   } else {
1392     // We can't assume anything about other targets; just use libatomic
1393     // routines.
1394     setMaxAtomicSizeInBitsSupported(0);
1395   }
1396 
1397   setMaxDivRemBitWidthSupported(64);
1398 
1399   setOperationAction(ISD::PREFETCH,         MVT::Other, Custom);
1400 
1401   // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
1402   if (!Subtarget->hasV6Ops()) {
1403     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1404     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
1405   }
1406   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
1407 
1408   if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() &&
1409       !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD and i64->f64 into VMOVDRR,
    // iff the target supports VFP2.
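    // For example (register assignment illustrative), a bitcast such as
    //   %i = bitcast double %d to i64
    // becomes a single "vmov r0, r1, d0" rather than a round trip through
    // memory.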
1412     setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1413     setOperationAction(ISD::GET_ROUNDING, MVT::i32, Custom);
1414     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
1415     setOperationAction(ISD::GET_FPENV, MVT::i32, Legal);
1416     setOperationAction(ISD::SET_FPENV, MVT::i32, Legal);
1417     setOperationAction(ISD::RESET_FPENV, MVT::Other, Legal);
1418   }
1419 
1420   // We want to custom lower some of our intrinsics.
1421   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1422   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
1423   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
1424   setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
1425   if (Subtarget->useSjLjEH())
1426     setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
1427 
1428   setOperationAction(ISD::SETCC,     MVT::i32, Expand);
1429   setOperationAction(ISD::SETCC,     MVT::f32, Expand);
1430   setOperationAction(ISD::SETCC,     MVT::f64, Expand);
1431   setOperationAction(ISD::SELECT,    MVT::i32, Custom);
1432   setOperationAction(ISD::SELECT,    MVT::f32, Custom);
1433   setOperationAction(ISD::SELECT,    MVT::f64, Custom);
1434   setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1435   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1436   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1437   if (Subtarget->hasFullFP16()) {
1438     setOperationAction(ISD::SETCC,     MVT::f16, Expand);
1439     setOperationAction(ISD::SELECT,    MVT::f16, Custom);
1440     setOperationAction(ISD::SELECT_CC, MVT::f16, Custom);
1441   }
1442 
1443   setOperationAction(ISD::SETCCCARRY, MVT::i32, Custom);
1444 
1445   setOperationAction(ISD::BRCOND,    MVT::Other, Custom);
1446   setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
1447   if (Subtarget->hasFullFP16())
1448       setOperationAction(ISD::BR_CC, MVT::f16,   Custom);
1449   setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
1450   setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
1451   setOperationAction(ISD::BR_JT,     MVT::Other, Custom);
1452 
1453   // We don't support sin/cos/fmod/copysign/pow
1454   setOperationAction(ISD::FSIN,      MVT::f64, Expand);
1455   setOperationAction(ISD::FSIN,      MVT::f32, Expand);
1456   setOperationAction(ISD::FCOS,      MVT::f32, Expand);
1457   setOperationAction(ISD::FCOS,      MVT::f64, Expand);
1458   setOperationAction(ISD::FSINCOS,   MVT::f64, Expand);
1459   setOperationAction(ISD::FSINCOS,   MVT::f32, Expand);
1460   setOperationAction(ISD::FREM,      MVT::f64, Expand);
1461   setOperationAction(ISD::FREM,      MVT::f32, Expand);
1462   if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() &&
1463       !Subtarget->isThumb1Only()) {
1464     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
1465     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
1466   }
1467   setOperationAction(ISD::FPOW,      MVT::f64, Expand);
1468   setOperationAction(ISD::FPOW,      MVT::f32, Expand);
1469 
1470   if (!Subtarget->hasVFP4Base()) {
1471     setOperationAction(ISD::FMA, MVT::f64, Expand);
1472     setOperationAction(ISD::FMA, MVT::f32, Expand);
1473   }
1474 
1475   // Various VFP goodness
1476   if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {
1477     // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
1478     if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) {
1479       setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1480       setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1481     }
1482 
1483     // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
1484     if (!Subtarget->hasFP16()) {
1485       setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1486       setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1487     }
1488 
1489     // Strict floating-point comparisons need custom lowering.
1490     setOperationAction(ISD::STRICT_FSETCC,  MVT::f16, Custom);
1491     setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom);
1492     setOperationAction(ISD::STRICT_FSETCC,  MVT::f32, Custom);
1493     setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom);
1494     setOperationAction(ISD::STRICT_FSETCC,  MVT::f64, Custom);
1495     setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom);
1496   }
1497 
1498   // Use __sincos_stret if available.
1499   if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
1500       getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
1501     setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1502     setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1503   }
1504 
1505   // FP-ARMv8 implements a lot of rounding-like FP operations.
1506   if (Subtarget->hasFPARMv8Base()) {
1507     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
1508     setOperationAction(ISD::FCEIL, MVT::f32, Legal);
1509     setOperationAction(ISD::FROUND, MVT::f32, Legal);
1510     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
1511     setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
1512     setOperationAction(ISD::FRINT, MVT::f32, Legal);
1513     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
1514     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
1515     if (Subtarget->hasNEON()) {
1516       setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal);
1517       setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal);
1518       setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
1519       setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
1520     }
1521 
1522     if (Subtarget->hasFP64()) {
1523       setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
1524       setOperationAction(ISD::FCEIL, MVT::f64, Legal);
1525       setOperationAction(ISD::FROUND, MVT::f64, Legal);
1526       setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
1527       setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
1528       setOperationAction(ISD::FRINT, MVT::f64, Legal);
1529       setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
1530       setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
1531     }
1532   }
1533 
  // FP16 operations often need to be promoted to call library functions.
1535   if (Subtarget->hasFullFP16()) {
1536     setOperationAction(ISD::FREM, MVT::f16, Promote);
1537     setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
1538     setOperationAction(ISD::FSIN, MVT::f16, Promote);
1539     setOperationAction(ISD::FCOS, MVT::f16, Promote);
1540     setOperationAction(ISD::FSINCOS, MVT::f16, Promote);
1541     setOperationAction(ISD::FPOWI, MVT::f16, Promote);
1542     setOperationAction(ISD::FPOW, MVT::f16, Promote);
1543     setOperationAction(ISD::FEXP, MVT::f16, Promote);
1544     setOperationAction(ISD::FEXP2, MVT::f16, Promote);
1545     setOperationAction(ISD::FEXP10, MVT::f16, Promote);
1546     setOperationAction(ISD::FLOG, MVT::f16, Promote);
1547     setOperationAction(ISD::FLOG10, MVT::f16, Promote);
1548     setOperationAction(ISD::FLOG2, MVT::f16, Promote);
1549 
1550     setOperationAction(ISD::FROUND, MVT::f16, Legal);
1551   }
1552 
1553   if (Subtarget->hasNEON()) {
1554     // vmin and vmax aren't available in a scalar form, so we can use
1555     // a NEON instruction with an undef lane instead.  This has a performance
1556     // penalty on some cores, so we don't do this unless we have been
1557     // asked to by the core tuning model.
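    // For example (registers illustrative), a scalar f32 fminimum can be
    // emitted as "vmin.f32 d0, d0, d1" with the value in lane 0 and the
    // other lane left undefined.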
1558     if (Subtarget->useNEONForSinglePrecisionFP()) {
1559       setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);
1560       setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
1561       setOperationAction(ISD::FMINIMUM, MVT::f16, Legal);
1562       setOperationAction(ISD::FMAXIMUM, MVT::f16, Legal);
1563     }
1564     setOperationAction(ISD::FMINIMUM, MVT::v2f32, Legal);
1565     setOperationAction(ISD::FMAXIMUM, MVT::v2f32, Legal);
1566     setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);
1567     setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
1568 
1569     if (Subtarget->hasFullFP16()) {
1570       setOperationAction(ISD::FMINNUM, MVT::v4f16, Legal);
1571       setOperationAction(ISD::FMAXNUM, MVT::v4f16, Legal);
1572       setOperationAction(ISD::FMINNUM, MVT::v8f16, Legal);
1573       setOperationAction(ISD::FMAXNUM, MVT::v8f16, Legal);
1574 
1575       setOperationAction(ISD::FMINIMUM, MVT::v4f16, Legal);
1576       setOperationAction(ISD::FMAXIMUM, MVT::v4f16, Legal);
1577       setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal);
1578       setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal);
1579     }
1580   }
1581 
1582   // We have target-specific dag combine patterns for the following nodes:
1583   // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
1584   setTargetDAGCombine(
1585       {ISD::ADD, ISD::SUB, ISD::MUL, ISD::AND, ISD::OR, ISD::XOR});
1586 
1587   if (Subtarget->hasMVEIntegerOps())
1588     setTargetDAGCombine(ISD::VSELECT);
1589 
1590   if (Subtarget->hasV6Ops())
1591     setTargetDAGCombine(ISD::SRL);
1592   if (Subtarget->isThumb1Only())
1593     setTargetDAGCombine(ISD::SHL);
1594   // Attempt to lower smin/smax to ssat/usat
1595   if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) ||
1596       Subtarget->isThumb2()) {
1597     setTargetDAGCombine({ISD::SMIN, ISD::SMAX});
1598   }
1599 
1600   setStackPointerRegisterToSaveRestore(ARM::SP);
1601 
1602   if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||
1603       !Subtarget->hasVFP2Base() || Subtarget->hasMinSize())
1604     setSchedulingPreference(Sched::RegPressure);
1605   else
1606     setSchedulingPreference(Sched::Hybrid);
1607 
1608   //// temporary - rewrite interface to use type
1609   MaxStoresPerMemset = 8;
1610   MaxStoresPerMemsetOptSize = 4;
1611   MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
1612   MaxStoresPerMemcpyOptSize = 2;
1613   MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
1614   MaxStoresPerMemmoveOptSize = 2;
1615 
  // On ARM, arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4-byte aligned.
1618   setMinStackArgumentAlignment(Align(4));
1619 
1620   // Prefer likely predicted branches to selects on out-of-order cores.
1621   PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
1622 
1623   setPrefLoopAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment()));
1624   setPrefFunctionAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment()));
1625 
1626   setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4));
1627 
1628   if (Subtarget->isThumb() || Subtarget->isThumb2())
1629     setTargetDAGCombine(ISD::ABS);
1630 }
1631 
1632 bool ARMTargetLowering::useSoftFloat() const {
1633   return Subtarget->useSoftFloat();
1634 }
1635 
1636 // FIXME: It might make sense to define the representative register class as the
1637 // nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
1639 // SPR's representative would be DPR_VFP2. This should work well if register
1640 // pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
1642 // classes' representatives transitively. We have not implemented this because
1643 // of the difficulty prior to coalescing of modeling operand register classes
1644 // due to the common occurrence of cross class copies and subregister insertions
1645 // and extractions.
1646 std::pair<const TargetRegisterClass *, uint8_t>
1647 ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
1648                                            MVT VT) const {
1649   const TargetRegisterClass *RRC = nullptr;
1650   uint8_t Cost = 1;
1651   switch (VT.SimpleTy) {
1652   default:
1653     return TargetLowering::findRepresentativeClass(TRI, VT);
  // Use DPR as the representative register class for all floating-point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
1657   case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
1658   case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
1659     RRC = &ARM::DPRRegClass;
1660     // When NEON is used for SP, only half of the register file is available
1661     // because operations that define both SP and DP results will be constrained
1662     // to the VFP2 class (D0-D15). We currently model this constraint prior to
1663     // coalescing by double-counting the SP regs. See the FIXME above.
1664     if (Subtarget->useNEONForSinglePrecisionFP())
1665       Cost = 2;
1666     break;
1667   case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1668   case MVT::v4f32: case MVT::v2f64:
1669     RRC = &ARM::DPRRegClass;
1670     Cost = 2;
1671     break;
1672   case MVT::v4i64:
1673     RRC = &ARM::DPRRegClass;
1674     Cost = 4;
1675     break;
1676   case MVT::v8i64:
1677     RRC = &ARM::DPRRegClass;
1678     Cost = 8;
1679     break;
1680   }
1681   return std::make_pair(RRC, Cost);
1682 }
1683 
1684 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
1685 #define MAKE_CASE(V)                                                           \
1686   case V:                                                                      \
1687     return #V;
1688   switch ((ARMISD::NodeType)Opcode) {
1689   case ARMISD::FIRST_NUMBER:
1690     break;
1691     MAKE_CASE(ARMISD::Wrapper)
1692     MAKE_CASE(ARMISD::WrapperPIC)
1693     MAKE_CASE(ARMISD::WrapperJT)
1694     MAKE_CASE(ARMISD::COPY_STRUCT_BYVAL)
1695     MAKE_CASE(ARMISD::CALL)
1696     MAKE_CASE(ARMISD::CALL_PRED)
1697     MAKE_CASE(ARMISD::CALL_NOLINK)
1698     MAKE_CASE(ARMISD::tSECALL)
1699     MAKE_CASE(ARMISD::t2CALL_BTI)
1700     MAKE_CASE(ARMISD::BRCOND)
1701     MAKE_CASE(ARMISD::BR_JT)
1702     MAKE_CASE(ARMISD::BR2_JT)
1703     MAKE_CASE(ARMISD::RET_GLUE)
1704     MAKE_CASE(ARMISD::SERET_GLUE)
1705     MAKE_CASE(ARMISD::INTRET_GLUE)
1706     MAKE_CASE(ARMISD::PIC_ADD)
1707     MAKE_CASE(ARMISD::CMP)
1708     MAKE_CASE(ARMISD::CMN)
1709     MAKE_CASE(ARMISD::CMPZ)
1710     MAKE_CASE(ARMISD::CMPFP)
1711     MAKE_CASE(ARMISD::CMPFPE)
1712     MAKE_CASE(ARMISD::CMPFPw0)
1713     MAKE_CASE(ARMISD::CMPFPEw0)
1714     MAKE_CASE(ARMISD::BCC_i64)
1715     MAKE_CASE(ARMISD::FMSTAT)
1716     MAKE_CASE(ARMISD::CMOV)
1717     MAKE_CASE(ARMISD::SUBS)
1718     MAKE_CASE(ARMISD::SSAT)
1719     MAKE_CASE(ARMISD::USAT)
1720     MAKE_CASE(ARMISD::ASRL)
1721     MAKE_CASE(ARMISD::LSRL)
1722     MAKE_CASE(ARMISD::LSLL)
1723     MAKE_CASE(ARMISD::SRL_GLUE)
1724     MAKE_CASE(ARMISD::SRA_GLUE)
1725     MAKE_CASE(ARMISD::RRX)
1726     MAKE_CASE(ARMISD::ADDC)
1727     MAKE_CASE(ARMISD::ADDE)
1728     MAKE_CASE(ARMISD::SUBC)
1729     MAKE_CASE(ARMISD::SUBE)
1730     MAKE_CASE(ARMISD::LSLS)
1731     MAKE_CASE(ARMISD::VMOVRRD)
1732     MAKE_CASE(ARMISD::VMOVDRR)
1733     MAKE_CASE(ARMISD::VMOVhr)
1734     MAKE_CASE(ARMISD::VMOVrh)
1735     MAKE_CASE(ARMISD::VMOVSR)
1736     MAKE_CASE(ARMISD::EH_SJLJ_SETJMP)
1737     MAKE_CASE(ARMISD::EH_SJLJ_LONGJMP)
1738     MAKE_CASE(ARMISD::EH_SJLJ_SETUP_DISPATCH)
1739     MAKE_CASE(ARMISD::TC_RETURN)
1740     MAKE_CASE(ARMISD::THREAD_POINTER)
1741     MAKE_CASE(ARMISD::DYN_ALLOC)
1742     MAKE_CASE(ARMISD::MEMBARRIER_MCR)
1743     MAKE_CASE(ARMISD::PRELOAD)
1744     MAKE_CASE(ARMISD::LDRD)
1745     MAKE_CASE(ARMISD::STRD)
1746     MAKE_CASE(ARMISD::WIN__CHKSTK)
1747     MAKE_CASE(ARMISD::WIN__DBZCHK)
1748     MAKE_CASE(ARMISD::PREDICATE_CAST)
1749     MAKE_CASE(ARMISD::VECTOR_REG_CAST)
1750     MAKE_CASE(ARMISD::MVESEXT)
1751     MAKE_CASE(ARMISD::MVEZEXT)
1752     MAKE_CASE(ARMISD::MVETRUNC)
1753     MAKE_CASE(ARMISD::VCMP)
1754     MAKE_CASE(ARMISD::VCMPZ)
1755     MAKE_CASE(ARMISD::VTST)
1756     MAKE_CASE(ARMISD::VSHLs)
1757     MAKE_CASE(ARMISD::VSHLu)
1758     MAKE_CASE(ARMISD::VSHLIMM)
1759     MAKE_CASE(ARMISD::VSHRsIMM)
1760     MAKE_CASE(ARMISD::VSHRuIMM)
1761     MAKE_CASE(ARMISD::VRSHRsIMM)
1762     MAKE_CASE(ARMISD::VRSHRuIMM)
1763     MAKE_CASE(ARMISD::VRSHRNIMM)
1764     MAKE_CASE(ARMISD::VQSHLsIMM)
1765     MAKE_CASE(ARMISD::VQSHLuIMM)
1766     MAKE_CASE(ARMISD::VQSHLsuIMM)
1767     MAKE_CASE(ARMISD::VQSHRNsIMM)
1768     MAKE_CASE(ARMISD::VQSHRNuIMM)
1769     MAKE_CASE(ARMISD::VQSHRNsuIMM)
1770     MAKE_CASE(ARMISD::VQRSHRNsIMM)
1771     MAKE_CASE(ARMISD::VQRSHRNuIMM)
1772     MAKE_CASE(ARMISD::VQRSHRNsuIMM)
1773     MAKE_CASE(ARMISD::VSLIIMM)
1774     MAKE_CASE(ARMISD::VSRIIMM)
1775     MAKE_CASE(ARMISD::VGETLANEu)
1776     MAKE_CASE(ARMISD::VGETLANEs)
1777     MAKE_CASE(ARMISD::VMOVIMM)
1778     MAKE_CASE(ARMISD::VMVNIMM)
1779     MAKE_CASE(ARMISD::VMOVFPIMM)
1780     MAKE_CASE(ARMISD::VDUP)
1781     MAKE_CASE(ARMISD::VDUPLANE)
1782     MAKE_CASE(ARMISD::VEXT)
1783     MAKE_CASE(ARMISD::VREV64)
1784     MAKE_CASE(ARMISD::VREV32)
1785     MAKE_CASE(ARMISD::VREV16)
1786     MAKE_CASE(ARMISD::VZIP)
1787     MAKE_CASE(ARMISD::VUZP)
1788     MAKE_CASE(ARMISD::VTRN)
1789     MAKE_CASE(ARMISD::VTBL1)
1790     MAKE_CASE(ARMISD::VTBL2)
1791     MAKE_CASE(ARMISD::VMOVN)
1792     MAKE_CASE(ARMISD::VQMOVNs)
1793     MAKE_CASE(ARMISD::VQMOVNu)
1794     MAKE_CASE(ARMISD::VCVTN)
1795     MAKE_CASE(ARMISD::VCVTL)
1796     MAKE_CASE(ARMISD::VIDUP)
1797     MAKE_CASE(ARMISD::VMULLs)
1798     MAKE_CASE(ARMISD::VMULLu)
1799     MAKE_CASE(ARMISD::VQDMULH)
1800     MAKE_CASE(ARMISD::VADDVs)
1801     MAKE_CASE(ARMISD::VADDVu)
1802     MAKE_CASE(ARMISD::VADDVps)
1803     MAKE_CASE(ARMISD::VADDVpu)
1804     MAKE_CASE(ARMISD::VADDLVs)
1805     MAKE_CASE(ARMISD::VADDLVu)
1806     MAKE_CASE(ARMISD::VADDLVAs)
1807     MAKE_CASE(ARMISD::VADDLVAu)
1808     MAKE_CASE(ARMISD::VADDLVps)
1809     MAKE_CASE(ARMISD::VADDLVpu)
1810     MAKE_CASE(ARMISD::VADDLVAps)
1811     MAKE_CASE(ARMISD::VADDLVApu)
1812     MAKE_CASE(ARMISD::VMLAVs)
1813     MAKE_CASE(ARMISD::VMLAVu)
1814     MAKE_CASE(ARMISD::VMLAVps)
1815     MAKE_CASE(ARMISD::VMLAVpu)
1816     MAKE_CASE(ARMISD::VMLALVs)
1817     MAKE_CASE(ARMISD::VMLALVu)
1818     MAKE_CASE(ARMISD::VMLALVps)
1819     MAKE_CASE(ARMISD::VMLALVpu)
1820     MAKE_CASE(ARMISD::VMLALVAs)
1821     MAKE_CASE(ARMISD::VMLALVAu)
1822     MAKE_CASE(ARMISD::VMLALVAps)
1823     MAKE_CASE(ARMISD::VMLALVApu)
1824     MAKE_CASE(ARMISD::VMINVu)
1825     MAKE_CASE(ARMISD::VMINVs)
1826     MAKE_CASE(ARMISD::VMAXVu)
1827     MAKE_CASE(ARMISD::VMAXVs)
1828     MAKE_CASE(ARMISD::UMAAL)
1829     MAKE_CASE(ARMISD::UMLAL)
1830     MAKE_CASE(ARMISD::SMLAL)
1831     MAKE_CASE(ARMISD::SMLALBB)
1832     MAKE_CASE(ARMISD::SMLALBT)
1833     MAKE_CASE(ARMISD::SMLALTB)
1834     MAKE_CASE(ARMISD::SMLALTT)
1835     MAKE_CASE(ARMISD::SMULWB)
1836     MAKE_CASE(ARMISD::SMULWT)
1837     MAKE_CASE(ARMISD::SMLALD)
1838     MAKE_CASE(ARMISD::SMLALDX)
1839     MAKE_CASE(ARMISD::SMLSLD)
1840     MAKE_CASE(ARMISD::SMLSLDX)
1841     MAKE_CASE(ARMISD::SMMLAR)
1842     MAKE_CASE(ARMISD::SMMLSR)
1843     MAKE_CASE(ARMISD::QADD16b)
1844     MAKE_CASE(ARMISD::QSUB16b)
1845     MAKE_CASE(ARMISD::QADD8b)
1846     MAKE_CASE(ARMISD::QSUB8b)
1847     MAKE_CASE(ARMISD::UQADD16b)
1848     MAKE_CASE(ARMISD::UQSUB16b)
1849     MAKE_CASE(ARMISD::UQADD8b)
1850     MAKE_CASE(ARMISD::UQSUB8b)
1851     MAKE_CASE(ARMISD::BUILD_VECTOR)
1852     MAKE_CASE(ARMISD::BFI)
1853     MAKE_CASE(ARMISD::VORRIMM)
1854     MAKE_CASE(ARMISD::VBICIMM)
1855     MAKE_CASE(ARMISD::VBSP)
1856     MAKE_CASE(ARMISD::MEMCPY)
1857     MAKE_CASE(ARMISD::VLD1DUP)
1858     MAKE_CASE(ARMISD::VLD2DUP)
1859     MAKE_CASE(ARMISD::VLD3DUP)
1860     MAKE_CASE(ARMISD::VLD4DUP)
1861     MAKE_CASE(ARMISD::VLD1_UPD)
1862     MAKE_CASE(ARMISD::VLD2_UPD)
1863     MAKE_CASE(ARMISD::VLD3_UPD)
1864     MAKE_CASE(ARMISD::VLD4_UPD)
1865     MAKE_CASE(ARMISD::VLD1x2_UPD)
1866     MAKE_CASE(ARMISD::VLD1x3_UPD)
1867     MAKE_CASE(ARMISD::VLD1x4_UPD)
1868     MAKE_CASE(ARMISD::VLD2LN_UPD)
1869     MAKE_CASE(ARMISD::VLD3LN_UPD)
1870     MAKE_CASE(ARMISD::VLD4LN_UPD)
1871     MAKE_CASE(ARMISD::VLD1DUP_UPD)
1872     MAKE_CASE(ARMISD::VLD2DUP_UPD)
1873     MAKE_CASE(ARMISD::VLD3DUP_UPD)
1874     MAKE_CASE(ARMISD::VLD4DUP_UPD)
1875     MAKE_CASE(ARMISD::VST1_UPD)
1876     MAKE_CASE(ARMISD::VST2_UPD)
1877     MAKE_CASE(ARMISD::VST3_UPD)
1878     MAKE_CASE(ARMISD::VST4_UPD)
1879     MAKE_CASE(ARMISD::VST1x2_UPD)
1880     MAKE_CASE(ARMISD::VST1x3_UPD)
1881     MAKE_CASE(ARMISD::VST1x4_UPD)
1882     MAKE_CASE(ARMISD::VST2LN_UPD)
1883     MAKE_CASE(ARMISD::VST3LN_UPD)
1884     MAKE_CASE(ARMISD::VST4LN_UPD)
1885     MAKE_CASE(ARMISD::WLS)
1886     MAKE_CASE(ARMISD::WLSSETUP)
1887     MAKE_CASE(ARMISD::LE)
1888     MAKE_CASE(ARMISD::LOOP_DEC)
1889     MAKE_CASE(ARMISD::CSINV)
1890     MAKE_CASE(ARMISD::CSNEG)
1891     MAKE_CASE(ARMISD::CSINC)
1892     MAKE_CASE(ARMISD::MEMCPYLOOP)
1893     MAKE_CASE(ARMISD::MEMSETLOOP)
1894 #undef MAKE_CASE
1895   }
1896   return nullptr;
1897 }
1898 
1899 EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1900                                           EVT VT) const {
1901   if (!VT.isVector())
1902     return getPointerTy(DL);
1903 
1904   // MVE has a predicate register.
1905   if ((Subtarget->hasMVEIntegerOps() &&
1906        (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
1907         VT == MVT::v16i8)) ||
1908       (Subtarget->hasMVEFloatOps() &&
1909        (VT == MVT::v2f64 || VT == MVT::v4f32 || VT == MVT::v8f16)))
1910     return MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1911   return VT.changeVectorElementTypeToInteger();
1912 }
1913 
1914 /// getRegClassFor - Return the register class that should be used for the
1915 /// specified value type.
1916 const TargetRegisterClass *
1917 ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
1918   (void)isDivergent;
1919   // Map v4i64 to QQ registers but do not make the type legal. Similarly map
1920   // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
1921   // load / store 4 to 8 consecutive NEON D registers, or 2 to 4 consecutive
1922   // MVE Q registers.
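  // For example (illustrative), a vld4 that defines four consecutive D
  // registers can be modelled as a single v4i64 REG_SEQUENCE in QQPR.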
1923   if (Subtarget->hasNEON()) {
1924     if (VT == MVT::v4i64)
1925       return &ARM::QQPRRegClass;
1926     if (VT == MVT::v8i64)
1927       return &ARM::QQQQPRRegClass;
1928   }
1929   if (Subtarget->hasMVEIntegerOps()) {
1930     if (VT == MVT::v4i64)
1931       return &ARM::MQQPRRegClass;
1932     if (VT == MVT::v8i64)
1933       return &ARM::MQQQQPRRegClass;
1934   }
1935   return TargetLowering::getRegClassFor(VT);
1936 }
1937 
// memcpy and other memory intrinsics typically try to use LDM/STM if the
// source/dest is aligned and the copy size is large enough. We therefore want
// to align such objects passed to memory intrinsics.
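// For example (sizes illustrative), a 32-byte @llvm.memcpy from a local
// object that is only 4-byte aligned can have that object's alignment
// raised to 8 bytes here, letting the expansion use 8-byte-aligned LDM/STM.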
1941 bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
1942                                                Align &PrefAlign) const {
1943   if (!isa<MemIntrinsic>(CI))
1944     return false;
1945   MinSize = 8;
1946   // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
1947   // cycle faster than 4-byte aligned LDM.
1948   PrefAlign =
1949       (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? Align(8) : Align(4));
1950   return true;
1951 }
1952 
1953 // Create a fast isel object.
1954 FastISel *
1955 ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
1956                                   const TargetLibraryInfo *libInfo) const {
1957   return ARM::createFastISel(funcInfo, libInfo);
1958 }
1959 
1960 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
1961   unsigned NumVals = N->getNumValues();
1962   if (!NumVals)
1963     return Sched::RegPressure;
1964 
1965   for (unsigned i = 0; i != NumVals; ++i) {
1966     EVT VT = N->getValueType(i);
1967     if (VT == MVT::Glue || VT == MVT::Other)
1968       continue;
1969     if (VT.isFloatingPoint() || VT.isVector())
1970       return Sched::ILP;
1971   }
1972 
1973   if (!N->isMachineOpcode())
1974     return Sched::RegPressure;
1975 
  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
1978   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1979   const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
1980 
1981   if (MCID.getNumDefs() == 0)
1982     return Sched::RegPressure;
1983   if (!Itins->isEmpty() &&
1984       Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2U)
1985     return Sched::ILP;
1986 
1987   return Sched::RegPressure;
1988 }
1989 
1990 //===----------------------------------------------------------------------===//
1991 // Lowering Code
1992 //===----------------------------------------------------------------------===//
1993 
1994 static bool isSRL16(const SDValue &Op) {
1995   if (Op.getOpcode() != ISD::SRL)
1996     return false;
1997   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1998     return Const->getZExtValue() == 16;
1999   return false;
2000 }
2001 
2002 static bool isSRA16(const SDValue &Op) {
2003   if (Op.getOpcode() != ISD::SRA)
2004     return false;
2005   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2006     return Const->getZExtValue() == 16;
2007   return false;
2008 }
2009 
2010 static bool isSHL16(const SDValue &Op) {
2011   if (Op.getOpcode() != ISD::SHL)
2012     return false;
2013   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2014     return Const->getZExtValue() == 16;
2015   return false;
2016 }
2017 
// Check for a signed 16-bit value. We special-case SRA because it makes it
// simpler when also looking for SRAs that aren't sign-extending a smaller
// value. Without the check, we'd need to take extra care with checking
// order for some operations.
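// For example, (sra (shl x, 16), 16) is accepted, as is any node whose
// computed sign-bit count is exactly 17 (an i32 that is really a
// sign-extended 16-bit quantity).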
2022 static bool isS16(const SDValue &Op, SelectionDAG &DAG) {
2023   if (isSRA16(Op))
2024     return isSHL16(Op.getOperand(0));
2025   return DAG.ComputeNumSignBits(Op) == 17;
2026 }
2027 
2028 /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
2029 static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
2030   switch (CC) {
2031   default: llvm_unreachable("Unknown condition code!");
2032   case ISD::SETNE:  return ARMCC::NE;
2033   case ISD::SETEQ:  return ARMCC::EQ;
2034   case ISD::SETGT:  return ARMCC::GT;
2035   case ISD::SETGE:  return ARMCC::GE;
2036   case ISD::SETLT:  return ARMCC::LT;
2037   case ISD::SETLE:  return ARMCC::LE;
2038   case ISD::SETUGT: return ARMCC::HI;
2039   case ISD::SETUGE: return ARMCC::HS;
2040   case ISD::SETULT: return ARMCC::LO;
2041   case ISD::SETULE: return ARMCC::LS;
2042   }
2043 }
2044 
2045 /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
2046 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
2047                         ARMCC::CondCodes &CondCode2) {
2048   CondCode2 = ARMCC::AL;
2049   switch (CC) {
2050   default: llvm_unreachable("Unknown FP condition!");
2051   case ISD::SETEQ:
2052   case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
2053   case ISD::SETGT:
2054   case ISD::SETOGT: CondCode = ARMCC::GT; break;
2055   case ISD::SETGE:
2056   case ISD::SETOGE: CondCode = ARMCC::GE; break;
2057   case ISD::SETOLT: CondCode = ARMCC::MI; break;
2058   case ISD::SETOLE: CondCode = ARMCC::LS; break;
2059   case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
2060   case ISD::SETO:   CondCode = ARMCC::VC; break;
2061   case ISD::SETUO:  CondCode = ARMCC::VS; break;
2062   case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
2063   case ISD::SETUGT: CondCode = ARMCC::HI; break;
2064   case ISD::SETUGE: CondCode = ARMCC::PL; break;
2065   case ISD::SETLT:
2066   case ISD::SETULT: CondCode = ARMCC::LT; break;
2067   case ISD::SETLE:
2068   case ISD::SETULE: CondCode = ARMCC::LE; break;
2069   case ISD::SETNE:
2070   case ISD::SETUNE: CondCode = ARMCC::NE; break;
2071   }
2072 }
2073 
2074 //===----------------------------------------------------------------------===//
2075 //                      Calling Convention Implementation
2076 //===----------------------------------------------------------------------===//
2077 
2078 /// getEffectiveCallingConv - Get the effective calling convention, taking into
2079 /// account presence of floating point hardware and calling convention
2080 /// limitations, such as support for variadic functions.
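/// For example, a variadic call on a hard-float AAPCS target uses the base
/// ARM_AAPCS convention even if the declared convention is ARM_AAPCS_VFP.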
2081 CallingConv::ID
2082 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
2083                                            bool isVarArg) const {
2084   switch (CC) {
2085   default:
2086     report_fatal_error("Unsupported calling convention");
2087   case CallingConv::ARM_AAPCS:
2088   case CallingConv::ARM_APCS:
2089   case CallingConv::GHC:
2090   case CallingConv::CFGuard_Check:
2091     return CC;
2092   case CallingConv::PreserveMost:
2093     return CallingConv::PreserveMost;
2094   case CallingConv::PreserveAll:
2095     return CallingConv::PreserveAll;
2096   case CallingConv::ARM_AAPCS_VFP:
2097   case CallingConv::Swift:
2098   case CallingConv::SwiftTail:
2099     return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
2100   case CallingConv::C:
2101   case CallingConv::Tail:
2102     if (!Subtarget->isAAPCS_ABI())
2103       return CallingConv::ARM_APCS;
2104     else if (Subtarget->hasFPRegs() && !Subtarget->isThumb1Only() &&
2105              getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
2106              !isVarArg)
2107       return CallingConv::ARM_AAPCS_VFP;
2108     else
2109       return CallingConv::ARM_AAPCS;
2110   case CallingConv::Fast:
2111   case CallingConv::CXX_FAST_TLS:
2112     if (!Subtarget->isAAPCS_ABI()) {
2113       if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg)
2114         return CallingConv::Fast;
2115       return CallingConv::ARM_APCS;
2116     } else if (Subtarget->hasVFP2Base() &&
2117                !Subtarget->isThumb1Only() && !isVarArg)
2118       return CallingConv::ARM_AAPCS_VFP;
2119     else
2120       return CallingConv::ARM_AAPCS;
2121   }
2122 }
2123 
2124 CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
2125                                                  bool isVarArg) const {
2126   return CCAssignFnForNode(CC, false, isVarArg);
2127 }
2128 
2129 CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
2130                                                    bool isVarArg) const {
2131   return CCAssignFnForNode(CC, true, isVarArg);
2132 }
2133 
2134 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given
2135 /// CallingConvention.
2136 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
2137                                                  bool Return,
2138                                                  bool isVarArg) const {
2139   switch (getEffectiveCallingConv(CC, isVarArg)) {
2140   default:
2141     report_fatal_error("Unsupported calling convention");
2142   case CallingConv::ARM_APCS:
2143     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
2144   case CallingConv::ARM_AAPCS:
2145     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
2146   case CallingConv::ARM_AAPCS_VFP:
2147     return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
2148   case CallingConv::Fast:
2149     return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
2150   case CallingConv::GHC:
2151     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
2152   case CallingConv::PreserveMost:
2153     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
2154   case CallingConv::PreserveAll:
2155     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
2156   case CallingConv::CFGuard_Check:
2157     return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check);
2158   }
2159 }
2160 
2161 SDValue ARMTargetLowering::MoveToHPR(const SDLoc &dl, SelectionDAG &DAG,
2162                                      MVT LocVT, MVT ValVT, SDValue Val) const {
2163   Val = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocVT.getSizeInBits()),
2164                     Val);
2165   if (Subtarget->hasFullFP16()) {
2166     Val = DAG.getNode(ARMISD::VMOVhr, dl, ValVT, Val);
2167   } else {
2168     Val = DAG.getNode(ISD::TRUNCATE, dl,
2169                       MVT::getIntegerVT(ValVT.getSizeInBits()), Val);
2170     Val = DAG.getNode(ISD::BITCAST, dl, ValVT, Val);
2171   }
2172   return Val;
2173 }
2174 
2175 SDValue ARMTargetLowering::MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG,
2176                                        MVT LocVT, MVT ValVT,
2177                                        SDValue Val) const {
2178   if (Subtarget->hasFullFP16()) {
2179     Val = DAG.getNode(ARMISD::VMOVrh, dl,
2180                       MVT::getIntegerVT(LocVT.getSizeInBits()), Val);
2181   } else {
2182     Val = DAG.getNode(ISD::BITCAST, dl,
2183                       MVT::getIntegerVT(ValVT.getSizeInBits()), Val);
2184     Val = DAG.getNode(ISD::ZERO_EXTEND, dl,
2185                       MVT::getIntegerVT(LocVT.getSizeInBits()), Val);
2186   }
2187   return DAG.getNode(ISD::BITCAST, dl, LocVT, Val);
2188 }
2189 
2190 /// LowerCallResult - Lower the result values of a call into the
2191 /// appropriate copies out of appropriate physical registers.
2192 SDValue ARMTargetLowering::LowerCallResult(
2193     SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
2194     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2195     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
2196     SDValue ThisVal) const {
2197   // Assign locations to each value returned by this call.
2198   SmallVector<CCValAssign, 16> RVLocs;
2199   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2200                  *DAG.getContext());
2201   CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg));
2202 
2203   // Copy all of the result registers out of their specified physreg.
2204   for (unsigned i = 0; i != RVLocs.size(); ++i) {
2205     CCValAssign VA = RVLocs[i];
2206 
    // Pass the 'this' value directly from the argument to the return value,
    // to avoid register unit interference.
2209     if (i == 0 && isThisReturn) {
2210       assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
2211              "unexpected return calling convention register assignment");
2212       InVals.push_back(ThisVal);
2213       continue;
2214     }
2215 
2216     SDValue Val;
2217     if (VA.needsCustom() &&
2218         (VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2f64)) {
2219       // Handle f64 or half of a v2f64.
2220       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
2221                                       InGlue);
2222       Chain = Lo.getValue(1);
2223       InGlue = Lo.getValue(2);
2224       VA = RVLocs[++i]; // skip ahead to next loc
2225       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
2226                                       InGlue);
2227       Chain = Hi.getValue(1);
2228       InGlue = Hi.getValue(2);
2229       if (!Subtarget->isLittle())
2230         std::swap (Lo, Hi);
2231       Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
2232 
2233       if (VA.getLocVT() == MVT::v2f64) {
2234         SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
2235         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
2236                           DAG.getConstant(0, dl, MVT::i32));
2237 
2238         VA = RVLocs[++i]; // skip ahead to next loc
2239         Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InGlue);
2240         Chain = Lo.getValue(1);
2241         InGlue = Lo.getValue(2);
2242         VA = RVLocs[++i]; // skip ahead to next loc
2243         Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InGlue);
2244         Chain = Hi.getValue(1);
2245         InGlue = Hi.getValue(2);
2246         if (!Subtarget->isLittle())
2247           std::swap (Lo, Hi);
2248         Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
2249         Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
2250                           DAG.getConstant(1, dl, MVT::i32));
2251       }
2252     } else {
2253       Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
2254                                InGlue);
2255       Chain = Val.getValue(1);
2256       InGlue = Val.getValue(2);
2257     }
2258 
2259     switch (VA.getLocInfo()) {
2260     default: llvm_unreachable("Unknown loc info!");
2261     case CCValAssign::Full: break;
2262     case CCValAssign::BCvt:
2263       Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
2264       break;
2265     }
2266 
    // f16 arguments have their size extended to 4 bytes and passed as if they
    // had been copied to the LSBs of a 32-bit register.
    // For that, they are passed extended to i32 (soft ABI) or to f32 (hard ABI).
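    // For example (soft-float ABI, register illustrative), a half result
    // returned in the low 16 bits of r0 arrives here as an i32 and is
    // narrowed back to f16 by MoveToHPR.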
2270     if (VA.needsCustom() &&
2271         (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
2272       Val = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Val);
2273 
2274     InVals.push_back(Val);
2275   }
2276 
2277   return Chain;
2278 }
2279 
2280 std::pair<SDValue, MachinePointerInfo> ARMTargetLowering::computeAddrForCallArg(
2281     const SDLoc &dl, SelectionDAG &DAG, const CCValAssign &VA, SDValue StackPtr,
2282     bool IsTailCall, int SPDiff) const {
2283   SDValue DstAddr;
2284   MachinePointerInfo DstInfo;
2285   int32_t Offset = VA.getLocMemOffset();
2286   MachineFunction &MF = DAG.getMachineFunction();
2287 
  if (IsTailCall) {
    Offset += SPDiff;
    auto PtrVT = getPointerTy(DAG.getDataLayout());
    int Size = VA.getLocVT().getFixedSizeInBits() / 8;
    int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
    DstAddr = DAG.getFrameIndex(FI, PtrVT);
    DstInfo = MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
  } else {
    SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
    DstAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                          StackPtr, PtrOff);
    DstInfo = MachinePointerInfo::getStack(DAG.getMachineFunction(), Offset);
  }
2303 
2304   return std::make_pair(DstAddr, DstInfo);
2305 }
2306 
2307 void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
2308                                          SDValue Chain, SDValue &Arg,
2309                                          RegsToPassVector &RegsToPass,
2310                                          CCValAssign &VA, CCValAssign &NextVA,
2311                                          SDValue &StackPtr,
2312                                          SmallVectorImpl<SDValue> &MemOpChains,
2313                                          bool IsTailCall,
2314                                          int SPDiff) const {
2315   SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
2316                               DAG.getVTList(MVT::i32, MVT::i32), Arg);
2317   unsigned id = Subtarget->isLittle() ? 0 : 1;
2318   RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));
2319 
2320   if (NextVA.isRegLoc())
    RegsToPass.push_back(
        std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1 - id)));
2322   else {
2323     assert(NextVA.isMemLoc());
2324     if (!StackPtr.getNode())
2325       StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP,
2326                                     getPointerTy(DAG.getDataLayout()));
2327 
2328     SDValue DstAddr;
2329     MachinePointerInfo DstInfo;
2330     std::tie(DstAddr, DstInfo) =
2331         computeAddrForCallArg(dl, DAG, NextVA, StackPtr, IsTailCall, SPDiff);
2332     MemOpChains.push_back(
2333         DAG.getStore(Chain, dl, fmrrd.getValue(1 - id), DstAddr, DstInfo));
2334   }
2335 }
2336 
2337 static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) {
2338   return (CC == CallingConv::Fast && GuaranteeTailCalls) ||
2339          CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
2340 }
2341 
/// LowerCall - Lower a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
2345 SDValue
2346 ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2347                              SmallVectorImpl<SDValue> &InVals) const {
2348   SelectionDAG &DAG                     = CLI.DAG;
2349   SDLoc &dl                             = CLI.DL;
2350   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2351   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
2352   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
2353   SDValue Chain                         = CLI.Chain;
2354   SDValue Callee                        = CLI.Callee;
2355   bool &isTailCall                      = CLI.IsTailCall;
2356   CallingConv::ID CallConv              = CLI.CallConv;
2357   bool doesNotRet                       = CLI.DoesNotReturn;
2358   bool isVarArg                         = CLI.IsVarArg;
2359 
2360   MachineFunction &MF = DAG.getMachineFunction();
2361   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2362   MachineFunction::CallSiteInfo CSInfo;
2363   bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
2364   bool isThisReturn = false;
2365   bool isCmseNSCall   = false;
2366   bool isSibCall = false;
2367   bool PreferIndirect = false;
2368   bool GuardWithBTI = false;
2369 
2370   // Lower 'returns_twice' calls to a pseudo-instruction.
2371   if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Attribute::ReturnsTwice) &&
2372       !Subtarget->noBTIAtReturnTwice())
2373     GuardWithBTI = AFI->branchTargetEnforcement();
2374 
2375   // Determine whether this is a non-secure function call.
2376   if (CLI.CB && CLI.CB->getAttributes().hasFnAttr("cmse_nonsecure_call"))
2377     isCmseNSCall = true;
2378 
2379   // Disable tail calls if they're not supported.
2380   if (!Subtarget->supportsTailCall())
2381     isTailCall = false;
2382 
  // For both non-secure calls and returns from a CMSE entry function, the
  // function needs to do some extra work after the call, or before the
  // return, respectively, thus it cannot end with a tail call.
2386   if (isCmseNSCall || AFI->isCmseNSEntryFunction())
2387     isTailCall = false;
2388 
2389   if (isa<GlobalAddressSDNode>(Callee)) {
2390     // If we're optimizing for minimum size and the function is called three or
2391     // more times in this block, we can improve codesize by calling indirectly
2392     // as BLXr has a 16-bit encoding.
2393     auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
2394     if (CLI.CB) {
2395       auto *BB = CLI.CB->getParent();
2396       PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() &&
2397                        count_if(GV->users(), [&BB](const User *U) {
2398                          return isa<Instruction>(U) &&
2399                                 cast<Instruction>(U)->getParent() == BB;
2400                        }) > 2;
2401     }
2402   }
2403   if (isTailCall) {
2404     // Check if it's really possible to do a tail call.
2405     isTailCall = IsEligibleForTailCallOptimization(
2406         Callee, CallConv, isVarArg, isStructRet,
2407         MF.getFunction().hasStructRetAttr(), Outs, OutVals, Ins, DAG,
2408         PreferIndirect);
2409 
2410     if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt &&
2411         CallConv != CallingConv::Tail && CallConv != CallingConv::SwiftTail)
2412       isSibCall = true;
2413 
2414     // We don't support GuaranteedTailCallOpt for ARM, only automatically
2415     // detected sibcalls.
2416     if (isTailCall)
2417       ++NumTailCalls;
2418   }
2419 
2420   if (!isTailCall && CLI.CB && CLI.CB->isMustTailCall())
2421     report_fatal_error("failed to perform tail call elimination on a call "
2422                        "site marked musttail");
2423   // Analyze operands of the call, assigning locations to each operand.
2424   SmallVector<CCValAssign, 16> ArgLocs;
2425   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
2426                  *DAG.getContext());
2427   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));
2428 
2429   // Get a count of how many bytes are to be pushed on the stack.
2430   unsigned NumBytes = CCInfo.getStackSize();
2431 
2432   // SPDiff is the byte offset of the call's argument area from the callee's.
2433   // Stores to callee stack arguments will be placed in FixedStackSlots offset
2434   // by this amount for a tail call. In a sibling call it must be 0 because the
2435   // caller will deallocate the entire stack and the callee still expects its
2436   // arguments to begin at SP+0. Completely unused for non-tail calls.
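  // For example (sizes illustrative): if the caller reserved 16 bytes of
  // incoming argument space but this tail call needs 24 bytes of outgoing
  // arguments, SPDiff is -8 and the callee's fixed stack objects are offset
  // by that amount.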
2437   int SPDiff = 0;
2438 
2439   if (isTailCall && !isSibCall) {
2440     auto FuncInfo = MF.getInfo<ARMFunctionInfo>();
2441     unsigned NumReusableBytes = FuncInfo->getArgumentStackSize();
2442 
    // Since the callee will pop the argument stack as a tail call, we must
    // keep the popped size 16-byte aligned.
2445     Align StackAlign = DAG.getDataLayout().getStackAlignment();
2446     NumBytes = alignTo(NumBytes, StackAlign);
2447 
2448     // SPDiff will be negative if this tail call requires more space than we
2449     // would automatically have in our incoming argument space. Positive if we
2450     // can actually shrink the stack.
2451     SPDiff = NumReusableBytes - NumBytes;
2452 
2453     // If this call requires more stack than we have available from
2454     // LowerFormalArguments, tell FrameLowering to reserve space for it.
2455     if (SPDiff < 0 && AFI->getArgRegsSaveSize() < (unsigned)-SPDiff)
2456       AFI->setArgRegsSaveSize(-SPDiff);
2457   }
2458 
2459   if (isSibCall) {
    // For sibling tail calls, memory operands are available in our caller's
    // stack.
2461     NumBytes = 0;
2462   } else {
2463     // Adjust the stack pointer for the new arguments...
2464     // These operations are automatically eliminated by the prolog/epilog pass
2465     Chain = DAG.getCALLSEQ_START(Chain, isTailCall ? 0 : NumBytes, 0, dl);
2466   }
2467 
2468   SDValue StackPtr =
2469       DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout()));
2470 
2471   RegsToPassVector RegsToPass;
2472   SmallVector<SDValue, 8> MemOpChains;
2473 
2474   // During a tail call, stores to the argument area must happen after all of
2475   // the function's incoming arguments have been loaded because they may alias.
2476   // This is done by folding in a TokenFactor from LowerFormalArguments, but
2477   // there's no point in doing so repeatedly so this tracks whether that's
2478   // happened yet.
2479   bool AfterFormalArgLoads = false;
2480 
2481   // Walk the register/memloc assignments, inserting copies/loads.  In the case
2482   // of tail call optimization, arguments are handled later.
2483   for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
2484        i != e;
2485        ++i, ++realArgIdx) {
2486     CCValAssign &VA = ArgLocs[i];
2487     SDValue Arg = OutVals[realArgIdx];
2488     ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2489     bool isByVal = Flags.isByVal();
2490 
2491     // Promote the value if needed.
2492     switch (VA.getLocInfo()) {
2493     default: llvm_unreachable("Unknown loc info!");
2494     case CCValAssign::Full: break;
2495     case CCValAssign::SExt:
2496       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
2497       break;
2498     case CCValAssign::ZExt:
2499       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
2500       break;
2501     case CCValAssign::AExt:
2502       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
2503       break;
2504     case CCValAssign::BCvt:
2505       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
2506       break;
2507     }
2508 
2509     if (isTailCall && VA.isMemLoc() && !AfterFormalArgLoads) {
2510       Chain = DAG.getStackArgumentTokenFactor(Chain);
2511       AfterFormalArgLoads = true;
2512     }
2513 
2514     // f16 arguments have their size extended to 4 bytes and passed as if they
2515     // had been copied to the LSBs of a 32-bit register.
    // For that, they are passed extended to i32 (soft ABI) or f32 (hard ABI).
2517     if (VA.needsCustom() &&
2518         (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) {
2519       Arg = MoveFromHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Arg);
2520     } else {
      // f16 arguments could have been extended prior to argument lowering.
      // Mask them if this is a CMSE nonsecure call.
2523       auto ArgVT = Outs[realArgIdx].ArgVT;
2524       if (isCmseNSCall && (ArgVT == MVT::f16)) {
2525         auto LocBits = VA.getLocVT().getSizeInBits();
2526         auto MaskValue = APInt::getLowBitsSet(LocBits, ArgVT.getSizeInBits());
2527         SDValue Mask =
2528             DAG.getConstant(MaskValue, dl, MVT::getIntegerVT(LocBits));
2529         Arg = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocBits), Arg);
2530         Arg = DAG.getNode(ISD::AND, dl, MVT::getIntegerVT(LocBits), Arg, Mask);
2531         Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
2532       }
2533     }
2534 
2535     // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
2536     if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) {
2537       SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2538                                 DAG.getConstant(0, dl, MVT::i32));
2539       SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2540                                 DAG.getConstant(1, dl, MVT::i32));
2541 
2542       PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, VA, ArgLocs[++i],
2543                        StackPtr, MemOpChains, isTailCall, SPDiff);
2544 
2545       VA = ArgLocs[++i]; // skip ahead to next loc
2546       if (VA.isRegLoc()) {
2547         PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, VA, ArgLocs[++i],
2548                          StackPtr, MemOpChains, isTailCall, SPDiff);
2549       } else {
2550         assert(VA.isMemLoc());
2551         SDValue DstAddr;
2552         MachinePointerInfo DstInfo;
2553         std::tie(DstAddr, DstInfo) =
2554             computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff);
2555         MemOpChains.push_back(DAG.getStore(Chain, dl, Op1, DstAddr, DstInfo));
2556       }
2557     } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) {
2558       PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
2559                        StackPtr, MemOpChains, isTailCall, SPDiff);
2560     } else if (VA.isRegLoc()) {
2561       if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
2562           Outs[0].VT == MVT::i32) {
2563         assert(VA.getLocVT() == MVT::i32 &&
2564                "unexpected calling convention register assignment");
2565         assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
2566                "unexpected use of 'returned'");
2567         isThisReturn = true;
2568       }
2569       const TargetOptions &Options = DAG.getTarget().Options;
2570       if (Options.EmitCallSiteInfo)
2571         CSInfo.emplace_back(VA.getLocReg(), i);
2572       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2573     } else if (isByVal) {
2574       assert(VA.isMemLoc());
2575       unsigned offset = 0;
2576 
2577       // True if this byval aggregate will be split between registers
2578       // and memory.
2579       unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
2580       unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
2581 
2582       if (CurByValIdx < ByValArgsCount) {
2583 
2584         unsigned RegBegin, RegEnd;
2585         CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
2586 
2587         EVT PtrVT =
2588             DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2589         unsigned int i, j;
2590         for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
2591           SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
2592           SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
2593           SDValue Load =
2594               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo(),
2595                           DAG.InferPtrAlign(AddArg));
2596           MemOpChains.push_back(Load.getValue(1));
2597           RegsToPass.push_back(std::make_pair(j, Load));
2598         }
2599 
        // If the parameter size exceeds the register area, the "offset" value
        // helps us calculate the stack slot for the remaining part properly.
2602         offset = RegEnd - RegBegin;
2603 
2604         CCInfo.nextInRegsParam();
2605       }
2606 
2607       if (Flags.getByValSize() > 4*offset) {
2608         auto PtrVT = getPointerTy(DAG.getDataLayout());
2609         SDValue Dst;
2610         MachinePointerInfo DstInfo;
2611         std::tie(Dst, DstInfo) =
2612             computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff);
2613         SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
2614         SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset);
2615         SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
2616                                            MVT::i32);
2617         SDValue AlignNode =
2618             DAG.getConstant(Flags.getNonZeroByValAlign().value(), dl, MVT::i32);
2619 
2620         SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
2621         SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
2622         MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
2623                                           Ops));
2624       }
2625     } else {
2626       assert(VA.isMemLoc());
2627       SDValue DstAddr;
2628       MachinePointerInfo DstInfo;
2629       std::tie(DstAddr, DstInfo) =
2630           computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff);
2631 
2632       SDValue Store = DAG.getStore(Chain, dl, Arg, DstAddr, DstInfo);
2633       MemOpChains.push_back(Store);
2634     }
2635   }
2636 
2637   if (!MemOpChains.empty())
2638     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2639 
2640   // Build a sequence of copy-to-reg nodes chained together with token chain
2641   // and flag operands which copy the outgoing args into the appropriate regs.
2642   SDValue InGlue;
2643   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2644     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
2645                              RegsToPass[i].second, InGlue);
2646     InGlue = Chain.getValue(1);
2647   }
2648 
2649   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is), turn it into a TargetGlobalAddress/TargetExternalSymbol
2651   // node so that legalize doesn't hack it.
2652   bool isDirect = false;
2653 
2654   const TargetMachine &TM = getTargetMachine();
2655   const Module *Mod = MF.getFunction().getParent();
2656   const GlobalValue *GVal = nullptr;
2657   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
2658     GVal = G->getGlobal();
2659   bool isStub =
2660       !TM.shouldAssumeDSOLocal(*Mod, GVal) && Subtarget->isTargetMachO();
2661 
2662   bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
2663   bool isLocalARMFunc = false;
2664   auto PtrVt = getPointerTy(DAG.getDataLayout());
2665 
2666   if (Subtarget->genLongCalls()) {
2667     assert((!isPositionIndependent() || Subtarget->isTargetWindows()) &&
2668            "long-calls codegen is not position independent!");
2669     // Handle a global address or an external symbol. If it's not one of
2670     // those, the target's already in a register, so we don't need to do
2671     // anything extra.
2672     if (isa<GlobalAddressSDNode>(Callee)) {
2673       if (Subtarget->genExecuteOnly()) {
2674         if (Subtarget->useMovt())
2675           ++NumMovwMovt;
2676         Callee = DAG.getNode(ARMISD::Wrapper, dl, PtrVt,
2677                              DAG.getTargetGlobalAddress(GVal, dl, PtrVt));
2678       } else {
2679         // Create a constant pool entry for the callee address
2680         unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2681         ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(
2682             GVal, ARMPCLabelIndex, ARMCP::CPValue, 0);
2683 
2684         // Get the address of the callee into a register
2685         SDValue Addr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4));
2686         Addr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Addr);
2687         Callee = DAG.getLoad(
2688             PtrVt, dl, DAG.getEntryNode(), Addr,
2689             MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2690       }
2691     } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
2692       const char *Sym = S->getSymbol();
2693 
2694       if (Subtarget->genExecuteOnly()) {
2695         if (Subtarget->useMovt())
2696           ++NumMovwMovt;
2697         Callee = DAG.getNode(ARMISD::Wrapper, dl, PtrVt,
2698                              DAG.getTargetGlobalAddress(GVal, dl, PtrVt));
2699       } else {
2700         // Create a constant pool entry for the callee address
2701         unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2702         ARMConstantPoolValue *CPV = ARMConstantPoolSymbol::Create(
2703             *DAG.getContext(), Sym, ARMPCLabelIndex, 0);
2704 
2705         // Get the address of the callee into a register
2706         SDValue Addr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4));
2707         Addr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Addr);
2708         Callee = DAG.getLoad(
2709             PtrVt, dl, DAG.getEntryNode(), Addr,
2710             MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2711       }
2712     }
2713   } else if (isa<GlobalAddressSDNode>(Callee)) {
2714     if (!PreferIndirect) {
2715       isDirect = true;
2716       bool isDef = GVal->isStrongDefinitionForLinker();
2717 
2718       // ARM call to a local ARM function is predicable.
2719       isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
2720       // tBX takes a register source operand.
2721       if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2722         assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
2723         Callee = DAG.getNode(
2724             ARMISD::WrapperPIC, dl, PtrVt,
2725             DAG.getTargetGlobalAddress(GVal, dl, PtrVt, 0, ARMII::MO_NONLAZY));
2726         Callee = DAG.getLoad(
2727             PtrVt, dl, DAG.getEntryNode(), Callee,
2728             MachinePointerInfo::getGOT(DAG.getMachineFunction()), MaybeAlign(),
2729             MachineMemOperand::MODereferenceable |
2730                 MachineMemOperand::MOInvariant);
2731       } else if (Subtarget->isTargetCOFF()) {
2732         assert(Subtarget->isTargetWindows() &&
2733                "Windows is the only supported COFF target");
2734         unsigned TargetFlags = ARMII::MO_NO_FLAG;
2735         if (GVal->hasDLLImportStorageClass())
2736           TargetFlags = ARMII::MO_DLLIMPORT;
2737         else if (!TM.shouldAssumeDSOLocal(*GVal->getParent(), GVal))
2738           TargetFlags = ARMII::MO_COFFSTUB;
2739         Callee = DAG.getTargetGlobalAddress(GVal, dl, PtrVt, /*offset=*/0,
2740                                             TargetFlags);
2741         if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB))
2742           Callee =
2743               DAG.getLoad(PtrVt, dl, DAG.getEntryNode(),
2744                           DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee),
2745                           MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2746       } else {
2747         Callee = DAG.getTargetGlobalAddress(GVal, dl, PtrVt, 0, 0);
2748       }
2749     }
2750   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2751     isDirect = true;
2752     // tBX takes a register source operand.
2753     const char *Sym = S->getSymbol();
2754     if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2755       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2756       ARMConstantPoolValue *CPV =
2757         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2758                                       ARMPCLabelIndex, 4);
2759       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4));
2760       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2761       Callee = DAG.getLoad(
2762           PtrVt, dl, DAG.getEntryNode(), CPAddr,
2763           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2764       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2765       Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel);
2766     } else {
2767       Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0);
2768     }
2769   }
2770 
2771   if (isCmseNSCall) {
2772     assert(!isARMFunc && !isDirect &&
2773            "Cannot handle call to ARM function or direct call");
2774     if (NumBytes > 0) {
2775       DiagnosticInfoUnsupported Diag(DAG.getMachineFunction().getFunction(),
2776                                      "call to non-secure function would "
2777                                      "require passing arguments on stack",
2778                                      dl.getDebugLoc());
2779       DAG.getContext()->diagnose(Diag);
2780     }
2781     if (isStructRet) {
2782       DiagnosticInfoUnsupported Diag(
2783           DAG.getMachineFunction().getFunction(),
2784           "call to non-secure function would return value through pointer",
2785           dl.getDebugLoc());
2786       DAG.getContext()->diagnose(Diag);
2787     }
2788   }
2789 
2790   // FIXME: handle tail calls differently.
2791   unsigned CallOpc;
2792   if (Subtarget->isThumb()) {
2793     if (GuardWithBTI)
2794       CallOpc = ARMISD::t2CALL_BTI;
2795     else if (isCmseNSCall)
2796       CallOpc = ARMISD::tSECALL;
2797     else if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
2798       CallOpc = ARMISD::CALL_NOLINK;
2799     else
2800       CallOpc = ARMISD::CALL;
2801   } else {
2802     if (!isDirect && !Subtarget->hasV5TOps())
2803       CallOpc = ARMISD::CALL_NOLINK;
2804     else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
2805              // Emit regular call when code size is the priority
2806              !Subtarget->hasMinSize())
2807       // "mov lr, pc; b _foo" to avoid confusing the RSP
2808       CallOpc = ARMISD::CALL_NOLINK;
2809     else
2810       CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
2811   }
2812 
  // We don't usually want to end the call-sequence here because we would tidy
  // the frame up *after* the call; however, in the ABI-changing tail-call
  // case we've carefully laid out the parameters so that when sp is reset
  // they'll be in the correct location.
2817   if (isTailCall && !isSibCall) {
2818     Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, dl);
2819     InGlue = Chain.getValue(1);
2820   }
2821 
2822   std::vector<SDValue> Ops;
2823   Ops.push_back(Chain);
2824   Ops.push_back(Callee);
2825 
2826   if (isTailCall) {
2827     Ops.push_back(DAG.getTargetConstant(SPDiff, dl, MVT::i32));
2828   }
2829 
2830   // Add argument registers to the end of the list so that they are known live
2831   // into the call.
2832   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2833     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
2834                                   RegsToPass[i].second.getValueType()));
2835 
2836   // Add a register mask operand representing the call-preserved registers.
2837   const uint32_t *Mask;
2838   const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
2839   if (isThisReturn) {
2840     // For 'this' returns, use the R0-preserving mask if applicable
2841     Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
2842     if (!Mask) {
2843       // Set isThisReturn to false if the calling convention is not one that
2844       // allows 'returned' to be modeled in this way, so LowerCallResult does
2845       // not try to pass 'this' straight through
2846       isThisReturn = false;
2847       Mask = ARI->getCallPreservedMask(MF, CallConv);
2848     }
2849   } else
2850     Mask = ARI->getCallPreservedMask(MF, CallConv);
2851 
2852   assert(Mask && "Missing call preserved mask for calling convention");
2853   Ops.push_back(DAG.getRegisterMask(Mask));
2854 
2855   if (InGlue.getNode())
2856     Ops.push_back(InGlue);
2857 
2858   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2859   if (isTailCall) {
2860     MF.getFrameInfo().setHasTailCall();
2861     SDValue Ret = DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
2862     DAG.addNoMergeSiteInfo(Ret.getNode(), CLI.NoMerge);
2863     DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
2864     return Ret;
2865   }
2866 
2867   // Returns a chain and a flag for retval copy to use.
2868   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
2869   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
2870   InGlue = Chain.getValue(1);
2871   DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
2872 
  // If we're guaranteeing tail-calls will be honoured, the callee must
  // pop its own argument stack on return. But this call is *not* a tail call,
  // so we need to undo that after it returns to restore the status quo.
2876   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
2877   uint64_t CalleePopBytes =
2878       canGuaranteeTCO(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : -1ULL;
2879 
2880   Chain = DAG.getCALLSEQ_END(Chain, NumBytes, CalleePopBytes, InGlue, dl);
2881   if (!Ins.empty())
2882     InGlue = Chain.getValue(1);
2883 
2884   // Handle result values, copying them out of physregs into vregs that we
2885   // return.
2886   return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG,
2887                          InVals, isThisReturn,
2888                          isThisReturn ? OutVals[0] : SDValue());
2889 }
2890 
/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack.  Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to ensure
/// this.
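///
/// For illustration (register choice hypothetical): with Alignment = 8 and r1
/// as the next free register, r1 is skipped so the byval starts at r2; a
/// 24-byte parameter then occupies r2-r3 (Excess = 8 bytes) and Size is
/// reduced to 24 - 8 = 16 bytes for the part that goes on the stack.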
2895 void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
2896                                     Align Alignment) const {
2897   // Byval (as with any stack) slots are always at least 4 byte aligned.
2898   Alignment = std::max(Alignment, Align(4));
2899 
2900   unsigned Reg = State->AllocateReg(GPRArgRegs);
2901   if (!Reg)
2902     return;
2903 
2904   unsigned AlignInRegs = Alignment.value() / 4;
2905   unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
2906   for (unsigned i = 0; i < Waste; ++i)
2907     Reg = State->AllocateReg(GPRArgRegs);
2908 
2909   if (!Reg)
2910     return;
2911 
2912   unsigned Excess = 4 * (ARM::R4 - Reg);
2913 
  // Special case when NSAA != SP and the parameter size is greater than the
  // size of all remaining GPR registers. In that case we can't split the
  // parameter; we must send it to the stack. We also must set NCRN to R4,
  // so all remaining registers are wasted.
2918   const unsigned NSAAOffset = State->getStackSize();
2919   if (NSAAOffset != 0 && Size > Excess) {
2920     while (State->AllocateReg(GPRArgRegs))
2921       ;
2922     return;
2923   }
2924 
  // The first register for the byval parameter is the first register that
  // wasn't allocated before this method call, so it would be "reg".
  // If the parameter is small enough to be saved in the range [reg, r4), then
  // the end (first-past-last) register would be reg + param-size-in-regs;
  // otherwise the parameter is split between registers and stack, and the
  // end register would be r4 in this case.
2931   unsigned ByValRegBegin = Reg;
2932   unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
2933   State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
  // Note: the first register was already allocated at the beginning of this
  // function, so allocate the remaining registers we need.
2936   for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
2937     State->AllocateReg(GPRArgRegs);
2938   // A byval parameter that is split between registers and memory needs its
2939   // size truncated here.
2940   // In the case where the entire structure fits in registers, we set the
2941   // size in memory to zero.
2942   Size = std::max<int>(Size - Excess, 0);
2943 }
2944 
2945 /// MatchingStackOffset - Return true if the given stack call argument is
2946 /// already available in the same position (relatively) of the caller's
2947 /// incoming argument stack.
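///
/// For example (illustrative): if the caller received an i32 in a fixed stack
/// object of size 4 at offset 8, and the tail call passes that same loaded
/// value at byte offset 8, the argument is already in place and needs no
/// store.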
2948 static
2949 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
2950                          MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
2951                          const TargetInstrInfo *TII) {
2952   unsigned Bytes = Arg.getValueSizeInBits() / 8;
2953   int FI = std::numeric_limits<int>::max();
2954   if (Arg.getOpcode() == ISD::CopyFromReg) {
2955     Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
2956     if (!VR.isVirtual())
2957       return false;
2958     MachineInstr *Def = MRI->getVRegDef(VR);
2959     if (!Def)
2960       return false;
2961     if (!Flags.isByVal()) {
2962       if (!TII->isLoadFromStackSlot(*Def, FI))
2963         return false;
2964     } else {
2965       return false;
2966     }
2967   } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2968     if (Flags.isByVal())
2969       // ByVal argument is passed in as a pointer but it's now being
2970       // dereferenced. e.g.
2971       // define @foo(%struct.X* %A) {
2972       //   tail call @bar(%struct.X* byval %A)
2973       // }
2974       return false;
2975     SDValue Ptr = Ld->getBasePtr();
2976     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
2977     if (!FINode)
2978       return false;
2979     FI = FINode->getIndex();
2980   } else
2981     return false;
2982 
2983   assert(FI != std::numeric_limits<int>::max());
2984   if (!MFI.isFixedObjectIndex(FI))
2985     return false;
2986   return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI);
2987 }
2988 
2989 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
2990 /// for tail call optimization. Targets which want to do tail call
2991 /// optimization should implement this function.
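///
/// A minimal IR sketch of a call that is typically eligible (assuming the
/// target supports tail calls and the calling conventions match):
///   define i32 @caller(i32 %x) {
///     %r = tail call i32 @callee(i32 %x)
///     ret i32 %r
///   }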
2992 bool ARMTargetLowering::IsEligibleForTailCallOptimization(
2993     SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
2994     bool isCalleeStructRet, bool isCallerStructRet,
2995     const SmallVectorImpl<ISD::OutputArg> &Outs,
2996     const SmallVectorImpl<SDValue> &OutVals,
2997     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
2998     const bool isIndirect) const {
2999   MachineFunction &MF = DAG.getMachineFunction();
3000   const Function &CallerF = MF.getFunction();
3001   CallingConv::ID CallerCC = CallerF.getCallingConv();
3002 
3003   assert(Subtarget->supportsTailCall());
3004 
3005   // Indirect tail calls cannot be optimized for Thumb1 if the args
3006   // to the call take up r0-r3. The reason is that there are no legal registers
3007   // left to hold the pointer to the function to be called.
3008   // Similarly, if the function uses return address sign and authentication,
3009   // r12 is needed to hold the PAC and is not available to hold the callee
3010   // address.
3011   if (Outs.size() >= 4 &&
3012       (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect)) {
3013     if (Subtarget->isThumb1Only())
3014       return false;
3015     // Conservatively assume the function spills LR.
3016     if (MF.getInfo<ARMFunctionInfo>()->shouldSignReturnAddress(true))
3017       return false;
3018   }
3019 
3020   // Look for obvious safe cases to perform tail call optimization that do not
3021   // require ABI changes. This is what gcc calls sibcall.
3022 
3023   // Exception-handling functions need a special set of instructions to indicate
3024   // a return to the hardware. Tail-calling another function would probably
3025   // break this.
3026   if (CallerF.hasFnAttribute("interrupt"))
3027     return false;
3028 
  if (canGuaranteeTCO(CalleeCC,
                      getTargetMachine().Options.GuaranteedTailCallOpt))
3030     return CalleeCC == CallerCC;
3031 
3032   // Also avoid sibcall optimization if either caller or callee uses struct
3033   // return semantics.
3034   if (isCalleeStructRet || isCallerStructRet)
3035     return false;
3036 
3037   // Externally-defined functions with weak linkage should not be
3038   // tail-called on ARM when the OS does not support dynamic
3039   // pre-emption of symbols, as the AAELF spec requires normal calls
3040   // to undefined weak functions to be replaced with a NOP or jump to the
3041   // next instruction. The behaviour of branch instructions in this
3042   // situation (as used for tail calls) is implementation-defined, so we
3043   // cannot rely on the linker replacing the tail call with a return.
3044   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3045     const GlobalValue *GV = G->getGlobal();
3046     const Triple &TT = getTargetMachine().getTargetTriple();
3047     if (GV->hasExternalWeakLinkage() &&
3048         (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
3049       return false;
3050   }
3051 
3052   // Check that the call results are passed in the same way.
3053   LLVMContext &C = *DAG.getContext();
3054   if (!CCState::resultsCompatible(
3055           getEffectiveCallingConv(CalleeCC, isVarArg),
3056           getEffectiveCallingConv(CallerCC, CallerF.isVarArg()), MF, C, Ins,
3057           CCAssignFnForReturn(CalleeCC, isVarArg),
3058           CCAssignFnForReturn(CallerCC, CallerF.isVarArg())))
3059     return false;
3060   // The callee has to preserve all registers the caller needs to preserve.
3061   const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
3062   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
3063   if (CalleeCC != CallerCC) {
3064     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
3065     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
3066       return false;
3067   }
3068 
3069   // If Caller's vararg or byval argument has been split between registers and
3070   // stack, do not perform tail call, since part of the argument is in caller's
3071   // local frame.
3072   const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
3073   if (AFI_Caller->getArgRegsSaveSize())
3074     return false;
3075 
3076   // If the callee takes no arguments then go on to check the results of the
3077   // call.
3078   if (!Outs.empty()) {
3079     // Check if stack adjustment is needed. For now, do not do this if any
3080     // argument is passed on the stack.
3081     SmallVector<CCValAssign, 16> ArgLocs;
3082     CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
3083     CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
3084     if (CCInfo.getStackSize()) {
3085       // Check if the arguments are already laid out in the right way as
3086       // the caller's fixed stack objects.
3087       MachineFrameInfo &MFI = MF.getFrameInfo();
3088       const MachineRegisterInfo *MRI = &MF.getRegInfo();
3089       const TargetInstrInfo *TII = Subtarget->getInstrInfo();
3090       for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
3091            i != e;
3092            ++i, ++realArgIdx) {
3093         CCValAssign &VA = ArgLocs[i];
3094         EVT RegVT = VA.getLocVT();
3095         SDValue Arg = OutVals[realArgIdx];
3096         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
3097         if (VA.getLocInfo() == CCValAssign::Indirect)
3098           return false;
3099         if (VA.needsCustom() && (RegVT == MVT::f64 || RegVT == MVT::v2f64)) {
3100           // f64 and vector types are split into multiple registers or
3101           // register/stack-slot combinations.  The types will not match
3102           // the registers; give up on memory f64 refs until we figure
3103           // out what to do about this.
3104           if (!VA.isRegLoc())
3105             return false;
3106           if (!ArgLocs[++i].isRegLoc())
3107             return false;
3108           if (RegVT == MVT::v2f64) {
3109             if (!ArgLocs[++i].isRegLoc())
3110               return false;
3111             if (!ArgLocs[++i].isRegLoc())
3112               return false;
3113           }
3114         } else if (!VA.isRegLoc()) {
3115           if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3116                                    MFI, MRI, TII))
3117             return false;
3118         }
3119       }
3120     }
3121 
3122     const MachineRegisterInfo &MRI = MF.getRegInfo();
3123     if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
3124       return false;
3125   }
3126 
3127   return true;
3128 }
3129 
3130 bool
3131 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
3132                                   MachineFunction &MF, bool isVarArg,
3133                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
3134                                   LLVMContext &Context) const {
3135   SmallVector<CCValAssign, 16> RVLocs;
3136   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
3137   return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
3138 }
3139 
3140 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
3141                                     const SDLoc &DL, SelectionDAG &DAG) {
3142   const MachineFunction &MF = DAG.getMachineFunction();
3143   const Function &F = MF.getFunction();
3144 
3145   StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString();
3146 
3147   // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
3148   // version of the "preferred return address". These offsets affect the return
3149   // instruction if this is a return from PL1 without hypervisor extensions.
3150   //    IRQ/FIQ: +4     "subs pc, lr, #4"
3151   //    SWI:     0      "subs pc, lr, #0"
3152   //    ABORT:   +4     "subs pc, lr, #4"
3153   //    UNDEF:   +4/+2  "subs pc, lr, #0"
  // UNDEF varies depending on whether the exception came from ARM or Thumb
  // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
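  //
  // For example (illustrative), a handler declared in IR as
  //   define void @irq_handler() "interrupt"="IRQ" { ... }
  // has its return lowered so the final instruction is "subs pc, lr, #4" on
  // non-M-class cores, per the table above.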
3156 
3157   int64_t LROffset;
3158   if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
3159       IntKind == "ABORT")
3160     LROffset = 4;
3161   else if (IntKind == "SWI" || IntKind == "UNDEF")
3162     LROffset = 0;
3163   else
3164     report_fatal_error("Unsupported interrupt attribute. If present, value "
3165                        "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
3166 
3167   RetOps.insert(RetOps.begin() + 1,
3168                 DAG.getConstant(LROffset, DL, MVT::i32, false));
3169 
3170   return DAG.getNode(ARMISD::INTRET_GLUE, DL, MVT::Other, RetOps);
3171 }
3172 
3173 SDValue
3174 ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
3175                                bool isVarArg,
3176                                const SmallVectorImpl<ISD::OutputArg> &Outs,
3177                                const SmallVectorImpl<SDValue> &OutVals,
3178                                const SDLoc &dl, SelectionDAG &DAG) const {
3179   // CCValAssign - represent the assignment of the return value to a location.
3180   SmallVector<CCValAssign, 16> RVLocs;
3181 
3182   // CCState - Info about the registers and stack slots.
3183   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
3184                  *DAG.getContext());
3185 
3186   // Analyze outgoing return values.
3187   CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
3188 
3189   SDValue Glue;
3190   SmallVector<SDValue, 4> RetOps;
3191   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
3192   bool isLittleEndian = Subtarget->isLittle();
3193 
3194   MachineFunction &MF = DAG.getMachineFunction();
3195   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3196   AFI->setReturnRegsCount(RVLocs.size());
3197 
  // Report error if cmse entry function returns structure through first ptr
  // arg.
3199   if (AFI->isCmseNSEntryFunction() && MF.getFunction().hasStructRetAttr()) {
3200     // Note: using an empty SDLoc(), as the first line of the function is a
3201     // better place to report than the last line.
3202     DiagnosticInfoUnsupported Diag(
3203         DAG.getMachineFunction().getFunction(),
3204         "secure entry function would return value through pointer",
3205         SDLoc().getDebugLoc());
3206     DAG.getContext()->diagnose(Diag);
3207   }
3208 
3209   // Copy the result values into the output registers.
3210   for (unsigned i = 0, realRVLocIdx = 0;
3211        i != RVLocs.size();
3212        ++i, ++realRVLocIdx) {
3213     CCValAssign &VA = RVLocs[i];
3214     assert(VA.isRegLoc() && "Can only return in registers!");
3215 
3216     SDValue Arg = OutVals[realRVLocIdx];
3217     bool ReturnF16 = false;
3218 
3219     if (Subtarget->hasFullFP16() && Subtarget->isTargetHardFloat()) {
3220       // Half-precision return values can be returned like this:
3221       //
      // t11: f16 = fadd ...
3223       // t12: i16 = bitcast t11
3224       //   t13: i32 = zero_extend t12
3225       // t14: f32 = bitcast t13  <~~~~~~~ Arg
3226       //
3227       // to avoid code generation for bitcasts, we simply set Arg to the node
3228       // that produces the f16 value, t11 in this case.
3229       //
3230       if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) {
3231         SDValue ZE = Arg.getOperand(0);
3232         if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) {
3233           SDValue BC = ZE.getOperand(0);
3234           if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) {
3235             Arg = BC.getOperand(0);
3236             ReturnF16 = true;
3237           }
3238         }
3239       }
3240     }
3241 
3242     switch (VA.getLocInfo()) {
3243     default: llvm_unreachable("Unknown loc info!");
3244     case CCValAssign::Full: break;
3245     case CCValAssign::BCvt:
3246       if (!ReturnF16)
3247         Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
3248       break;
3249     }
3250 
3251     // Mask f16 arguments if this is a CMSE nonsecure entry.
3252     auto RetVT = Outs[realRVLocIdx].ArgVT;
3253     if (AFI->isCmseNSEntryFunction() && (RetVT == MVT::f16)) {
3254       if (VA.needsCustom() && VA.getValVT() == MVT::f16) {
3255         Arg = MoveFromHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Arg);
3256       } else {
3257         auto LocBits = VA.getLocVT().getSizeInBits();
3258         auto MaskValue = APInt::getLowBitsSet(LocBits, RetVT.getSizeInBits());
3259         SDValue Mask =
3260             DAG.getConstant(MaskValue, dl, MVT::getIntegerVT(LocBits));
3261         Arg = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocBits), Arg);
3262         Arg = DAG.getNode(ISD::AND, dl, MVT::getIntegerVT(LocBits), Arg, Mask);
3263         Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
3264       }
3265     }
3266 
3267     if (VA.needsCustom() &&
3268         (VA.getLocVT() == MVT::v2f64 || VA.getLocVT() == MVT::f64)) {
3269       if (VA.getLocVT() == MVT::v2f64) {
3270         // Extract the first half and return it in two registers.
3271         SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
3272                                    DAG.getConstant(0, dl, MVT::i32));
3273         SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
3274                                        DAG.getVTList(MVT::i32, MVT::i32), Half);
3275 
3276         Chain =
3277             DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
3278                              HalfGPRs.getValue(isLittleEndian ? 0 : 1), Glue);
3279         Glue = Chain.getValue(1);
3280         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3281         VA = RVLocs[++i]; // skip ahead to next loc
3282         Chain =
3283             DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
3284                              HalfGPRs.getValue(isLittleEndian ? 1 : 0), Glue);
3285         Glue = Chain.getValue(1);
3286         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3287         VA = RVLocs[++i]; // skip ahead to next loc
3288 
3289         // Extract the 2nd half and fall through to handle it as an f64 value.
3290         Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
3291                           DAG.getConstant(1, dl, MVT::i32));
3292       }
3293       // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
3294       // available.
3295       SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
3296                                   DAG.getVTList(MVT::i32, MVT::i32), Arg);
3297       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
3298                                fmrrd.getValue(isLittleEndian ? 0 : 1), Glue);
3299       Glue = Chain.getValue(1);
3300       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3301       VA = RVLocs[++i]; // skip ahead to next loc
3302       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
3303                                fmrrd.getValue(isLittleEndian ? 1 : 0), Glue);
3304     } else
3305       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Glue);
3306 
    // Guarantee that all emitted copies are stuck together, so that nothing
    // can be scheduled between them.
3309     Glue = Chain.getValue(1);
3310     RetOps.push_back(DAG.getRegister(
3311         VA.getLocReg(), ReturnF16 ? Arg.getValueType() : VA.getLocVT()));
3312   }
3313   const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
3314   const MCPhysReg *I =
3315       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
3316   if (I) {
3317     for (; *I; ++I) {
3318       if (ARM::GPRRegClass.contains(*I))
3319         RetOps.push_back(DAG.getRegister(*I, MVT::i32));
3320       else if (ARM::DPRRegClass.contains(*I))
3321         RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
3322       else
3323         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
3324     }
3325   }
3326 
3327   // Update chain and glue.
3328   RetOps[0] = Chain;
3329   if (Glue.getNode())
3330     RetOps.push_back(Glue);
3331 
3332   // CPUs which aren't M-class use a special sequence to return from
3333   // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
3334   // though we use "subs pc, lr, #N").
3335   //
3336   // M-class CPUs actually use a normal return sequence with a special
3337   // (hardware-provided) value in LR, so the normal code path works.
3338   if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") &&
3339       !Subtarget->isMClass()) {
3340     if (Subtarget->isThumb1Only())
3341       report_fatal_error("interrupt attribute is not supported in Thumb1");
3342     return LowerInterruptReturn(RetOps, dl, DAG);
3343   }
3344 
3345   ARMISD::NodeType RetNode = AFI->isCmseNSEntryFunction() ? ARMISD::SERET_GLUE :
3346                                                             ARMISD::RET_GLUE;
3347   return DAG.getNode(RetNode, dl, MVT::Other, RetOps);
3348 }
3349 
3350 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
3351   if (N->getNumValues() != 1)
3352     return false;
3353   if (!N->hasNUsesOfValue(1, 0))
3354     return false;
3355 
3356   SDValue TCChain = Chain;
3357   SDNode *Copy = *N->use_begin();
3358   if (Copy->getOpcode() == ISD::CopyToReg) {
3359     // If the copy has a glue operand, we conservatively assume it isn't safe to
3360     // perform a tail call.
3361     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
3362       return false;
3363     TCChain = Copy->getOperand(0);
3364   } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
3365     SDNode *VMov = Copy;
3366     // f64 returned in a pair of GPRs.
3367     SmallPtrSet<SDNode*, 2> Copies;
3368     for (SDNode *U : VMov->uses()) {
3369       if (U->getOpcode() != ISD::CopyToReg)
3370         return false;
3371       Copies.insert(U);
3372     }
3373     if (Copies.size() > 2)
3374       return false;
3375 
3376     for (SDNode *U : VMov->uses()) {
3377       SDValue UseChain = U->getOperand(0);
3378       if (Copies.count(UseChain.getNode()))
3379         // Second CopyToReg
3380         Copy = U;
3381       else {
3382         // We are at the top of this chain.
3383         // If the copy has a glue operand, we conservatively assume it
3384         // isn't safe to perform a tail call.
3385         if (U->getOperand(U->getNumOperands() - 1).getValueType() == MVT::Glue)
3386           return false;
3387         // First CopyToReg
3388         TCChain = UseChain;
3389       }
3390     }
3391   } else if (Copy->getOpcode() == ISD::BITCAST) {
3392     // f32 returned in a single GPR.
3393     if (!Copy->hasOneUse())
3394       return false;
3395     Copy = *Copy->use_begin();
3396     if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
3397       return false;
3398     // If the copy has a glue operand, we conservatively assume it isn't safe to
3399     // perform a tail call.
3400     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
3401       return false;
3402     TCChain = Copy->getOperand(0);
3403   } else {
3404     return false;
3405   }
3406 
3407   bool HasRet = false;
3408   for (const SDNode *U : Copy->uses()) {
3409     if (U->getOpcode() != ARMISD::RET_GLUE &&
3410         U->getOpcode() != ARMISD::INTRET_GLUE)
3411       return false;
3412     HasRet = true;
3413   }
3414 
3415   if (!HasRet)
3416     return false;
3417 
3418   Chain = TCChain;
3419   return true;
3420 }
3421 
3422 bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
3423   if (!Subtarget->supportsTailCall())
3424     return false;
3425 
3426   if (!CI->isTailCall())
3427     return false;
3428 
3429   return true;
3430 }
3431 
// We are trying to write a 64-bit value, so we need to split it into two
// 32-bit values first, and then pass the low and high parts through.
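//
// A hedged IR-level sketch of an input to this lowering (register name and
// value are illustrative):
//   call void @llvm.write_register.i64(metadata !0, i64 %val)
//   !0 = !{!"some_reg"}
// The i64 value is split below into Lo/Hi i32 operands of the new node.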
3434 static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) {
3435   SDLoc DL(Op);
3436   SDValue WriteValue = Op->getOperand(2);
3437 
3438   // This function is only supposed to be called for i64 type argument.
3439   assert(WriteValue.getValueType() == MVT::i64
3440           && "LowerWRITE_REGISTER called for non-i64 type argument.");
3441 
3442   SDValue Lo, Hi;
3443   std::tie(Lo, Hi) = DAG.SplitScalar(WriteValue, DL, MVT::i32, MVT::i32);
3444   SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi };
3445   return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops);
3446 }
3447 
3448 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
3449 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
3450 // one of the above mentioned nodes. It has to be wrapped because otherwise
3451 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
3452 // be used to form addressing mode. These wrapped nodes will be selected
3453 // into MOVi.
3454 SDValue ARMTargetLowering::LowerConstantPool(SDValue Op,
3455                                              SelectionDAG &DAG) const {
3456   EVT PtrVT = Op.getValueType();
3457   // FIXME there is no actual debug info here
3458   SDLoc dl(Op);
3459   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
3460   SDValue Res;
3461 
  // When generating execute-only code, Constant Pools must be promoted to the
  // global data section. It's a bit ugly that we can't share them across
  // basic blocks, but this way we guarantee that execute-only behaves
  // correctly with position-independent addressing modes.
3466   if (Subtarget->genExecuteOnly()) {
3467     auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
3468     auto T = const_cast<Type*>(CP->getType());
3469     auto C = const_cast<Constant*>(CP->getConstVal());
3470     auto M = const_cast<Module*>(DAG.getMachineFunction().
3471                                  getFunction().getParent());
3472     auto GV = new GlobalVariable(
3473                     *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C,
3474                     Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" +
3475                     Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" +
3476                     Twine(AFI->createPICLabelUId())
3477                   );
3478     SDValue GA = DAG.getTargetGlobalAddress(dyn_cast<GlobalValue>(GV),
3479                                             dl, PtrVT);
3480     return LowerGlobalAddress(GA, DAG);
3481   }
3482 
3483   // The 16-bit ADR instruction can only encode offsets that are multiples of 4,
3484   // so we need to align to at least 4 bytes when we don't have 32-bit ADR.
3485   Align CPAlign = CP->getAlign();
3486   if (Subtarget->isThumb1Only())
3487     CPAlign = std::max(CPAlign, Align(4));
3488   if (CP->isMachineConstantPoolEntry())
3489     Res =
3490         DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, CPAlign);
3491   else
3492     Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CPAlign);
3493   return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
3494 }
3495 
3496 unsigned ARMTargetLowering::getJumpTableEncoding() const {
3497   // If we don't have a 32-bit pc-relative branch instruction then the jump
3498   // table consists of block addresses. Usually this is inline, but for
3499   // execute-only it must be placed out-of-line.
3500   if (Subtarget->genExecuteOnly() && !Subtarget->hasV8MBaselineOps())
3501     return MachineJumpTableInfo::EK_BlockAddress;
3502   return MachineJumpTableInfo::EK_Inline;
3503 }
3504 
3505 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
3506                                              SelectionDAG &DAG) const {
3507   MachineFunction &MF = DAG.getMachineFunction();
3508   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3509   unsigned ARMPCLabelIndex = 0;
3510   SDLoc DL(Op);
3511   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3512   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
3513   SDValue CPAddr;
3514   bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI();
3515   if (!IsPositionIndependent) {
3516     CPAddr = DAG.getTargetConstantPool(BA, PtrVT, Align(4));
3517   } else {
3518     unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
3519     ARMPCLabelIndex = AFI->createPICLabelUId();
3520     ARMConstantPoolValue *CPV =
3521       ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
3522                                       ARMCP::CPBlockAddress, PCAdj);
3523     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3524   }
3525   CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
3526   SDValue Result = DAG.getLoad(
3527       PtrVT, DL, DAG.getEntryNode(), CPAddr,
3528       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3529   if (!IsPositionIndependent)
3530     return Result;
3531   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
3532   return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
3533 }
3534 
3535 /// Convert a TLS address reference into the correct sequence of loads
3536 /// and calls to compute the variable's address for Darwin, and return an
3537 /// SDValue containing the final node.
3538 
3539 /// Darwin only has one TLS scheme which must be capable of dealing with the
3540 /// fully general situation, in the worst case. This means:
3541 ///     + "extern __thread" declaration.
3542 ///     + Defined in a possibly unknown dynamic library.
3543 ///
3544 /// The general system is that each __thread variable has a [3 x i32] descriptor
3545 /// which contains information used by the runtime to calculate the address. The
3546 /// only part of this the compiler needs to know about is the first word, which
3547 /// contains a function pointer that must be called with the address of the
3548 /// entire descriptor in "r0".
3549 ///
3550 /// Since this descriptor may be in a different unit, in general access must
3551 /// proceed along the usual ARM rules. A common sequence to produce is:
3552 ///
3553 ///     movw rT1, :lower16:_var$non_lazy_ptr
3554 ///     movt rT1, :upper16:_var$non_lazy_ptr
3555 ///     ldr r0, [rT1]
3556 ///     ldr rT2, [r0]
3557 ///     blx rT2
3558 ///     [...address now in r0...]
3559 SDValue
3560 ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
3561                                                SelectionDAG &DAG) const {
3562   assert(Subtarget->isTargetDarwin() &&
3563          "This function expects a Darwin target");
3564   SDLoc DL(Op);
3565 
  // The first step is to get the address of the actual global symbol. This
  // is where the TLS descriptor lives.
3568   SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);
3569 
3570   // The first entry in the descriptor is a function pointer that we must call
3571   // to obtain the address of the variable.
3572   SDValue Chain = DAG.getEntryNode();
3573   SDValue FuncTLVGet = DAG.getLoad(
3574       MVT::i32, DL, Chain, DescAddr,
3575       MachinePointerInfo::getGOT(DAG.getMachineFunction()), Align(4),
3576       MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable |
3577           MachineMemOperand::MOInvariant);
3578   Chain = FuncTLVGet.getValue(1);
3579 
3580   MachineFunction &F = DAG.getMachineFunction();
3581   MachineFrameInfo &MFI = F.getFrameInfo();
3582   MFI.setAdjustsStack(true);
3583 
3584   // TLS calls preserve all registers except those that absolutely must be
3585   // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
3586   // silly).
3587   auto TRI =
3588       getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo();
3589   auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
3590   const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());
3591 
  // Finally, we can make the call. This is just a degenerate version of a
  // normal ARM call node: r0 takes the address of the descriptor, and the
  // call returns the address of the variable in this thread.
3595   Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue());
3596   Chain =
3597       DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
3598                   Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32),
3599                   DAG.getRegisterMask(Mask), Chain.getValue(1));
3600   return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1));
3601 }
3602 
3603 SDValue
3604 ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,
3605                                                 SelectionDAG &DAG) const {
3606   assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
3607 
3608   SDValue Chain = DAG.getEntryNode();
3609   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3610   SDLoc DL(Op);
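
  // Roughly, the sequence built below corresponds to the following assembly
  // (an illustrative sketch only; register names are arbitrary):
  //   mrc  p15, #0, rT, c13, c0, #2   @ rT = TEB
  //   ldr  rA, [rT, #0x2c]            @ rA = ThreadLocalStoragePointer
  //   ldr  rI, =_tls_index
  //   ldr  rI, [rI]                   @ rI = this module's TLS index
  //   ldr  rD, [rA, rI, lsl #2]       @ rD = base of this module's TLS block
  //   ldr  rO, .Lsecrel_var           @ SECREL offset of the variable
  //   add  r0, rD, rO                 @ address of the variable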
3611 
3612   // Load the current TEB (thread environment block)
3613   SDValue Ops[] = {Chain,
3614                    DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32),
3615                    DAG.getTargetConstant(15, DL, MVT::i32),
3616                    DAG.getTargetConstant(0, DL, MVT::i32),
3617                    DAG.getTargetConstant(13, DL, MVT::i32),
3618                    DAG.getTargetConstant(0, DL, MVT::i32),
3619                    DAG.getTargetConstant(2, DL, MVT::i32)};
3620   SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
3621                                    DAG.getVTList(MVT::i32, MVT::Other), Ops);
3622 
3623   SDValue TEB = CurrentTEB.getValue(0);
3624   Chain = CurrentTEB.getValue(1);
3625 
3626   // Load the ThreadLocalStoragePointer from the TEB
3627   // A pointer to the TLS array is located at offset 0x2c from the TEB.
3628   SDValue TLSArray =
3629       DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL));
3630   TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
3631 
  // The pointer to this thread's TLS data area is at offset (TLS index * 4)
  // into the TLS array.
3634 
3635   // Load the TLS index from the C runtime
3636   SDValue TLSIndex =
3637       DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG);
3638   TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex);
3639   TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo());
3640 
3641   SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
3642                               DAG.getConstant(2, DL, MVT::i32));
3643   SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
3644                             DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
3645                             MachinePointerInfo());
3646 
3647   // Get the offset of the start of the .tls section (section base)
3648   const auto *GA = cast<GlobalAddressSDNode>(Op);
3649   auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL);
3650   SDValue Offset = DAG.getLoad(
3651       PtrVT, DL, Chain,
3652       DAG.getNode(ARMISD::Wrapper, DL, MVT::i32,
3653                   DAG.getTargetConstantPool(CPV, PtrVT, Align(4))),
3654       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3655 
3656   return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset);
3657 }
3658 
3659 // Lower ISD::GlobalTLSAddress using the "general dynamic" model
3660 SDValue
3661 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
3662                                                  SelectionDAG &DAG) const {
3663   SDLoc dl(GA);
3664   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3665   unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
3666   MachineFunction &MF = DAG.getMachineFunction();
3667   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3668   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3669   ARMConstantPoolValue *CPV =
3670     ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
3671                                     ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
3672   SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3673   Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
3674   Argument = DAG.getLoad(
3675       PtrVT, dl, DAG.getEntryNode(), Argument,
3676       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3677   SDValue Chain = Argument.getValue(1);
3678 
3679   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3680   Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
3681 
3682   // call __tls_get_addr.
3683   ArgListTy Args;
3684   ArgListEntry Entry;
3685   Entry.Node = Argument;
  Entry.Ty = Type::getInt32Ty(*DAG.getContext());
3687   Args.push_back(Entry);
3688 
3689   // FIXME: is there useful debug info available here?
3690   TargetLowering::CallLoweringInfo CLI(DAG);
3691   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3692       CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
3693       DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args));
3694 
3695   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3696   return CallResult.first;
3697 }
3698 
3699 // Lower ISD::GlobalTLSAddress using the "initial exec" or
3700 // "local exec" model.
3701 SDValue
3702 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
3703                                         SelectionDAG &DAG,
3704                                         TLSModel::Model model) const {
3705   const GlobalValue *GV = GA->getGlobal();
3706   SDLoc dl(GA);
3707   SDValue Offset;
3708   SDValue Chain = DAG.getEntryNode();
3709   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3710   // Get the Thread Pointer
3711   SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
3712 
3713   if (model == TLSModel::InitialExec) {
3714     MachineFunction &MF = DAG.getMachineFunction();
3715     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3716     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3717     // Initial exec model.
3718     unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
3719     ARMConstantPoolValue *CPV =
3720       ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
3721                                       ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
3722                                       true);
3723     Offset = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3724     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
3725     Offset = DAG.getLoad(
3726         PtrVT, dl, Chain, Offset,
3727         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3728     Chain = Offset.getValue(1);
3729 
3730     SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3731     Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
3732 
3733     Offset = DAG.getLoad(
3734         PtrVT, dl, Chain, Offset,
3735         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3736   } else {
3737     // local exec model
3738     assert(model == TLSModel::LocalExec);
3739     ARMConstantPoolValue *CPV =
3740       ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
3741     Offset = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3742     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
3743     Offset = DAG.getLoad(
3744         PtrVT, dl, Chain, Offset,
3745         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3746   }
3747 
  // The address of the thread local variable is the sum of the thread
  // pointer and the offset of the variable.
3750   return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
3751 }
3752 
3753 SDValue
3754 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
3755   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3756   if (DAG.getTarget().useEmulatedTLS())
3757     return LowerToTLSEmulatedModel(GA, DAG);
3758 
3759   if (Subtarget->isTargetDarwin())
3760     return LowerGlobalTLSAddressDarwin(Op, DAG);
3761 
3762   if (Subtarget->isTargetWindows())
3763     return LowerGlobalTLSAddressWindows(Op, DAG);
3764 
3765   // TODO: implement the "local dynamic" model
3766   assert(Subtarget->isTargetELF() && "Only ELF implemented here");
3767   TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());
3768 
3769   switch (model) {
3770     case TLSModel::GeneralDynamic:
3771     case TLSModel::LocalDynamic:
3772       return LowerToTLSGeneralDynamicModel(GA, DAG);
3773     case TLSModel::InitialExec:
3774     case TLSModel::LocalExec:
3775       return LowerToTLSExecModels(GA, DAG, model);
3776   }
3777   llvm_unreachable("bogus TLS model");
3778 }
3779 
3780 /// Return true if all users of V are within function F, looking through
3781 /// ConstantExprs.
3782 static bool allUsersAreInFunction(const Value *V, const Function *F) {
3783   SmallVector<const User*,4> Worklist(V->users());
3784   while (!Worklist.empty()) {
3785     auto *U = Worklist.pop_back_val();
3786     if (isa<ConstantExpr>(U)) {
3787       append_range(Worklist, U->users());
3788       continue;
3789     }
3790 
3791     auto *I = dyn_cast<Instruction>(U);
3792     if (!I || I->getParent()->getParent() != F)
3793       return false;
3794   }
3795   return true;
3796 }
3797 
3798 static SDValue promoteToConstantPool(const ARMTargetLowering *TLI,
3799                                      const GlobalValue *GV, SelectionDAG &DAG,
3800                                      EVT PtrVT, const SDLoc &dl) {
  // If we're creating a pool entry for a constant global with unnamed address,
  // and the global is small enough, we can emit it inline into the constant
  // pool to save ourselves an indirection.
3804   //
3805   // This is a win if the constant is only used in one function (so it doesn't
3806   // need to be duplicated) or duplicating the constant wouldn't increase code
3807   // size (implying the constant is no larger than 4 bytes).
3808   const Function &F = DAG.getMachineFunction().getFunction();
3809 
  // We rely on this decision to inline being idempotent and unrelated to the
  // use-site. We know that if we inline a variable at one use site, we'll
  // inline it elsewhere too (and reuse the constant pool entry). Fast-isel
  // doesn't know about this optimization, so bail out if it's enabled;
  // otherwise we could decide to inline here (and thus never emit the GV)
  // while fast-isel generated code still requires the GV.
3816   if (!EnableConstpoolPromotion ||
3817       DAG.getMachineFunction().getTarget().Options.EnableFastISel)
3818       return SDValue();
3819 
3820   auto *GVar = dyn_cast<GlobalVariable>(GV);
3821   if (!GVar || !GVar->hasInitializer() ||
3822       !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
3823       !GVar->hasLocalLinkage())
3824     return SDValue();
3825 
3826   // If we inline a value that contains relocations, we move the relocations
3827   // from .data to .text. This is not allowed in position-independent code.
3828   auto *Init = GVar->getInitializer();
3829   if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) &&
3830       Init->needsDynamicRelocation())
3831     return SDValue();
3832 
3833   // The constant islands pass can only really deal with alignment requests
3834   // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote
3835   // any type wanting greater alignment requirements than 4 bytes. We also
  // can only promote constants that are multiples of 4 bytes in size or
  // are paddable to a multiple of 4. Currently we only try to pad constants
  // that are strings for simplicity.
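  // For example, a 6-byte string initializer gets two trailing zero bytes so
  // that it occupies 8 bytes.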
3839   auto *CDAInit = dyn_cast<ConstantDataArray>(Init);
3840   unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType());
3841   Align PrefAlign = DAG.getDataLayout().getPreferredAlign(GVar);
3842   unsigned RequiredPadding = 4 - (Size % 4);
3843   bool PaddingPossible =
3844     RequiredPadding == 4 || (CDAInit && CDAInit->isString());
3845   if (!PaddingPossible || PrefAlign > 4 || Size > ConstpoolPromotionMaxSize ||
3846       Size == 0)
3847     return SDValue();
3848 
3849   unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
3850   MachineFunction &MF = DAG.getMachineFunction();
3851   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3852 
  // We can't bloat the constant pool too much, else the ConstantIslands pass
  // may fail to converge. If we haven't promoted this global yet (it may have
  // multiple uses), and promoting it would increase the constant pool size
  // (Size > 4), ensure we have space to do so up to
  // ConstpoolPromotionMaxTotal.
3857   if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4)
3858     if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >=
3859         ConstpoolPromotionMaxTotal)
3860       return SDValue();
3861 
3862   // This is only valid if all users are in a single function; we can't clone
3863   // the constant in general. The LLVM IR unnamed_addr allows merging
3864   // constants, but not cloning them.
3865   //
3866   // We could potentially allow cloning if we could prove all uses of the
3867   // constant in the current function don't care about the address, like
3868   // printf format strings. But that isn't implemented for now.
3869   if (!allUsersAreInFunction(GVar, &F))
3870     return SDValue();
3871 
3872   // We're going to inline this global. Pad it out if needed.
3873   if (RequiredPadding != 4) {
3874     StringRef S = CDAInit->getAsString();
3875 
3876     SmallVector<uint8_t,16> V(S.size());
3877     std::copy(S.bytes_begin(), S.bytes_end(), V.begin());
3878     while (RequiredPadding--)
3879       V.push_back(0);
3880     Init = ConstantDataArray::get(*DAG.getContext(), V);
3881   }
3882 
3883   auto CPVal = ARMConstantPoolConstant::Create(GVar, Init);
3884   SDValue CPAddr = DAG.getTargetConstantPool(CPVal, PtrVT, Align(4));
3885   if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) {
3886     AFI->markGlobalAsPromotedToConstantPool(GVar);
3887     AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() +
3888                                       PaddedSize - 4);
3889   }
3890   ++NumConstpoolPromoted;
3891   return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3892 }
3893 
3894 bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const {
3895   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
3896     if (!(GV = GA->getAliaseeObject()))
3897       return false;
3898   if (const auto *V = dyn_cast<GlobalVariable>(GV))
3899     return V->isConstant();
3900   return isa<Function>(GV);
3901 }
3902 
3903 SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op,
3904                                               SelectionDAG &DAG) const {
3905   switch (Subtarget->getTargetTriple().getObjectFormat()) {
3906   default: llvm_unreachable("unknown object format");
3907   case Triple::COFF:
3908     return LowerGlobalAddressWindows(Op, DAG);
3909   case Triple::ELF:
3910     return LowerGlobalAddressELF(Op, DAG);
3911   case Triple::MachO:
3912     return LowerGlobalAddressDarwin(Op, DAG);
3913   }
3914 }
3915 
3916 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
3917                                                  SelectionDAG &DAG) const {
3918   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3919   SDLoc dl(Op);
3920   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3921   const TargetMachine &TM = getTargetMachine();
3922   bool IsRO = isReadOnly(GV);
3923 
  // Promote to a constant pool entry only if we are not generating an
  // execute-only (XO) text section.
  if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) &&
      !Subtarget->genExecuteOnly())
3926     if (SDValue V = promoteToConstantPool(this, GV, DAG, PtrVT, dl))
3927       return V;
3928 
3929   if (isPositionIndependent()) {
3930     bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
3931     SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3932                                            UseGOT_PREL ? ARMII::MO_GOT : 0);
3933     SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
3934     if (UseGOT_PREL)
3935       Result =
3936           DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
3937                       MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3938     return Result;
3939   } else if (Subtarget->isROPI() && IsRO) {
3940     // PC-relative.
3941     SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
3942     SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
3943     return Result;
3944   } else if (Subtarget->isRWPI() && !IsRO) {
3945     // SB-relative.
3946     SDValue RelAddr;
3947     if (Subtarget->useMovt()) {
3948       ++NumMovwMovt;
3949       SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_SBREL);
3950       RelAddr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, G);
3951     } else { // use literal pool for address constant
3952       ARMConstantPoolValue *CPV =
3953         ARMConstantPoolConstant::Create(GV, ARMCP::SBREL);
3954       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3955       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3956       RelAddr = DAG.getLoad(
3957           PtrVT, dl, DAG.getEntryNode(), CPAddr,
3958           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3959     }
3960     SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT);
3961     SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, RelAddr);
3962     return Result;
3963   }
3964 
  // If we have T2 ops, we can materialize the address directly via a
  // movt/movw pair. This is always cheaper. If we need to generate
  // execute-only code and only have Thumb1 available, we can't use a constant
  // pool and are forced to use immediate relocations.
3969   if (Subtarget->useMovt() || Subtarget->genExecuteOnly()) {
3970     if (Subtarget->useMovt())
3971       ++NumMovwMovt;
3972     // FIXME: Once remat is capable of dealing with instructions with register
3973     // operands, expand this into two nodes.
3974     return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
3975                        DAG.getTargetGlobalAddress(GV, dl, PtrVT));
3976   } else {
3977     SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, Align(4));
3978     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3979     return DAG.getLoad(
3980         PtrVT, dl, DAG.getEntryNode(), CPAddr,
3981         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3982   }
3983 }
3984 
3985 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
3986                                                     SelectionDAG &DAG) const {
3987   assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3988          "ROPI/RWPI not currently supported for Darwin");
3989   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3990   SDLoc dl(Op);
3991   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3992 
3993   if (Subtarget->useMovt())
3994     ++NumMovwMovt;
3995 
3996   // FIXME: Once remat is capable of dealing with instructions with register
3997   // operands, expand this into multiple nodes
3998   unsigned Wrapper =
3999       isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper;
4000 
4001   SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
4002   SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);
4003 
4004   if (Subtarget->isGVIndirectSymbol(GV))
4005     Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
4006                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
4007   return Result;
4008 }
4009 
4010 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
4011                                                      SelectionDAG &DAG) const {
4012   assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
4013   assert(Subtarget->useMovt() &&
4014          "Windows on ARM expects to use movw/movt");
4015   assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
4016          "ROPI/RWPI not currently supported for Windows");
4017 
4018   const TargetMachine &TM = getTargetMachine();
4019   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
4020   ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG;
4021   if (GV->hasDLLImportStorageClass())
4022     TargetFlags = ARMII::MO_DLLIMPORT;
4023   else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
4024     TargetFlags = ARMII::MO_COFFSTUB;
4025   EVT PtrVT = getPointerTy(DAG.getDataLayout());
4026   SDValue Result;
4027   SDLoc DL(Op);
4028 
4029   ++NumMovwMovt;
4030 
4031   // FIXME: Once remat is capable of dealing with instructions with register
4032   // operands, expand this into two nodes.
4033   Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
4034                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*offset=*/0,
4035                                                   TargetFlags));
4036   if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB))
4037     Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
4038                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
4039   return Result;
4040 }
4041 
4042 SDValue
4043 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
4044   SDLoc dl(Op);
4045   SDValue Val = DAG.getConstant(0, dl, MVT::i32);
4046   return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
4047                      DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
4048                      Op.getOperand(1), Val);
4049 }
4050 
4051 SDValue
4052 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
4053   SDLoc dl(Op);
4054   return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
4055                      Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
4056 }
4057 
4058 SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
4059                                                       SelectionDAG &DAG) const {
4060   SDLoc dl(Op);
4061   return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other,
4062                      Op.getOperand(0));
4063 }
4064 
4065 SDValue ARMTargetLowering::LowerINTRINSIC_VOID(
4066     SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const {
4067   unsigned IntNo =
4068       cast<ConstantSDNode>(
4069           Op.getOperand(Op.getOperand(0).getValueType() == MVT::Other))
4070           ->getZExtValue();
4071   switch (IntNo) {
4072     default:
4073       return SDValue();  // Don't custom lower most intrinsics.
4074     case Intrinsic::arm_gnu_eabi_mcount: {
4075       MachineFunction &MF = DAG.getMachineFunction();
4076       EVT PtrVT = getPointerTy(DAG.getDataLayout());
4077       SDLoc dl(Op);
4078       SDValue Chain = Op.getOperand(0);
4079       // call "\01__gnu_mcount_nc"
4080       const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
4081       const uint32_t *Mask =
4082           ARI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
4083       assert(Mask && "Missing call preserved mask for calling convention");
      // Mark LR as an implicit live-in.
4085       Register Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
4086       SDValue ReturnAddress =
4087           DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, PtrVT);
4088       constexpr EVT ResultTys[] = {MVT::Other, MVT::Glue};
4089       SDValue Callee =
4090           DAG.getTargetExternalSymbol("\01__gnu_mcount_nc", PtrVT, 0);
4091       SDValue RegisterMask = DAG.getRegisterMask(Mask);
4092       if (Subtarget->isThumb())
4093         return SDValue(
4094             DAG.getMachineNode(
4095                 ARM::tBL_PUSHLR, dl, ResultTys,
4096                 {ReturnAddress, DAG.getTargetConstant(ARMCC::AL, dl, PtrVT),
4097                  DAG.getRegister(0, PtrVT), Callee, RegisterMask, Chain}),
4098             0);
4099       return SDValue(
4100           DAG.getMachineNode(ARM::BL_PUSHLR, dl, ResultTys,
4101                              {ReturnAddress, Callee, RegisterMask, Chain}),
4102           0);
4103     }
4104   }
4105 }
4106 
4107 SDValue
4108 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
4109                                           const ARMSubtarget *Subtarget) const {
4110   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4111   SDLoc dl(Op);
4112   switch (IntNo) {
4113   default: return SDValue();    // Don't custom lower most intrinsics.
4114   case Intrinsic::thread_pointer: {
4115     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4116     return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
4117   }
4118   case Intrinsic::arm_cls: {
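    // Expand cls(x) as clz((((x >> 31) ^ x) << 1) | 1), where the shift is
    // arithmetic: xor-ing with the broadcast sign bit turns leading sign bits
    // into leading zeros, and the trailing 1 keeps the clz input non-zero.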
4119     const SDValue &Operand = Op.getOperand(1);
4120     const EVT VTy = Op.getValueType();
4121     SDValue SRA =
4122         DAG.getNode(ISD::SRA, dl, VTy, Operand, DAG.getConstant(31, dl, VTy));
4123     SDValue XOR = DAG.getNode(ISD::XOR, dl, VTy, SRA, Operand);
4124     SDValue SHL =
4125         DAG.getNode(ISD::SHL, dl, VTy, XOR, DAG.getConstant(1, dl, VTy));
4126     SDValue OR =
4127         DAG.getNode(ISD::OR, dl, VTy, SHL, DAG.getConstant(1, dl, VTy));
4128     SDValue Result = DAG.getNode(ISD::CTLZ, dl, VTy, OR);
4129     return Result;
4130   }
4131   case Intrinsic::arm_cls64: {
4132     // cls(x) = if cls(hi(x)) != 31 then cls(hi(x))
4133     //          else 31 + clz(if hi(x) == 0 then lo(x) else not(lo(x)))
4134     const SDValue &Operand = Op.getOperand(1);
4135     const EVT VTy = Op.getValueType();
4136     SDValue Lo, Hi;
4137     std::tie(Lo, Hi) = DAG.SplitScalar(Operand, dl, VTy, VTy);
4138     SDValue Constant0 = DAG.getConstant(0, dl, VTy);
4139     SDValue Constant1 = DAG.getConstant(1, dl, VTy);
4140     SDValue Constant31 = DAG.getConstant(31, dl, VTy);
4141     SDValue SRAHi = DAG.getNode(ISD::SRA, dl, VTy, Hi, Constant31);
4142     SDValue XORHi = DAG.getNode(ISD::XOR, dl, VTy, SRAHi, Hi);
4143     SDValue SHLHi = DAG.getNode(ISD::SHL, dl, VTy, XORHi, Constant1);
4144     SDValue ORHi = DAG.getNode(ISD::OR, dl, VTy, SHLHi, Constant1);
4145     SDValue CLSHi = DAG.getNode(ISD::CTLZ, dl, VTy, ORHi);
4146     SDValue CheckLo =
4147         DAG.getSetCC(dl, MVT::i1, CLSHi, Constant31, ISD::CondCode::SETEQ);
4148     SDValue HiIsZero =
4149         DAG.getSetCC(dl, MVT::i1, Hi, Constant0, ISD::CondCode::SETEQ);
4150     SDValue AdjustedLo =
4151         DAG.getSelect(dl, VTy, HiIsZero, Lo, DAG.getNOT(dl, Lo, VTy));
4152     SDValue CLZAdjustedLo = DAG.getNode(ISD::CTLZ, dl, VTy, AdjustedLo);
    SDValue Result = DAG.getSelect(
        dl, VTy, CheckLo,
        DAG.getNode(ISD::ADD, dl, VTy, CLZAdjustedLo, Constant31), CLSHi);
4156     return Result;
4157   }
4158   case Intrinsic::eh_sjlj_lsda: {
4159     MachineFunction &MF = DAG.getMachineFunction();
4160     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
4161     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
4162     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4163     SDValue CPAddr;
4164     bool IsPositionIndependent = isPositionIndependent();
4165     unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
4166     ARMConstantPoolValue *CPV =
4167       ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex,
4168                                       ARMCP::CPLSDA, PCAdj);
4169     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
4170     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
4171     SDValue Result = DAG.getLoad(
4172         PtrVT, dl, DAG.getEntryNode(), CPAddr,
4173         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
4174 
4175     if (IsPositionIndependent) {
4176       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
4177       Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
4178     }
4179     return Result;
4180   }
4181   case Intrinsic::arm_neon_vabs:
4182     return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(),
4183                         Op.getOperand(1));
4184   case Intrinsic::arm_neon_vmulls:
4185   case Intrinsic::arm_neon_vmullu: {
4186     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
4187       ? ARMISD::VMULLs : ARMISD::VMULLu;
4188     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
4189                        Op.getOperand(1), Op.getOperand(2));
4190   }
4191   case Intrinsic::arm_neon_vminnm:
4192   case Intrinsic::arm_neon_vmaxnm: {
4193     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
4194       ? ISD::FMINNUM : ISD::FMAXNUM;
4195     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
4196                        Op.getOperand(1), Op.getOperand(2));
4197   }
4198   case Intrinsic::arm_neon_vminu:
4199   case Intrinsic::arm_neon_vmaxu: {
4200     if (Op.getValueType().isFloatingPoint())
4201       return SDValue();
4202     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
4203       ? ISD::UMIN : ISD::UMAX;
4204     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
4205                          Op.getOperand(1), Op.getOperand(2));
4206   }
4207   case Intrinsic::arm_neon_vmins:
4208   case Intrinsic::arm_neon_vmaxs: {
4209     // v{min,max}s is overloaded between signed integers and floats.
4210     if (!Op.getValueType().isFloatingPoint()) {
4211       unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
4212         ? ISD::SMIN : ISD::SMAX;
4213       return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
4214                          Op.getOperand(1), Op.getOperand(2));
4215     }
4216     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
4217       ? ISD::FMINIMUM : ISD::FMAXIMUM;
4218     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
4219                        Op.getOperand(1), Op.getOperand(2));
4220   }
4221   case Intrinsic::arm_neon_vtbl1:
4222     return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(),
4223                        Op.getOperand(1), Op.getOperand(2));
4224   case Intrinsic::arm_neon_vtbl2:
4225     return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(),
4226                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4227   case Intrinsic::arm_mve_pred_i2v:
4228   case Intrinsic::arm_mve_pred_v2i:
4229     return DAG.getNode(ARMISD::PREDICATE_CAST, SDLoc(Op), Op.getValueType(),
4230                        Op.getOperand(1));
4231   case Intrinsic::arm_mve_vreinterpretq:
4232     return DAG.getNode(ARMISD::VECTOR_REG_CAST, SDLoc(Op), Op.getValueType(),
4233                        Op.getOperand(1));
4234   case Intrinsic::arm_mve_lsll:
4235     return DAG.getNode(ARMISD::LSLL, SDLoc(Op), Op->getVTList(),
4236                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4237   case Intrinsic::arm_mve_asrl:
4238     return DAG.getNode(ARMISD::ASRL, SDLoc(Op), Op->getVTList(),
4239                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4240   }
4241 }
4242 
4243 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
4244                                  const ARMSubtarget *Subtarget) {
4245   SDLoc dl(Op);
4246   ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2));
4247   auto SSID = static_cast<SyncScope::ID>(SSIDNode->getZExtValue());
4248   if (SSID == SyncScope::SingleThread)
4249     return Op;
4250 
4251   if (!Subtarget->hasDataBarrier()) {
    // Some ARMv6 CPUs can support data barriers with an MCR instruction.
4253     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
4254     // here.
4255     assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
4256            "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
4257     return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
4258                        DAG.getConstant(0, dl, MVT::i32));
4259   }
4260 
4261   ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
4262   AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
4263   ARM_MB::MemBOpt Domain = ARM_MB::ISH;
4264   if (Subtarget->isMClass()) {
4265     // Only a full system barrier exists in the M-class architectures.
4266     Domain = ARM_MB::SY;
4267   } else if (Subtarget->preferISHSTBarriers() &&
4268              Ord == AtomicOrdering::Release) {
4269     // Swift happens to implement ISHST barriers in a way that's compatible with
4270     // Release semantics but weaker than ISH so we'd be fools not to use
4271     // it. Beware: other processors probably don't!
4272     Domain = ARM_MB::ISHST;
4273   }
4274 
4275   return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
4276                      DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32),
4277                      DAG.getConstant(Domain, dl, MVT::i32));
4278 }
4279 
4280 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
4281                              const ARMSubtarget *Subtarget) {
  // Pre-v5TE ARM and Thumb1 do not have preload instructions.
4283   if (!(Subtarget->isThumb2() ||
4284         (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
4285     // Just preserve the chain.
4286     return Op.getOperand(0);
4287 
4288   SDLoc dl(Op);
4289   unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
  if (!isRead &&
      (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
    // Only ARMv7 with the MP extension has PLDW (preload for write), so just
    // preserve the chain on other targets.
    return Op.getOperand(0);
4294 
4295   unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
4296   if (Subtarget->isThumb()) {
4297     // Invert the bits.
4298     isRead = ~isRead & 1;
4299     isData = ~isData & 1;
4300   }
4301 
4302   return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
4303                      Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32),
4304                      DAG.getConstant(isData, dl, MVT::i32));
4305 }
4306 
4307 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
4308   MachineFunction &MF = DAG.getMachineFunction();
4309   ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
4310 
4311   // vastart just stores the address of the VarArgsFrameIndex slot into the
4312   // memory location argument.
4313   SDLoc dl(Op);
4314   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4315   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4316   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
4317   return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
4318                       MachinePointerInfo(SV));
4319 }
4320 
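/// GetF64FormalArgument - Reassemble an f64 formal argument that the calling
/// convention split into two i32 halves: the first half arrives in a GPR, the
/// second either in the next GPR or in a stack slot. The halves are recombined
/// with VMOVDRR (swapped on big-endian targets).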
4321 SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
4322                                                 CCValAssign &NextVA,
4323                                                 SDValue &Root,
4324                                                 SelectionDAG &DAG,
4325                                                 const SDLoc &dl) const {
4326   MachineFunction &MF = DAG.getMachineFunction();
4327   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
4328 
4329   const TargetRegisterClass *RC;
4330   if (AFI->isThumb1OnlyFunction())
4331     RC = &ARM::tGPRRegClass;
4332   else
4333     RC = &ARM::GPRRegClass;
4334 
4335   // Transform the arguments stored in physical registers into virtual ones.
4336   Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
4337   SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
4338 
4339   SDValue ArgValue2;
4340   if (NextVA.isMemLoc()) {
4341     MachineFrameInfo &MFI = MF.getFrameInfo();
4342     int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true);
4343 
4344     // Create load node to retrieve arguments from the stack.
4345     SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
4346     ArgValue2 = DAG.getLoad(
4347         MVT::i32, dl, Root, FIN,
4348         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
4349   } else {
4350     Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
4351     ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
4352   }
4353   if (!Subtarget->isLittle())
4354     std::swap (ArgValue, ArgValue2);
4355   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
4356 }
4357 
4358 // The remaining GPRs hold either the beginning of variable-argument
4359 // data, or the beginning of an aggregate passed by value (usually
4360 // byval).  Either way, we allocate stack slots adjacent to the data
4361 // provided by our caller, and store the unallocated registers there.
4362 // If this is a variadic function, the va_list pointer will begin with
4363 // these values; otherwise, this reassembles a (byval) structure that
4364 // was split between registers and memory.
// Return: the frame index the registers were stored into.
4366 int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
4367                                       const SDLoc &dl, SDValue &Chain,
4368                                       const Value *OrigArg,
4369                                       unsigned InRegsParamRecordIdx,
4370                                       int ArgOffset, unsigned ArgSize) const {
  // Currently, two use cases are possible:
  // Case #1. Non-var-args function, and we meet the first byval parameter.
  //          Set up the first unallocated register as the first byval register
  //          and eat all remaining registers
  //          (these two actions are performed by the HandleByVal method).
  //          Then, here, we initialize the stack frame with
  //          "store-reg" instructions.
  // Case #2. Var-args function that doesn't contain byval parameters.
  //          The same: eat all remaining unallocated registers and
  //          initialize the stack frame.
4381 
4382   MachineFunction &MF = DAG.getMachineFunction();
4383   MachineFrameInfo &MFI = MF.getFrameInfo();
4384   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
4385   unsigned RBegin, REnd;
4386   if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
4387     CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
4388   } else {
4389     unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
4390     RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx];
4391     REnd = ARM::R4;
4392   }
4393 
4394   if (REnd != RBegin)
4395     ArgOffset = -4 * (ARM::R4 - RBegin);
4396 
4397   auto PtrVT = getPointerTy(DAG.getDataLayout());
4398   int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false);
4399   SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT);
4400 
4401   SmallVector<SDValue, 4> MemOps;
4402   const TargetRegisterClass *RC =
4403       AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
4404 
4405   for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
4406     Register VReg = MF.addLiveIn(Reg, RC);
4407     SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4408     SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4409                                  MachinePointerInfo(OrigArg, 4 * i));
4410     MemOps.push_back(Store);
4411     FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT));
4412   }
4413 
4414   if (!MemOps.empty())
4415     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4416   return FrameIndex;
4417 }
4418 
// Set up the stack frame that the va_list pointer will start from.
4420 void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
4421                                              const SDLoc &dl, SDValue &Chain,
4422                                              unsigned ArgOffset,
4423                                              unsigned TotalArgRegsSaveSize,
4424                                              bool ForceMutable) const {
4425   MachineFunction &MF = DAG.getMachineFunction();
4426   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
4427 
  // Try to store any remaining integer argument regs to their spots on the
  // stack so that they may be loaded by dereferencing the result of va_next.
  // If there are no regs to be stored, just point the address after the last
  // argument passed via the stack.
4433   int FrameIndex = StoreByValRegs(
4434       CCInfo, DAG, dl, Chain, nullptr, CCInfo.getInRegsParamsCount(),
4435       CCInfo.getStackSize(), std::max(4U, TotalArgRegsSaveSize));
4436   AFI->setVarArgsFrameIndex(FrameIndex);
4437 }
4438 
4439 bool ARMTargetLowering::splitValueIntoRegisterParts(
4440     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
4441     unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
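  // An f16/bf16 value passed in an f32 register occupies the low 16 bits of
  // that register; materialize that layout here as bitcast -> any_extend ->
  // bitcast.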
4442   EVT ValueVT = Val.getValueType();
4443   if ((ValueVT == MVT::f16 || ValueVT == MVT::bf16) && PartVT == MVT::f32) {
4444     unsigned ValueBits = ValueVT.getSizeInBits();
4445     unsigned PartBits = PartVT.getSizeInBits();
4446     Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(ValueBits), Val);
4447     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::getIntegerVT(PartBits), Val);
4448     Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
4449     Parts[0] = Val;
4450     return true;
4451   }
4452   return false;
4453 }
4454 
4455 SDValue ARMTargetLowering::joinRegisterPartsIntoValue(
4456     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
4457     MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
4458   if ((ValueVT == MVT::f16 || ValueVT == MVT::bf16) && PartVT == MVT::f32) {
4459     unsigned ValueBits = ValueVT.getSizeInBits();
4460     unsigned PartBits = PartVT.getSizeInBits();
4461     SDValue Val = Parts[0];
4462 
4463     Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(PartBits), Val);
4464     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::getIntegerVT(ValueBits), Val);
4465     Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
4466     return Val;
4467   }
4468   return SDValue();
4469 }
4470 
4471 SDValue ARMTargetLowering::LowerFormalArguments(
4472     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4473     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4474     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4475   MachineFunction &MF = DAG.getMachineFunction();
4476   MachineFrameInfo &MFI = MF.getFrameInfo();
4477 
4478   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
4479 
4480   // Assign locations to all of the incoming arguments.
4481   SmallVector<CCValAssign, 16> ArgLocs;
4482   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
4483                  *DAG.getContext());
4484   CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));
4485 
4486   SmallVector<SDValue, 16> ArgValues;
4487   SDValue ArgValue;
4488   Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
4489   unsigned CurArgIdx = 0;
4490 
  // Initially ArgRegsSaveSize is zero.
  // Then we increase this value each time we meet a byval parameter.
  // We also increase this value in case of a varargs function.
4494   AFI->setArgRegsSaveSize(0);
4495 
4496   // Calculate the amount of stack space that we need to allocate to store
4497   // byval and variadic arguments that are passed in registers.
4498   // We need to know this before we allocate the first byval or variadic
4499   // argument, as they will be allocated a stack slot below the CFA (Canonical
4500   // Frame Address, the stack pointer at entry to the function).
4501   unsigned ArgRegBegin = ARM::R4;
4502   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4503     if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
4504       break;
4505 
4506     CCValAssign &VA = ArgLocs[i];
4507     unsigned Index = VA.getValNo();
4508     ISD::ArgFlagsTy Flags = Ins[Index].Flags;
4509     if (!Flags.isByVal())
4510       continue;
4511 
4512     assert(VA.isMemLoc() && "unexpected byval pointer in reg");
4513     unsigned RBegin, REnd;
4514     CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd);
4515     ArgRegBegin = std::min(ArgRegBegin, RBegin);
4516 
4517     CCInfo.nextInRegsParam();
4518   }
4519   CCInfo.rewindByValRegsInfo();
4520 
4521   int lastInsIndex = -1;
4522   if (isVarArg && MFI.hasVAStart()) {
4523     unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
4524     if (RegIdx != std::size(GPRArgRegs))
4525       ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
4526   }
4527 
4528   unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
4529   AFI->setArgRegsSaveSize(TotalArgRegsSaveSize);
4530   auto PtrVT = getPointerTy(DAG.getDataLayout());
4531 
4532   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4533     CCValAssign &VA = ArgLocs[i];
4534     if (Ins[VA.getValNo()].isOrigArg()) {
4535       std::advance(CurOrigArg,
4536                    Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
4537       CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
4538     }
4539     // Arguments stored in registers.
4540     if (VA.isRegLoc()) {
4541       EVT RegVT = VA.getLocVT();
4542 
4543       if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) {
4544         // f64 and vector types are split up into multiple registers or
4545         // combinations of registers and stack slots.
4546         SDValue ArgValue1 =
4547             GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
4548         VA = ArgLocs[++i]; // skip ahead to next loc
4549         SDValue ArgValue2;
4550         if (VA.isMemLoc()) {
4551           int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true);
4552           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4553           ArgValue2 = DAG.getLoad(
4554               MVT::f64, dl, Chain, FIN,
4555               MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
4556         } else {
4557           ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
4558         }
4559         ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
4560         ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue,
4561                                ArgValue1, DAG.getIntPtrConstant(0, dl));
4562         ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue,
4563                                ArgValue2, DAG.getIntPtrConstant(1, dl));
4564       } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) {
4565         ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
4566       } else {
4567         const TargetRegisterClass *RC;
4568 
4569         if (RegVT == MVT::f16 || RegVT == MVT::bf16)
4570           RC = &ARM::HPRRegClass;
4571         else if (RegVT == MVT::f32)
4572           RC = &ARM::SPRRegClass;
4573         else if (RegVT == MVT::f64 || RegVT == MVT::v4f16 ||
4574                  RegVT == MVT::v4bf16)
4575           RC = &ARM::DPRRegClass;
4576         else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16 ||
4577                  RegVT == MVT::v8bf16)
4578           RC = &ARM::QPRRegClass;
4579         else if (RegVT == MVT::i32)
4580           RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
4581                                            : &ARM::GPRRegClass;
4582         else
4583           llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
4584 
4585         // Transform the arguments in physical registers into virtual ones.
4586         Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
4587         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
4588 
4589         // If this value is passed in r0 and has the returned attribute (e.g.
4590         // C++ 'structors), record this fact for later use.
        if (VA.getLocReg() == ARM::R0 &&
            Ins[VA.getValNo()].Flags.isReturned()) {
4592           AFI->setPreservesR0();
4593         }
4594       }
4595 
4596       // If this is an 8 or 16-bit value, it is really passed promoted
4597       // to 32 bits.  Insert an assert[sz]ext to capture this, then
4598       // truncate to the right size.
4599       switch (VA.getLocInfo()) {
4600       default: llvm_unreachable("Unknown loc info!");
4601       case CCValAssign::Full: break;
4602       case CCValAssign::BCvt:
4603         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
4604         break;
4605       case CCValAssign::SExt:
4606         ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
4607                                DAG.getValueType(VA.getValVT()));
4608         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
4609         break;
4610       case CCValAssign::ZExt:
4611         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
4612                                DAG.getValueType(VA.getValVT()));
4613         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
4614         break;
4615       }
4616 
      // f16 arguments have their size extended to 4 bytes and are passed as if
      // they had been copied to the LSBs of a 32-bit register.
      // For that, the value is passed extended to i32 (soft ABI) or to f32
      // (hard ABI).
4620       if (VA.needsCustom() &&
4621           (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
4622         ArgValue = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), ArgValue);
4623 
4624       InVals.push_back(ArgValue);
4625     } else { // VA.isRegLoc()
4626       // Only arguments passed on the stack should make it here.
4627       assert(VA.isMemLoc());
4628       assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
4629 
4630       int index = VA.getValNo();
4631 
4632       // Some Ins[] entries become multiple ArgLoc[] entries.
4633       // Process them only once.
4634       if (index != lastInsIndex)
4635         {
4636           ISD::ArgFlagsTy Flags = Ins[index].Flags;
4637           // FIXME: For now, all byval parameter objects are marked mutable.
4638           // This can be changed with more analysis.
          // In the case of tail call optimization, mark all arguments mutable,
          // since they could be overwritten by the lowering of arguments in
          // case of a tail call.
4642           if (Flags.isByVal()) {
4643             assert(Ins[index].isOrigArg() &&
4644                    "Byval arguments cannot be implicit");
4645             unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();
4646 
4647             int FrameIndex = StoreByValRegs(
4648                 CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
4649                 VA.getLocMemOffset(), Flags.getByValSize());
4650             InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT));
4651             CCInfo.nextInRegsParam();
4652           } else {
4653             unsigned FIOffset = VA.getLocMemOffset();
4654             int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
4655                                            FIOffset, true);
4656 
4657             // Create load nodes to retrieve arguments from the stack.
4658             SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4659             InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
4660                                          MachinePointerInfo::getFixedStack(
4661                                              DAG.getMachineFunction(), FI)));
4662           }
4663           lastInsIndex = index;
4664         }
4665     }
4666   }
4667 
4668   // varargs
4669   if (isVarArg && MFI.hasVAStart()) {
4670     VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getStackSize(),
4671                          TotalArgRegsSaveSize);
4672     if (AFI->isCmseNSEntryFunction()) {
4673       DiagnosticInfoUnsupported Diag(
4674           DAG.getMachineFunction().getFunction(),
4675           "secure entry function must not be variadic", dl.getDebugLoc());
4676       DAG.getContext()->diagnose(Diag);
4677     }
4678   }
4679 
4680   unsigned StackArgSize = CCInfo.getStackSize();
4681   bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
4682   if (canGuaranteeTCO(CallConv, TailCallOpt)) {
4683     // The only way to guarantee a tail call is if the callee restores its
4684     // argument area, but it must also keep the stack aligned when doing so.
4685     const DataLayout &DL = DAG.getDataLayout();
4686     StackArgSize = alignTo(StackArgSize, DL.getStackAlignment());
4687 
4688     AFI->setArgumentStackToRestore(StackArgSize);
4689   }
4690   AFI->setArgumentStackSize(StackArgSize);
4691 
4692   if (CCInfo.getStackSize() > 0 && AFI->isCmseNSEntryFunction()) {
4693     DiagnosticInfoUnsupported Diag(
4694         DAG.getMachineFunction().getFunction(),
4695         "secure entry function requires arguments on stack", dl.getDebugLoc());
4696     DAG.getContext()->diagnose(Diag);
4697   }
4698 
4699   return Chain;
4700 }
4701 
4702 /// isFloatingPointZero - Return true if this is +0.0.
4703 static bool isFloatingPointZero(SDValue Op) {
4704   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
4705     return CFP->getValueAPF().isPosZero();
4706   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
4707     // Maybe this has already been legalized into the constant pool?
4708     if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
4709       SDValue WrapperOp = Op.getOperand(1).getOperand(0);
4710       if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
4711         if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
4712           return CFP->getValueAPF().isPosZero();
4713     }
4714   } else if (Op->getOpcode() == ISD::BITCAST &&
4715              Op->getValueType(0) == MVT::f64) {
4716     // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
4717     // created by LowerConstantFP().
4718     SDValue BitcastOp = Op->getOperand(0);
4719     if (BitcastOp->getOpcode() == ARMISD::VMOVIMM &&
4720         isNullConstant(BitcastOp->getOperand(0)))
4721       return true;
4722   }
4723   return false;
4724 }
4725 
/// Returns the appropriate ARM CMP (cmp) node and the corresponding condition
/// code for the given operands.
4728 SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
4729                                      SDValue &ARMcc, SelectionDAG &DAG,
4730                                      const SDLoc &dl) const {
4731   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
4732     unsigned C = RHSC->getZExtValue();
4733     if (!isLegalICmpImmediate((int32_t)C)) {
4734       // Constant does not fit, try adjusting it by one.
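      // For example, if 0x1000 is encodable but 0x1001 is not, "x < 0x1001"
      // can instead be checked as "x <= 0x1000".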
4735       switch (CC) {
4736       default: break;
4737       case ISD::SETLT:
4738       case ISD::SETGE:
4739         if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
4740           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
4741           RHS = DAG.getConstant(C - 1, dl, MVT::i32);
4742         }
4743         break;
4744       case ISD::SETULT:
4745       case ISD::SETUGE:
4746         if (C != 0 && isLegalICmpImmediate(C-1)) {
4747           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
4748           RHS = DAG.getConstant(C - 1, dl, MVT::i32);
4749         }
4750         break;
4751       case ISD::SETLE:
4752       case ISD::SETGT:
4753         if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
4754           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
4755           RHS = DAG.getConstant(C + 1, dl, MVT::i32);
4756         }
4757         break;
4758       case ISD::SETULE:
4759       case ISD::SETUGT:
4760         if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
4761           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
4762           RHS = DAG.getConstant(C + 1, dl, MVT::i32);
4763         }
4764         break;
4765       }
4766     }
4767   } else if ((ARM_AM::getShiftOpcForNode(LHS.getOpcode()) != ARM_AM::no_shift) &&
4768              (ARM_AM::getShiftOpcForNode(RHS.getOpcode()) == ARM_AM::no_shift)) {
4769     // In ARM and Thumb-2, the compare instructions can shift their second
4770     // operand.
4771     CC = ISD::getSetCCSwappedOperands(CC);
4772     std::swap(LHS, RHS);
4773   }
4774 
4775   // Thumb1 has very limited immediate modes, so turning an "and" into a
4776   // shift can save multiple instructions.
4777   //
4778   // If we have (x & C1), and C1 is an appropriate mask, we can transform it
4779   // into "((x << n) >> n)".  But that isn't necessarily profitable on its
4780   // own. If it's the operand to an unsigned comparison with an immediate,
4781   // we can eliminate one of the shifts: we transform
4782   // "((x << n) >> n) == C2" to "(x << n) == (C2 << n)".
4783   //
4784   // We avoid transforming cases which aren't profitable due to encoding
4785   // details:
4786   //
4787   // 1. C2 fits into the immediate field of a cmp, and the transformed version
4788   // would not; in that case, we're essentially trading one immediate load for
4789   // another.
4790   // 2. C1 is 255 or 65535, so we can use uxtb or uxth.
4791   // 3. C2 is zero; we have other code for this special case.
4792   //
4793   // FIXME: Figure out profitability for Thumb2; we usually can't save an
4794   // instruction, since the AND is always one instruction anyway, but we could
4795   // use narrow instructions in some cases.
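  // For example, "(x & 0x3ff) == 0x300" becomes "(x << 22) == 0xc0000000".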
4796   if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::AND &&
4797       LHS->hasOneUse() && isa<ConstantSDNode>(LHS.getOperand(1)) &&
4798       LHS.getValueType() == MVT::i32 && isa<ConstantSDNode>(RHS) &&
4799       !isSignedIntSetCC(CC)) {
4800     unsigned Mask = cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue();
4801     auto *RHSC = cast<ConstantSDNode>(RHS.getNode());
4802     uint64_t RHSV = RHSC->getZExtValue();
4803     if (isMask_32(Mask) && (RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) {
4804       unsigned ShiftBits = llvm::countl_zero(Mask);
4805       if (RHSV && (RHSV > 255 || (RHSV << ShiftBits) <= 255)) {
4806         SDValue ShiftAmt = DAG.getConstant(ShiftBits, dl, MVT::i32);
4807         LHS = DAG.getNode(ISD::SHL, dl, MVT::i32, LHS.getOperand(0), ShiftAmt);
4808         RHS = DAG.getConstant(RHSV << ShiftBits, dl, MVT::i32);
4809       }
4810     }
4811   }
4812 
4813   // The specific comparison "(x<<c) > 0x80000000U" can be optimized to a
4814   // single "lsls x, c+1".  The shift sets the "C" and "Z" flags the same
4815   // way a cmp would.
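  // For example, "(x << 3) > 0x80000000U" becomes "lsls x, #4" followed by a
  // HI condition check.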
4816   // FIXME: Add support for ARM/Thumb2; this would need isel patterns, and
4817   // some tweaks to the heuristics for the previous and->shift transform.
4818   // FIXME: Optimize cases where the LHS isn't a shift.
4819   if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL &&
4820       isa<ConstantSDNode>(RHS) &&
4821       cast<ConstantSDNode>(RHS)->getZExtValue() == 0x80000000U &&
4822       CC == ISD::SETUGT && isa<ConstantSDNode>(LHS.getOperand(1)) &&
4823       cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() < 31) {
4824     unsigned ShiftAmt =
4825       cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() + 1;
4826     SDValue Shift = DAG.getNode(ARMISD::LSLS, dl,
4827                                 DAG.getVTList(MVT::i32, MVT::i32),
4828                                 LHS.getOperand(0),
4829                                 DAG.getConstant(ShiftAmt, dl, MVT::i32));
4830     SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
4831                                      Shift.getValue(1), SDValue());
4832     ARMcc = DAG.getConstant(ARMCC::HI, dl, MVT::i32);
4833     return Chain.getValue(1);
4834   }
4835 
4836   ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
4837 
4838   // If the RHS is a constant zero then the V (overflow) flag will never be
4839   // set. This can allow us to simplify GE to PL or LT to MI, which can be
4840   // simpler for other passes (like the peephole optimiser) to deal with.
4841   if (isNullConstant(RHS)) {
4842     switch (CondCode) {
4843       default: break;
4844       case ARMCC::GE:
4845         CondCode = ARMCC::PL;
4846         break;
4847       case ARMCC::LT:
4848         CondCode = ARMCC::MI;
4849         break;
4850     }
4851   }
4852 
4853   ARMISD::NodeType CompareType;
4854   switch (CondCode) {
4855   default:
4856     CompareType = ARMISD::CMP;
4857     break;
4858   case ARMCC::EQ:
4859   case ARMCC::NE:
4860     // Uses only Z Flag
4861     CompareType = ARMISD::CMPZ;
4862     break;
4863   }
4864   ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4865   return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
4866 }
4867 
/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
4869 SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
4870                                      SelectionDAG &DAG, const SDLoc &dl,
4871                                      bool Signaling) const {
4872   assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64);
4873   SDValue Cmp;
4874   if (!isFloatingPointZero(RHS))
4875     Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPE : ARMISD::CMPFP,
4876                       dl, MVT::Glue, LHS, RHS);
4877   else
4878     Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPEw0 : ARMISD::CMPFPw0,
4879                       dl, MVT::Glue, LHS);
4880   return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
4881 }
4882 
4883 /// duplicateCmp - Glue values can have only one use, so this function
4884 /// duplicates a comparison node.
4885 SDValue
4886 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
4887   unsigned Opc = Cmp.getOpcode();
4888   SDLoc DL(Cmp);
4889   if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
4890     return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
4891 
4892   assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
4893   Cmp = Cmp.getOperand(0);
4894   Opc = Cmp.getOpcode();
4895   if (Opc == ARMISD::CMPFP)
4896     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
4897   else {
4898     assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
4899     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
4900   }
4901   return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
4902 }
4903 
// This function produces three things: the arithmetic computation itself
// (Value) and a comparison (OverflowCmp), returned as a pair, plus a
// condition code (ARMcc) written through the out-parameter. The comparison
// and the condition code define the case in which the arithmetic computation
// *does not* overflow.
4908 std::pair<SDValue, SDValue>
4909 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
4910                                  SDValue &ARMcc) const {
4911   assert(Op.getValueType() == MVT::i32 &&  "Unsupported value type");
4912 
4913   SDValue Value, OverflowCmp;
4914   SDValue LHS = Op.getOperand(0);
4915   SDValue RHS = Op.getOperand(1);
4916   SDLoc dl(Op);
4917 
4918   // FIXME: We are currently always generating CMPs because we don't support
4919   // generating CMN through the backend. This is not as good as the natural
4920   // CMP case because it causes a register dependency and cannot be folded
4921   // later.
4922 
4923   switch (Op.getOpcode()) {
4924   default:
4925     llvm_unreachable("Unknown overflow instruction!");
4926   case ISD::SADDO:
4927     ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
4928     Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
4929     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
4930     break;
4931   case ISD::UADDO:
4932     ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
4933     // We use ADDC here to correspond to its use in LowerUnsignedALUO.
4934     // We do not use it in the USUBO case as Value may not be used.
4935     Value = DAG.getNode(ARMISD::ADDC, dl,
4936                         DAG.getVTList(Op.getValueType(), MVT::i32), LHS, RHS)
4937                 .getValue(0);
4938     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
4939     break;
4940   case ISD::SSUBO:
4941     ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
4942     Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
4943     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
4944     break;
4945   case ISD::USUBO:
4946     ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
4947     Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
4948     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
4949     break;
4950   case ISD::UMULO:
4951     // We generate a UMUL_LOHI and then check if the high word is 0.
4952     ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32);
4953     Value = DAG.getNode(ISD::UMUL_LOHI, dl,
4954                         DAG.getVTList(Op.getValueType(), Op.getValueType()),
4955                         LHS, RHS);
4956     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1),
4957                               DAG.getConstant(0, dl, MVT::i32));
4958     Value = Value.getValue(0); // We only want the low 32 bits for the result.
4959     break;
4960   case ISD::SMULO:
4961     // We generate a SMUL_LOHI and then check if all the bits of the high word
4962     // are the same as the sign bit of the low word.
4963     ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32);
4964     Value = DAG.getNode(ISD::SMUL_LOHI, dl,
4965                         DAG.getVTList(Op.getValueType(), Op.getValueType()),
4966                         LHS, RHS);
4967     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1),
4968                               DAG.getNode(ISD::SRA, dl, Op.getValueType(),
4969                                           Value.getValue(0),
4970                                           DAG.getConstant(31, dl, MVT::i32)));
4971     Value = Value.getValue(0); // We only want the low 32 bits for the result.
4972     break;
4973   } // switch (...)
4974 
4975   return std::make_pair(Value, OverflowCmp);
4976 }
4977 
4978 SDValue
4979 ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const {
4980   // Let legalize expand this if it isn't a legal type yet.
4981   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
4982     return SDValue();
4983 
4984   SDValue Value, OverflowCmp;
4985   SDValue ARMcc;
4986   std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
4987   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4988   SDLoc dl(Op);
4989   // We use 0 and 1 as false and true values.
4990   SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
4991   SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
4992   EVT VT = Op.getValueType();
4993 
4994   SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal,
4995                                  ARMcc, CCR, OverflowCmp);
4996 
4997   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
4998   return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
4999 }
5000 
5001 static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry,
5002                                               SelectionDAG &DAG) {
5003   SDLoc DL(BoolCarry);
5004   EVT CarryVT = BoolCarry.getValueType();
5005 
5006   // This converts the boolean value carry into the carry flag by doing
5007   // ARMISD::SUBC Carry, 1
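  // (On ARM a subtraction sets C to 1 exactly when no borrow occurs, so a
  // boolean carry of 1 yields C = 1 and a boolean carry of 0 yields C = 0.)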
5008   SDValue Carry = DAG.getNode(ARMISD::SUBC, DL,
5009                               DAG.getVTList(CarryVT, MVT::i32),
5010                               BoolCarry, DAG.getConstant(1, DL, CarryVT));
5011   return Carry.getValue(1);
5012 }
5013 
5014 static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT,
5015                                               SelectionDAG &DAG) {
5016   SDLoc DL(Flags);
5017 
  // Now convert the carry flag into a boolean carry. We do this
  // using ARMISD::ADDE 0, 0, Carry.
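  // (0 + 0 + C is simply C, so the i32 result is 1 when the flag is set and
  // 0 otherwise.)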
5020   return DAG.getNode(ARMISD::ADDE, DL, DAG.getVTList(VT, MVT::i32),
5021                      DAG.getConstant(0, DL, MVT::i32),
5022                      DAG.getConstant(0, DL, MVT::i32), Flags);
5023 }
5024 
5025 SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op,
5026                                              SelectionDAG &DAG) const {
5027   // Let legalize expand this if it isn't a legal type yet.
5028   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
5029     return SDValue();
5030 
5031   SDValue LHS = Op.getOperand(0);
5032   SDValue RHS = Op.getOperand(1);
5033   SDLoc dl(Op);
5034 
5035   EVT VT = Op.getValueType();
5036   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
5037   SDValue Value;
5038   SDValue Overflow;
5039   switch (Op.getOpcode()) {
5040   default:
5041     llvm_unreachable("Unknown overflow instruction!");
5042   case ISD::UADDO:
5043     Value = DAG.getNode(ARMISD::ADDC, dl, VTs, LHS, RHS);
5044     // Convert the carry flag into a boolean value.
5045     Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG);
5046     break;
5047   case ISD::USUBO: {
5048     Value = DAG.getNode(ARMISD::SUBC, dl, VTs, LHS, RHS);
5049     // Convert the carry flag into a boolean value.
5050     Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG);
    // ARMISD::SUBC produces a carry of 0 when we have to borrow, so turn it
    // into an overflow value by computing 1 - C.
5053     Overflow = DAG.getNode(ISD::SUB, dl, MVT::i32,
5054                            DAG.getConstant(1, dl, MVT::i32), Overflow);
5055     break;
5056   }
5057   }
5058 
5059   return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
5060 }
5061 
5062 static SDValue LowerADDSUBSAT(SDValue Op, SelectionDAG &DAG,
5063                               const ARMSubtarget *Subtarget) {
5064   EVT VT = Op.getValueType();
  if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP() ||
      Subtarget->isThumb1Only())
5066     return SDValue();
5067   if (!VT.isSimple())
5068     return SDValue();
5069 
5070   unsigned NewOpcode;
5071   switch (VT.getSimpleVT().SimpleTy) {
5072   default:
5073     return SDValue();
5074   case MVT::i8:
5075     switch (Op->getOpcode()) {
5076     case ISD::UADDSAT:
5077       NewOpcode = ARMISD::UQADD8b;
5078       break;
5079     case ISD::SADDSAT:
5080       NewOpcode = ARMISD::QADD8b;
5081       break;
5082     case ISD::USUBSAT:
5083       NewOpcode = ARMISD::UQSUB8b;
5084       break;
5085     case ISD::SSUBSAT:
5086       NewOpcode = ARMISD::QSUB8b;
5087       break;
5088     }
5089     break;
5090   case MVT::i16:
5091     switch (Op->getOpcode()) {
5092     case ISD::UADDSAT:
5093       NewOpcode = ARMISD::UQADD16b;
5094       break;
5095     case ISD::SADDSAT:
5096       NewOpcode = ARMISD::QADD16b;
5097       break;
5098     case ISD::USUBSAT:
5099       NewOpcode = ARMISD::UQSUB16b;
5100       break;
5101     case ISD::SSUBSAT:
5102       NewOpcode = ARMISD::QSUB16b;
5103       break;
5104     }
5105     break;
5106   }
5107 
5108   SDLoc dl(Op);
5109   SDValue Add =
5110       DAG.getNode(NewOpcode, dl, MVT::i32,
5111                   DAG.getSExtOrTrunc(Op->getOperand(0), dl, MVT::i32),
5112                   DAG.getSExtOrTrunc(Op->getOperand(1), dl, MVT::i32));
5113   return DAG.getNode(ISD::TRUNCATE, dl, VT, Add);
5114 }
5115 
5116 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
5117   SDValue Cond = Op.getOperand(0);
5118   SDValue SelectTrue = Op.getOperand(1);
5119   SDValue SelectFalse = Op.getOperand(2);
5120   SDLoc dl(Op);
5121   unsigned Opc = Cond.getOpcode();
5122 
5123   if (Cond.getResNo() == 1 &&
5124       (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
5125        Opc == ISD::USUBO)) {
5126     if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
5127       return SDValue();
5128 
5129     SDValue Value, OverflowCmp;
5130     SDValue ARMcc;
5131     std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
5132     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5133     EVT VT = Op.getValueType();
5134 
5135     return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
5136                    OverflowCmp, DAG);
5137   }
5138 
5139   // Convert:
5140   //
5141   //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
5142   //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
5143   //
5144   if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
5145     const ConstantSDNode *CMOVTrue =
5146       dyn_cast<ConstantSDNode>(Cond.getOperand(0));
5147     const ConstantSDNode *CMOVFalse =
5148       dyn_cast<ConstantSDNode>(Cond.getOperand(1));
5149 
5150     if (CMOVTrue && CMOVFalse) {
5151       unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
5152       unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
5153 
5154       SDValue True;
5155       SDValue False;
5156       if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
5157         True = SelectTrue;
5158         False = SelectFalse;
5159       } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
5160         True = SelectFalse;
5161         False = SelectTrue;
5162       }
5163 
5164       if (True.getNode() && False.getNode()) {
5165         EVT VT = Op.getValueType();
5166         SDValue ARMcc = Cond.getOperand(2);
5167         SDValue CCR = Cond.getOperand(3);
5168         SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
5169         assert(True.getValueType() == VT);
5170         return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
5171       }
5172     }
5173   }
5174 
5175   // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
5176   // undefined bits before doing a full-word comparison with zero.
5177   Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
5178                      DAG.getConstant(1, dl, Cond.getValueType()));
5179 
5180   return DAG.getSelectCC(dl, Cond,
5181                          DAG.getConstant(0, dl, Cond.getValueType()),
5182                          SelectTrue, SelectFalse, ISD::SETNE);
5183 }
5184 
5185 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
5186                                  bool &swpCmpOps, bool &swpVselOps) {
5187   // Start by selecting the GE condition code for opcodes that return true for
5188   // 'equality'
5189   if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
5190       CC == ISD::SETULE || CC == ISD::SETGE  || CC == ISD::SETLE)
5191     CondCode = ARMCC::GE;
5192 
5193   // and GT for opcodes that return false for 'equality'.
5194   else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
5195            CC == ISD::SETULT || CC == ISD::SETGT  || CC == ISD::SETLT)
5196     CondCode = ARMCC::GT;
5197 
5198   // Since we are constrained to GE/GT, if the opcode contains 'less', we need
5199   // to swap the compare operands.
5200   if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
5201       CC == ISD::SETULT || CC == ISD::SETLE  || CC == ISD::SETLT)
5202     swpCmpOps = true;
5203 
5204   // Both GT and GE are ordered comparisons, and return false for 'unordered'.
5205   // If we have an unordered opcode, we need to swap the operands to the VSEL
5206   // instruction (effectively negating the condition).
5207   //
5208   // This also has the effect of swapping which one of 'less' or 'greater'
5209   // returns true, so we also swap the compare operands. It also switches
5210   // whether we return true for 'equality', so we compensate by picking the
5211   // opposite condition code to our original choice.
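  // Illustrative walk-through: SETULT first picks GT and swaps the compare
  // operands; the unordered fix-up below then undoes the compare swap, swaps
  // the VSEL operands instead and flips GT to GE, implementing the negated
  // condition SETOGE.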
5212   if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
5213       CC == ISD::SETUGT) {
5214     swpCmpOps = !swpCmpOps;
5215     swpVselOps = !swpVselOps;
5216     CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
5217   }
5218 
5219   // 'ordered' is 'anything but unordered', so use the VS condition code and
5220   // swap the VSEL operands.
5221   if (CC == ISD::SETO) {
5222     CondCode = ARMCC::VS;
5223     swpVselOps = true;
5224   }
5225 
5226   // 'unordered or not equal' is 'anything but equal', so use the EQ condition
5227   // code and swap the VSEL operands. Also do this if we don't care about the
5228   // unordered case.
5229   if (CC == ISD::SETUNE || CC == ISD::SETNE) {
5230     CondCode = ARMCC::EQ;
5231     swpVselOps = true;
5232   }
5233 }
5234 
5235 SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal,
5236                                    SDValue TrueVal, SDValue ARMcc, SDValue CCR,
5237                                    SDValue Cmp, SelectionDAG &DAG) const {
5238   if (!Subtarget->hasFP64() && VT == MVT::f64) {
5239     FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl,
5240                            DAG.getVTList(MVT::i32, MVT::i32), FalseVal);
5241     TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl,
5242                           DAG.getVTList(MVT::i32, MVT::i32), TrueVal);
5243 
5244     SDValue TrueLow = TrueVal.getValue(0);
5245     SDValue TrueHigh = TrueVal.getValue(1);
5246     SDValue FalseLow = FalseVal.getValue(0);
5247     SDValue FalseHigh = FalseVal.getValue(1);
5248 
5249     SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow,
5250                               ARMcc, CCR, Cmp);
5251     SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh,
5252                                ARMcc, CCR, duplicateCmp(Cmp, DAG));
5253 
5254     return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);
5255   } else {
5256     return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
5257                        Cmp);
5258   }
5259 }
5260 
5261 static bool isGTorGE(ISD::CondCode CC) {
5262   return CC == ISD::SETGT || CC == ISD::SETGE;
5263 }
5264 
5265 static bool isLTorLE(ISD::CondCode CC) {
5266   return CC == ISD::SETLT || CC == ISD::SETLE;
5267 }
5268 
5269 // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating.
5270 // All of these conditions (and their <= and >= counterparts) will do:
5271 //          x < k ? k : x
5272 //          x > k ? x : k
5273 //          k < x ? x : k
5274 //          k > x ? k : x
5275 static bool isLowerSaturate(const SDValue LHS, const SDValue RHS,
5276                             const SDValue TrueVal, const SDValue FalseVal,
5277                             const ISD::CondCode CC, const SDValue K) {
5278   return (isGTorGE(CC) &&
5279           ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
5280          (isLTorLE(CC) &&
5281           ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
5282 }
5283 
5284 // Check if two chained conditionals could be converted into SSAT or USAT.
5285 //
5286 // SSAT can replace a set of two conditional selectors that bound a number to an
// interval of the form [~k, k] when k + 1 is a power of 2. Some examples:
5288 //
5289 //     x < -k ? -k : (x > k ? k : x)
5290 //     x < -k ? -k : (x < k ? x : k)
5291 //     x > -k ? (x > k ? k : x) : -k
5292 //     x < k ? (x < -k ? -k : x) : k
5293 //     etc.
5294 //
5295 // LLVM canonicalizes these to either a min(max()) or a max(min())
5296 // pattern. This function tries to match one of these and will return a SSAT
5297 // node if successful.
5298 //
// USAT works similarly to SSAT, but bounds the value to the interval [0, k],
// where k + 1 is a power of 2.
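// For example, clamping x to the interval [-128, 127] in this way can be
// selected as an 8-bit ssat, and clamping to [0, 255] as an 8-bit usat.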
5301 static SDValue LowerSaturatingConditional(SDValue Op, SelectionDAG &DAG) {
5302   EVT VT = Op.getValueType();
5303   SDValue V1 = Op.getOperand(0);
5304   SDValue K1 = Op.getOperand(1);
5305   SDValue TrueVal1 = Op.getOperand(2);
5306   SDValue FalseVal1 = Op.getOperand(3);
5307   ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get();
5308 
5309   const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
5310   if (Op2.getOpcode() != ISD::SELECT_CC)
5311     return SDValue();
5312 
5313   SDValue V2 = Op2.getOperand(0);
5314   SDValue K2 = Op2.getOperand(1);
5315   SDValue TrueVal2 = Op2.getOperand(2);
5316   SDValue FalseVal2 = Op2.getOperand(3);
5317   ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get();
5318 
5319   SDValue V1Tmp = V1;
5320   SDValue V2Tmp = V2;
5321 
5322   // Check that the registers and the constants match a max(min()) or min(max())
5323   // pattern
5324   if (V1Tmp != TrueVal1 || V2Tmp != TrueVal2 || K1 != FalseVal1 ||
5325       K2 != FalseVal2 ||
5326       !((isGTorGE(CC1) && isLTorLE(CC2)) || (isLTorLE(CC1) && isGTorGE(CC2))))
5327     return SDValue();
5328 
  // Check that the constant in the lower-bound check is the one's complement
  // of the constant in the upper-bound check.
5332   if (!isa<ConstantSDNode>(K1) || !isa<ConstantSDNode>(K2))
5333     return SDValue();
5334 
5335   int64_t Val1 = cast<ConstantSDNode>(K1)->getSExtValue();
5336   int64_t Val2 = cast<ConstantSDNode>(K2)->getSExtValue();
5337   int64_t PosVal = std::max(Val1, Val2);
5338   int64_t NegVal = std::min(Val1, Val2);
5339 
5340   if (!((Val1 > Val2 && isLTorLE(CC1)) || (Val1 < Val2 && isLTorLE(CC2))) ||
5341       !isPowerOf2_64(PosVal + 1))
5342     return SDValue();
5343 
5344   // Handle the difference between USAT (unsigned) and SSAT (signed)
5345   // saturation
5346   // At this point, PosVal is guaranteed to be positive
5347   uint64_t K = PosVal;
5348   SDLoc dl(Op);
5349   if (Val1 == ~Val2)
5350     return DAG.getNode(ARMISD::SSAT, dl, VT, V2Tmp,
5351                        DAG.getConstant(llvm::countr_one(K), dl, VT));
5352   if (NegVal == 0)
5353     return DAG.getNode(ARMISD::USAT, dl, VT, V2Tmp,
5354                        DAG.getConstant(llvm::countr_one(K), dl, VT));
5355 
5356   return SDValue();
5357 }
5358 
5359 // Check if a condition of the type x < k ? k : x can be converted into a
5360 // bit operation instead of conditional moves.
5361 // Currently this is allowed given:
5362 // - The conditions and values match up
5363 // - k is 0 or -1 (all ones)
// This function will not check the last condition; that's up to the caller.
// It returns true if the transformation can be made, and in that case
// returns x in V and k in SatK.
5367 static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V,
5368                                          SDValue &SatK)
5369 {
5370   SDValue LHS = Op.getOperand(0);
5371   SDValue RHS = Op.getOperand(1);
5372   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
5373   SDValue TrueVal = Op.getOperand(2);
5374   SDValue FalseVal = Op.getOperand(3);
5375 
5376   SDValue *K = isa<ConstantSDNode>(LHS) ? &LHS : isa<ConstantSDNode>(RHS)
5377                                                ? &RHS
5378                                                : nullptr;
5379 
  // No constant operand in the comparison, so early out.
5381   if (!K)
5382     return false;
5383 
5384   SDValue KTmp = isa<ConstantSDNode>(TrueVal) ? TrueVal : FalseVal;
5385   V = (KTmp == TrueVal) ? FalseVal : TrueVal;
5386   SDValue VTmp = (K && *K == LHS) ? RHS : LHS;
5387 
  // If the comparison constant does not match the select constant, or the
  // compared variable does not match the selected variable, early out.
5390   if (*K != KTmp || V != VTmp)
5391     return false;
5392 
5393   if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, *K)) {
5394     SatK = *K;
5395     return true;
5396   }
5397 
5398   return false;
5399 }
5400 
5401 bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const {
5402   if (VT == MVT::f32)
5403     return !Subtarget->hasVFP2Base();
5404   if (VT == MVT::f64)
5405     return !Subtarget->hasFP64();
5406   if (VT == MVT::f16)
5407     return !Subtarget->hasFullFP16();
5408   return false;
5409 }
5410 
5411 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
5412   EVT VT = Op.getValueType();
5413   SDLoc dl(Op);
5414 
5415   // Try to convert two saturating conditional selects into a single SSAT
5416   if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2())
5417     if (SDValue SatValue = LowerSaturatingConditional(Op, DAG))
5418       return SatValue;
5419 
  // Try to convert expressions of the form x < k ? k : x (and similar forms)
  // into more efficient bit operations, which is possible when k is 0 or -1.
  // On ARM and Thumb-2, which have a flexible second operand, this results in
  // a single instruction. On Thumb1 the shift and the bit operation will be
  // two instructions.
  // Only allow this transformation on full-width (32-bit) operations.
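  // Concretely, "x < 0 ? 0 : x" becomes "x & ~(x >> 31)" and
  // "x < -1 ? -1 : x" becomes "x | (x >> 31)" (arithmetic shift).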
5426   SDValue LowerSatConstant;
5427   SDValue SatValue;
5428   if (VT == MVT::i32 &&
5429       isLowerSaturatingConditional(Op, SatValue, LowerSatConstant)) {
5430     SDValue ShiftV = DAG.getNode(ISD::SRA, dl, VT, SatValue,
5431                                  DAG.getConstant(31, dl, VT));
5432     if (isNullConstant(LowerSatConstant)) {
5433       SDValue NotShiftV = DAG.getNode(ISD::XOR, dl, VT, ShiftV,
5434                                       DAG.getAllOnesConstant(dl, VT));
5435       return DAG.getNode(ISD::AND, dl, VT, SatValue, NotShiftV);
5436     } else if (isAllOnesConstant(LowerSatConstant))
5437       return DAG.getNode(ISD::OR, dl, VT, SatValue, ShiftV);
5438   }
5439 
5440   SDValue LHS = Op.getOperand(0);
5441   SDValue RHS = Op.getOperand(1);
5442   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
5443   SDValue TrueVal = Op.getOperand(2);
5444   SDValue FalseVal = Op.getOperand(3);
5445   ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FalseVal);
5446   ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TrueVal);
5447 
5448   if (Subtarget->hasV8_1MMainlineOps() && CFVal && CTVal &&
5449       LHS.getValueType() == MVT::i32 && RHS.getValueType() == MVT::i32) {
5450     unsigned TVal = CTVal->getZExtValue();
5451     unsigned FVal = CFVal->getZExtValue();
5452     unsigned Opcode = 0;
5453 
5454     if (TVal == ~FVal) {
5455       Opcode = ARMISD::CSINV;
5456     } else if (TVal == ~FVal + 1) {
5457       Opcode = ARMISD::CSNEG;
5458     } else if (TVal + 1 == FVal) {
5459       Opcode = ARMISD::CSINC;
5460     } else if (TVal == FVal + 1) {
5461       Opcode = ARMISD::CSINC;
5462       std::swap(TrueVal, FalseVal);
5463       std::swap(TVal, FVal);
5464       CC = ISD::getSetCCInverse(CC, LHS.getValueType());
5465     }
5466 
5467     if (Opcode) {
      // If one of the constants is cheaper than the other, materialise the
      // cheaper one and let the csel generate the other.
5470       if (Opcode != ARMISD::CSINC &&
5471           HasLowerConstantMaterializationCost(FVal, TVal, Subtarget)) {
5472         std::swap(TrueVal, FalseVal);
5473         std::swap(TVal, FVal);
5474         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
5475       }
5476 
      // Attempt to use ZR by making TVal 0 (checking FVal and swapping),
      // possibly inverting the condition to get there. CSINC is not
      // invertible like the other two (~(~a) == a and -(-a) == a, but
      // (a+1)+1 != a), so it is excluded.
5480       if (FVal == 0 && Opcode != ARMISD::CSINC) {
5481         std::swap(TrueVal, FalseVal);
5482         std::swap(TVal, FVal);
5483         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
5484       }
5485 
5486       // Drops F's value because we can get it by inverting/negating TVal.
5487       FalseVal = TrueVal;
5488 
5489       SDValue ARMcc;
5490       SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5491       EVT VT = TrueVal.getValueType();
5492       return DAG.getNode(Opcode, dl, VT, TrueVal, FalseVal, ARMcc, Cmp);
5493     }
5494   }
5495 
5496   if (isUnsupportedFloatingType(LHS.getValueType())) {
5497     DAG.getTargetLoweringInfo().softenSetCCOperands(
5498         DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS);
5499 
5500     // If softenSetCCOperands only returned one value, we should compare it to
5501     // zero.
5502     if (!RHS.getNode()) {
5503       RHS = DAG.getConstant(0, dl, LHS.getValueType());
5504       CC = ISD::SETNE;
5505     }
5506   }
5507 
5508   if (LHS.getValueType() == MVT::i32) {
5509     // Try to generate VSEL on ARMv8.
5510     // The VSEL instruction can't use all the usual ARM condition
5511     // codes: it only has two bits to select the condition code, so it's
5512     // constrained to use only GE, GT, VS and EQ.
5513     //
5514     // To implement all the various ISD::SETXXX opcodes, we sometimes need to
5515     // swap the operands of the previous compare instruction (effectively
5516     // inverting the compare condition, swapping 'less' and 'greater') and
5517     // sometimes need to swap the operands to the VSEL (which inverts the
5518     // condition in the sense of firing whenever the previous condition didn't)
5519     if (Subtarget->hasFPARMv8Base() && (TrueVal.getValueType() == MVT::f16 ||
5520                                         TrueVal.getValueType() == MVT::f32 ||
5521                                         TrueVal.getValueType() == MVT::f64)) {
5522       ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
5523       if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
5524           CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
5525         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
5526         std::swap(TrueVal, FalseVal);
5527       }
5528     }
5529 
5530     SDValue ARMcc;
5531     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5532     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
    // Choose GE over PL, which vsel does not support.
5534     if (cast<ConstantSDNode>(ARMcc)->getZExtValue() == ARMCC::PL)
5535       ARMcc = DAG.getConstant(ARMCC::GE, dl, MVT::i32);
5536     return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
5537   }
5538 
5539   ARMCC::CondCodes CondCode, CondCode2;
5540   FPCCToARMCC(CC, CondCode, CondCode2);
5541 
5542   // Normalize the fp compare. If RHS is zero we prefer to keep it there so we
5543   // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we
5544   // must use VSEL (limited condition codes), due to not having conditional f16
5545   // moves.
5546   if (Subtarget->hasFPARMv8Base() &&
5547       !(isFloatingPointZero(RHS) && TrueVal.getValueType() != MVT::f16) &&
5548       (TrueVal.getValueType() == MVT::f16 ||
5549        TrueVal.getValueType() == MVT::f32 ||
5550        TrueVal.getValueType() == MVT::f64)) {
5551     bool swpCmpOps = false;
5552     bool swpVselOps = false;
5553     checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);
5554 
5555     if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
5556         CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
5557       if (swpCmpOps)
5558         std::swap(LHS, RHS);
5559       if (swpVselOps)
5560         std::swap(TrueVal, FalseVal);
5561     }
5562   }
5563 
5564   SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
5565   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
5566   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5567   SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
5568   if (CondCode2 != ARMCC::AL) {
5569     SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32);
5570     // FIXME: Needs another CMP because flag can have but one use.
5571     SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
5572     Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
5573   }
5574   return Result;
5575 }
5576 
5577 /// canChangeToInt - Given the fp compare operand, return true if it is suitable
5578 /// to morph to an integer compare sequence.
5579 static bool canChangeToInt(SDValue Op, bool &SeenZero,
5580                            const ARMSubtarget *Subtarget) {
5581   SDNode *N = Op.getNode();
5582   if (!N->hasOneUse())
5583     // Otherwise it requires moving the value from fp to integer registers.
5584     return false;
5585   if (!N->getNumValues())
5586     return false;
5587   EVT VT = Op.getValueType();
5588   if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
5589     // f32 case is generally profitable. f64 case only makes sense when vcmpe +
5590     // vmrs are very slow, e.g. cortex-a8.
5591     return false;
5592 
5593   if (isFloatingPointZero(Op)) {
5594     SeenZero = true;
5595     return true;
5596   }
5597   return ISD::isNormalLoad(N);
5598 }
5599 
5600 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
5601   if (isFloatingPointZero(Op))
5602     return DAG.getConstant(0, SDLoc(Op), MVT::i32);
5603 
5604   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
5605     return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(),
5606                        Ld->getPointerInfo(), Ld->getAlign(),
5607                        Ld->getMemOperand()->getFlags());
5608 
5609   llvm_unreachable("Unknown VFP cmp argument!");
5610 }
5611 
5612 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
5613                            SDValue &RetVal1, SDValue &RetVal2) {
5614   SDLoc dl(Op);
5615 
5616   if (isFloatingPointZero(Op)) {
5617     RetVal1 = DAG.getConstant(0, dl, MVT::i32);
5618     RetVal2 = DAG.getConstant(0, dl, MVT::i32);
5619     return;
5620   }
5621 
5622   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
5623     SDValue Ptr = Ld->getBasePtr();
5624     RetVal1 =
5625         DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
5626                     Ld->getAlign(), Ld->getMemOperand()->getFlags());
5627 
5628     EVT PtrType = Ptr.getValueType();
5629     SDValue NewPtr = DAG.getNode(ISD::ADD, dl,
5630                                  PtrType, Ptr, DAG.getConstant(4, dl, PtrType));
5631     RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr,
5632                           Ld->getPointerInfo().getWithOffset(4),
5633                           commonAlignment(Ld->getAlign(), 4),
5634                           Ld->getMemOperand()->getFlags());
5635     return;
5636   }
5637 
5638   llvm_unreachable("Unknown VFP cmp argument!");
5639 }
5640 
5641 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
5642 /// f32 and even f64 comparisons to integer ones.
5643 SDValue
5644 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
5645   SDValue Chain = Op.getOperand(0);
5646   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
5647   SDValue LHS = Op.getOperand(2);
5648   SDValue RHS = Op.getOperand(3);
5649   SDValue Dest = Op.getOperand(4);
5650   SDLoc dl(Op);
5651 
5652   bool LHSSeenZero = false;
5653   bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
5654   bool RHSSeenZero = false;
5655   bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
5656   if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
5657     // If unsafe fp math optimization is enabled and there are no other uses of
5658     // the CMP operands, and the condition code is EQ or NE, we can optimize it
5659     // to an integer comparison.
5660     if (CC == ISD::SETOEQ)
5661       CC = ISD::SETEQ;
5662     else if (CC == ISD::SETUNE)
5663       CC = ISD::SETNE;
5664 
5665     SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32);
5666     SDValue ARMcc;
5667     if (LHS.getValueType() == MVT::f32) {
5668       LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
5669                         bitcastf32Toi32(LHS, DAG), Mask);
5670       RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
5671                         bitcastf32Toi32(RHS, DAG), Mask);
5672       SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5673       SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5674       return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
5675                          Chain, Dest, ARMcc, CCR, Cmp);
5676     }
5677 
5678     SDValue LHS1, LHS2;
5679     SDValue RHS1, RHS2;
5680     expandf64Toi32(LHS, DAG, LHS1, LHS2);
5681     expandf64Toi32(RHS, DAG, RHS1, RHS2);
5682     LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
5683     RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
5684     ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
5685     ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
5686     SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
5687     SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
5688     return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
5689   }
5690 
5691   return SDValue();
5692 }
5693 
5694 SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
5695   SDValue Chain = Op.getOperand(0);
5696   SDValue Cond = Op.getOperand(1);
5697   SDValue Dest = Op.getOperand(2);
5698   SDLoc dl(Op);
5699 
5700   // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
5701   // instruction.
5702   unsigned Opc = Cond.getOpcode();
5703   bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) &&
5704                       !Subtarget->isThumb1Only();
5705   if (Cond.getResNo() == 1 &&
5706       (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
5707        Opc == ISD::USUBO || OptimizeMul)) {
5708     // Only lower legal XALUO ops.
5709     if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
5710       return SDValue();
5711 
5712     // The actual operation with overflow check.
5713     SDValue Value, OverflowCmp;
5714     SDValue ARMcc;
5715     std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
5716 
5717     // Reverse the condition code.
5718     ARMCC::CondCodes CondCode =
5719         (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
5720     CondCode = ARMCC::getOppositeCondition(CondCode);
5721     ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
5722     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5723 
5724     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR,
5725                        OverflowCmp);
5726   }
5727 
5728   return SDValue();
5729 }
5730 
5731 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
5732   SDValue Chain = Op.getOperand(0);
5733   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
5734   SDValue LHS = Op.getOperand(2);
5735   SDValue RHS = Op.getOperand(3);
5736   SDValue Dest = Op.getOperand(4);
5737   SDLoc dl(Op);
5738 
5739   if (isUnsupportedFloatingType(LHS.getValueType())) {
5740     DAG.getTargetLoweringInfo().softenSetCCOperands(
5741         DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS);
5742 
5743     // If softenSetCCOperands only returned one value, we should compare it to
5744     // zero.
5745     if (!RHS.getNode()) {
5746       RHS = DAG.getConstant(0, dl, LHS.getValueType());
5747       CC = ISD::SETNE;
5748     }
5749   }
5750 
5751   // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
5752   // instruction.
5753   unsigned Opc = LHS.getOpcode();
5754   bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) &&
5755                       !Subtarget->isThumb1Only();
5756   if (LHS.getResNo() == 1 && (isOneConstant(RHS) || isNullConstant(RHS)) &&
5757       (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
5758        Opc == ISD::USUBO || OptimizeMul) &&
5759       (CC == ISD::SETEQ || CC == ISD::SETNE)) {
5760     // Only lower legal XALUO ops.
5761     if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0)))
5762       return SDValue();
5763 
5764     // The actual operation with overflow check.
5765     SDValue Value, OverflowCmp;
5766     SDValue ARMcc;
5767     std::tie(Value, OverflowCmp) = getARMXALUOOp(LHS.getValue(0), DAG, ARMcc);
5768 
5769     if ((CC == ISD::SETNE) != isOneConstant(RHS)) {
5770       // Reverse the condition code.
5771       ARMCC::CondCodes CondCode =
5772           (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
5773       CondCode = ARMCC::getOppositeCondition(CondCode);
5774       ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
5775     }
5776     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5777 
5778     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR,
5779                        OverflowCmp);
5780   }
5781 
5782   if (LHS.getValueType() == MVT::i32) {
5783     SDValue ARMcc;
5784     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5785     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5786     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
5787                        Chain, Dest, ARMcc, CCR, Cmp);
5788   }
5789 
5790   if (getTargetMachine().Options.UnsafeFPMath &&
5791       (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
5792        CC == ISD::SETNE || CC == ISD::SETUNE)) {
5793     if (SDValue Result = OptimizeVFPBrcond(Op, DAG))
5794       return Result;
5795   }
5796 
5797   ARMCC::CondCodes CondCode, CondCode2;
5798   FPCCToARMCC(CC, CondCode, CondCode2);
5799 
5800   SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
5801   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
5802   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5803   SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
5804   SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
5805   SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
5806   if (CondCode2 != ARMCC::AL) {
5807     ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
5808     SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
5809     Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
5810   }
5811   return Res;
5812 }
5813 
5814 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
5815   SDValue Chain = Op.getOperand(0);
5816   SDValue Table = Op.getOperand(1);
5817   SDValue Index = Op.getOperand(2);
5818   SDLoc dl(Op);
5819 
5820   EVT PTy = getPointerTy(DAG.getDataLayout());
5821   JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
5822   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
5823   Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI);
5824   Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy));
5825   SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Index);
  if (Subtarget->isThumb2() ||
      (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) {
    // Thumb2 and ARMv8-M use a two-level jump. That is, the code jumps into
    // the jump table, which does another jump to the destination. This also
    // makes it easier to translate it to TBB / TBH later (Thumb2 only).
5830     // FIXME: This might not work if the function is extremely large.
5831     return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
5832                        Addr, Op.getOperand(2), JTI);
5833   }
5834   if (isPositionIndependent() || Subtarget->isROPI()) {
5835     Addr =
5836         DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
5837                     MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
5838     Chain = Addr.getValue(1);
5839     Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Addr);
5840     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
5841   } else {
5842     Addr =
5843         DAG.getLoad(PTy, dl, Chain, Addr,
5844                     MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
5845     Chain = Addr.getValue(1);
5846     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
5847   }
5848 }
5849 
5850 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
5851   EVT VT = Op.getValueType();
5852   SDLoc dl(Op);
5853 
5854   if (Op.getValueType().getVectorElementType() == MVT::i32) {
5855     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
5856       return Op;
5857     return DAG.UnrollVectorOp(Op.getNode());
5858   }
5859 
5860   const bool HasFullFP16 = DAG.getSubtarget<ARMSubtarget>().hasFullFP16();
5861 
5862   EVT NewTy;
5863   const EVT OpTy = Op.getOperand(0).getValueType();
5864   if (OpTy == MVT::v4f32)
5865     NewTy = MVT::v4i32;
5866   else if (OpTy == MVT::v4f16 && HasFullFP16)
5867     NewTy = MVT::v4i16;
5868   else if (OpTy == MVT::v8f16 && HasFullFP16)
5869     NewTy = MVT::v8i16;
5870   else
5871     llvm_unreachable("Invalid type for custom lowering!");
5872 
5873   if (VT != MVT::v4i16 && VT != MVT::v8i16)
5874     return DAG.UnrollVectorOp(Op.getNode());
5875 
5876   Op = DAG.getNode(Op.getOpcode(), dl, NewTy, Op.getOperand(0));
5877   return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
5878 }
5879 
5880 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
5881   EVT VT = Op.getValueType();
5882   if (VT.isVector())
5883     return LowerVectorFP_TO_INT(Op, DAG);
5884 
5885   bool IsStrict = Op->isStrictFPOpcode();
5886   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
5887 
5888   if (isUnsupportedFloatingType(SrcVal.getValueType())) {
5889     RTLIB::Libcall LC;
5890     if (Op.getOpcode() == ISD::FP_TO_SINT ||
5891         Op.getOpcode() == ISD::STRICT_FP_TO_SINT)
5892       LC = RTLIB::getFPTOSINT(SrcVal.getValueType(),
5893                               Op.getValueType());
5894     else
5895       LC = RTLIB::getFPTOUINT(SrcVal.getValueType(),
5896                               Op.getValueType());
5897     SDLoc Loc(Op);
5898     MakeLibCallOptions CallOptions;
5899     SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
5900     SDValue Result;
5901     std::tie(Result, Chain) = makeLibCall(DAG, LC, Op.getValueType(), SrcVal,
5902                                           CallOptions, Loc, Chain);
5903     return IsStrict ? DAG.getMergeValues({Result, Chain}, Loc) : Result;
5904   }
5905 
5906   // FIXME: Remove this when we have strict fp instruction selection patterns
5907   if (IsStrict) {
5908     SDLoc Loc(Op);
5909     SDValue Result =
5910         DAG.getNode(Op.getOpcode() == ISD::STRICT_FP_TO_SINT ? ISD::FP_TO_SINT
5911                                                              : ISD::FP_TO_UINT,
5912                     Loc, Op.getValueType(), SrcVal);
5913     return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc);
5914   }
5915 
5916   return Op;
5917 }
5918 
5919 static SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
5920                                   const ARMSubtarget *Subtarget) {
5921   EVT VT = Op.getValueType();
5922   EVT ToVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
5923   EVT FromVT = Op.getOperand(0).getValueType();
5924 
5925   if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f32)
5926     return Op;
5927   if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f64 &&
5928       Subtarget->hasFP64())
5929     return Op;
5930   if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f16 &&
5931       Subtarget->hasFullFP16())
5932     return Op;
5933   if (VT == MVT::v4i32 && ToVT == MVT::i32 && FromVT == MVT::v4f32 &&
5934       Subtarget->hasMVEFloatOps())
5935     return Op;
5936   if (VT == MVT::v8i16 && ToVT == MVT::i16 && FromVT == MVT::v8f16 &&
5937       Subtarget->hasMVEFloatOps())
5938     return Op;
5939 
5940   if (FromVT != MVT::v4f32 && FromVT != MVT::v8f16)
5941     return SDValue();
5942 
5943   SDLoc DL(Op);
5944   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
5945   unsigned BW = ToVT.getScalarSizeInBits() - IsSigned;
5946   SDValue CVT = DAG.getNode(Op.getOpcode(), DL, VT, Op.getOperand(0),
5947                             DAG.getValueType(VT.getScalarType()));
5948   SDValue Max = DAG.getNode(IsSigned ? ISD::SMIN : ISD::UMIN, DL, VT, CVT,
5949                             DAG.getConstant((1 << BW) - 1, DL, VT));
5950   if (IsSigned)
5951     Max = DAG.getNode(ISD::SMAX, DL, VT, Max,
5952                       DAG.getConstant(-(1 << BW), DL, VT));
5953   return Max;
5954 }
5955 
5956 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
5957   EVT VT = Op.getValueType();
5958   SDLoc dl(Op);
5959 
5960   if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
5961     if (VT.getVectorElementType() == MVT::f32)
5962       return Op;
5963     return DAG.UnrollVectorOp(Op.getNode());
5964   }
5965 
5966   assert((Op.getOperand(0).getValueType() == MVT::v4i16 ||
5967           Op.getOperand(0).getValueType() == MVT::v8i16) &&
5968          "Invalid type for custom lowering!");
5969 
5970   const bool HasFullFP16 = DAG.getSubtarget<ARMSubtarget>().hasFullFP16();
5971 
5972   EVT DestVecType;
5973   if (VT == MVT::v4f32)
5974     DestVecType = MVT::v4i32;
5975   else if (VT == MVT::v4f16 && HasFullFP16)
5976     DestVecType = MVT::v4i16;
5977   else if (VT == MVT::v8f16 && HasFullFP16)
5978     DestVecType = MVT::v8i16;
5979   else
5980     return DAG.UnrollVectorOp(Op.getNode());
5981 
5982   unsigned CastOpc;
5983   unsigned Opc;
5984   switch (Op.getOpcode()) {
5985   default: llvm_unreachable("Invalid opcode!");
5986   case ISD::SINT_TO_FP:
5987     CastOpc = ISD::SIGN_EXTEND;
5988     Opc = ISD::SINT_TO_FP;
5989     break;
5990   case ISD::UINT_TO_FP:
5991     CastOpc = ISD::ZERO_EXTEND;
5992     Opc = ISD::UINT_TO_FP;
5993     break;
5994   }
5995 
5996   Op = DAG.getNode(CastOpc, dl, DestVecType, Op.getOperand(0));
5997   return DAG.getNode(Opc, dl, VT, Op);
5998 }
5999 
6000 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
6001   EVT VT = Op.getValueType();
6002   if (VT.isVector())
6003     return LowerVectorINT_TO_FP(Op, DAG);
6004   if (isUnsupportedFloatingType(VT)) {
6005     RTLIB::Libcall LC;
6006     if (Op.getOpcode() == ISD::SINT_TO_FP)
6007       LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
6008                               Op.getValueType());
6009     else
6010       LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
6011                               Op.getValueType());
6012     MakeLibCallOptions CallOptions;
6013     return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
6014                        CallOptions, SDLoc(Op)).first;
6015   }
6016 
6017   return Op;
6018 }
6019 
6020 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
6021   // Implement fcopysign with a fabs and a conditional fneg.
6022   SDValue Tmp0 = Op.getOperand(0);
6023   SDValue Tmp1 = Op.getOperand(1);
6024   SDLoc dl(Op);
6025   EVT VT = Op.getValueType();
6026   EVT SrcVT = Tmp1.getValueType();
6027   bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
6028     Tmp0.getOpcode() == ARMISD::VMOVDRR;
6029   bool UseNEON = !InGPR && Subtarget->hasNEON();
6030 
6031   if (UseNEON) {
6032     // Use VBSL to copy the sign bit.
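    // The DAG below computes (Tmp1 & SignMask) | (Tmp0 & ~SignMask), where
    // SignMask has only the sign bit of each element set; instruction
    // selection is expected to match this bitwise-select pattern to VBSL.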
6033     unsigned EncodedVal = ARM_AM::createVMOVModImm(0x6, 0x80);
6034     SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
6035                                DAG.getTargetConstant(EncodedVal, dl, MVT::i32));
6036     EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
6037     if (VT == MVT::f64)
6038       Mask = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT,
6039                          DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
6040                          DAG.getConstant(32, dl, MVT::i32));
6041     else /*if (VT == MVT::f32)*/
6042       Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
6043     if (SrcVT == MVT::f32) {
6044       Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
6045       if (VT == MVT::f64)
6046         Tmp1 = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT,
6047                            DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
6048                            DAG.getConstant(32, dl, MVT::i32));
6049     } else if (VT == MVT::f32)
6050       Tmp1 = DAG.getNode(ARMISD::VSHRuIMM, dl, MVT::v1i64,
6051                          DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
6052                          DAG.getConstant(32, dl, MVT::i32));
6053     Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
6054     Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);
6055 
6056     SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff),
6057                                             dl, MVT::i32);
6058     AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
6059     SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
6060                                   DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));
6061 
6062     SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
6063                               DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
6064                               DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
6065     if (VT == MVT::f32) {
6066       Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
6067       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
6068                         DAG.getConstant(0, dl, MVT::i32));
6069     } else {
6070       Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
6071     }
6072 
6073     return Res;
6074   }
6075 
6076   // Bitcast operand 1 to i32.
6077   if (SrcVT == MVT::f64)
6078     Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
6079                        Tmp1).getValue(1);
6080   Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
6081 
6082   // Or in the signbit with integer operations.
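  // That is, copysign(a, b) keeps a's magnitude bits (a & 0x7fffffff) and
  // takes b's sign bit (b & 0x80000000).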
6083   SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32);
6084   SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32);
6085   Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
6086   if (VT == MVT::f32) {
6087     Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
6088                        DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
6089     return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
6090                        DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
6091   }
6092 
  // f64: OR the high part with the sign bit and then combine the two parts.
6094   Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
6095                      Tmp0);
6096   SDValue Lo = Tmp0.getValue(0);
6097   SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
6098   Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
6099   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
6100 }
6101 
6102 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
6103   MachineFunction &MF = DAG.getMachineFunction();
6104   MachineFrameInfo &MFI = MF.getFrameInfo();
6105   MFI.setReturnAddressIsTaken(true);
6106 
6107   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
6108     return SDValue();
6109 
6110   EVT VT = Op.getValueType();
6111   SDLoc dl(Op);
6112   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
6113   if (Depth) {
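    // For a non-zero depth, walk up via the frame address and load the saved
    // return address, which this lowering assumes is stored at the frame
    // pointer plus 4.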
6114     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
6115     SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
6116     return DAG.getLoad(VT, dl, DAG.getEntryNode(),
6117                        DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
6118                        MachinePointerInfo());
6119   }
6120 
6121   // Return LR, which contains the return address. Mark it an implicit live-in.
6122   Register Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
6123   return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
6124 }
6125 
6126 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
6127   const ARMBaseRegisterInfo &ARI =
6128     *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
6129   MachineFunction &MF = DAG.getMachineFunction();
6130   MachineFrameInfo &MFI = MF.getFrameInfo();
6131   MFI.setFrameAddressIsTaken(true);
6132 
6133   EVT VT = Op.getValueType();
6134   SDLoc dl(Op);  // FIXME probably not meaningful
6135   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
6136   Register FrameReg = ARI.getFrameRegister(MF);
6137   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
6138   while (Depth--)
6139     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
6140                             MachinePointerInfo());
6141   return FrameAddr;
6142 }
6143 
6144 // FIXME? Maybe this could be a TableGen attribute on some registers and
6145 // this table could be generated automatically from RegInfo.
6146 Register ARMTargetLowering::getRegisterByName(const char* RegName, LLT VT,
6147                                               const MachineFunction &MF) const {
6148   Register Reg = StringSwitch<unsigned>(RegName)
6149                        .Case("sp", ARM::SP)
6150                        .Default(0);
6151   if (Reg)
6152     return Reg;
6153   report_fatal_error(Twine("Invalid register name \""
6154                               + StringRef(RegName)  + "\"."));
6155 }
6156 
// The result is a 64-bit value, so split it into two 32-bit values and
// return them as a pair of values.
6159 static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
6160                                 SelectionDAG &DAG) {
6161   SDLoc DL(N);
6162 
6163   // This function is only supposed to be called for i64 type destination.
6164   assert(N->getValueType(0) == MVT::i64
6165           && "ExpandREAD_REGISTER called for non-i64 type result.");
6166 
6167   SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL,
6168                              DAG.getVTList(MVT::i32, MVT::i32, MVT::Other),
6169                              N->getOperand(0),
6170                              N->getOperand(1));
6171 
6172   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0),
6173                     Read.getValue(1)));
6174   Results.push_back(Read.getOperand(0));
6175 }
6176 
6177 /// \p BC is a bitcast that is about to be turned into a VMOVDRR.
6178 /// When \p DstVT, the destination type of \p BC, is on the vector
6179 /// register bank and the source of the bitcast, \p Op, operates on the same
6180 /// bank, it might be possible to combine them, such that everything stays on
6181 /// the vector register bank.
6182 /// \returns The node that would replace \p BC, if the combine
6183 /// is possible.
6184 static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
6185                                                 SelectionDAG &DAG) {
6186   SDValue Op = BC->getOperand(0);
6187   EVT DstVT = BC->getValueType(0);
6188 
6189   // The only vector instruction that can produce a scalar (remember,
6190   // since the bitcast was about to be turned into VMOVDRR, the source
6191   // type is i64) from a vector is EXTRACT_VECTOR_ELT.
6192   // Moreover, we can do this combine only if there is one use.
6193   // Finally, if the destination type is not a vector, there is not
6194   // much point in forcing everything onto the vector bank.
6195   if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6196       !Op.hasOneUse())
6197     return SDValue();
6198 
6199   // If the index is not constant, we will introduce an additional
6200   // multiply that will not be folded away.
6201   // Give up in that case.
6202   ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6203   if (!Index)
6204     return SDValue();
6205   unsigned DstNumElt = DstVT.getVectorNumElements();
6206 
6207   // Compute the new index.
6208   const APInt &APIntIndex = Index->getAPIntValue();
6209   APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt);
6210   NewIndex *= APIntIndex;
6211   // Check if the new constant index fits into i32.
6212   if (NewIndex.getBitWidth() > 32)
6213     return SDValue();
6214 
6215   // vMTy bitcast(i64 extractelt vNi64 src, i32 index) ->
6216   // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M)
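  // For example, with DstVT = v2f32 and index 1:
  //   v2f32 (bitcast (i64 extractelt v2i64 %src, 1))
  //     -> v2f32 (extract_subvector (v4f32 (bitcast %src)), 2)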
6217   SDLoc dl(Op);
6218   SDValue ExtractSrc = Op.getOperand(0);
6219   EVT VecVT = EVT::getVectorVT(
6220       *DAG.getContext(), DstVT.getScalarType(),
6221       ExtractSrc.getValueType().getVectorNumElements() * DstNumElt);
6222   SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc);
6223   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast,
6224                      DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32));
6225 }
6226 
6227 /// ExpandBITCAST - If the target supports VFP, this function is called to
6228 /// expand a bit convert where either the source or destination type is i64 to
6229 /// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
6230 /// operand type is illegal (e.g., v2f32 for a target that doesn't support
6231 /// vectors), since the legalizer won't know what to do with that.
6232 SDValue ARMTargetLowering::ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
6233                                          const ARMSubtarget *Subtarget) const {
6234   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6235   SDLoc dl(N);
6236   SDValue Op = N->getOperand(0);
6237 
6238   // This function handles bit converts where either the source or destination
6239   // type is i64, or where f16/bf16 is converted to or from i16/i32.
6240   EVT SrcVT = Op.getValueType();
6241   EVT DstVT = N->getValueType(0);
6242 
6243   if ((SrcVT == MVT::i16 || SrcVT == MVT::i32) &&
6244       (DstVT == MVT::f16 || DstVT == MVT::bf16))
6245     return MoveToHPR(SDLoc(N), DAG, MVT::i32, DstVT.getSimpleVT(),
6246                      DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), MVT::i32, Op));
6247 
6248   if ((DstVT == MVT::i16 || DstVT == MVT::i32) &&
6249       (SrcVT == MVT::f16 || SrcVT == MVT::bf16))
6250     return DAG.getNode(
6251         ISD::TRUNCATE, SDLoc(N), DstVT,
6252         MoveFromHPR(SDLoc(N), DAG, MVT::i32, SrcVT.getSimpleVT(), Op));
6253 
6254   if (!(SrcVT == MVT::i64 || DstVT == MVT::i64))
6255     return SDValue();
6256 
6257   // Turn i64->f64 into VMOVDRR.
6258   if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
6259     // Do not force values to GPRs (this is what VMOVDRR does for the inputs)
6260     // if we can combine the bitcast with its source.
6261     if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG))
6262       return Val;
6263     SDValue Lo, Hi;
6264     std::tie(Lo, Hi) = DAG.SplitScalar(Op, dl, MVT::i32, MVT::i32);
6265     return DAG.getNode(ISD::BITCAST, dl, DstVT,
6266                        DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
6267   }
6268 
6269   // Turn f64->i64 into VMOVRRD.
6270   if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
6271     SDValue Cvt;
6272     if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() &&
6273         SrcVT.getVectorNumElements() > 1)
6274       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
6275                         DAG.getVTList(MVT::i32, MVT::i32),
6276                         DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
6277     else
6278       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
6279                         DAG.getVTList(MVT::i32, MVT::i32), Op);
6280     // Merge the pieces into a single i64 value.
6281     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
6282   }
6283 
6284   return SDValue();
6285 }
6286 
6287 /// getZeroVector - Returns a vector of specified type with all zero elements.
6288 /// Zero vectors are used to represent vector negation and in those cases
6289 /// will be implemented with the NEON VNEG instruction.  However, VNEG does
6290 /// not support i64 elements, so sometimes the zero vectors will need to be
6291 /// explicitly constructed.  Regardless, use a canonical VMOV to create the
6292 /// zero vector.
6293 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
6294   assert(VT.isVector() && "Expected a vector type");
6295   // The canonical modified immediate encoding of a zero vector is....0!
6296   SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
6297   EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
6298   SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
6299   return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
6300 }
6301 
6302 /// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
6303 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
6304 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
6305                                                 SelectionDAG &DAG) const {
6306   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
6307   EVT VT = Op.getValueType();
6308   unsigned VTBits = VT.getSizeInBits();
6309   SDLoc dl(Op);
6310   SDValue ShOpLo = Op.getOperand(0);
6311   SDValue ShOpHi = Op.getOperand(1);
6312   SDValue ShAmt  = Op.getOperand(2);
6313   SDValue ARMcc;
6314   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
6315   unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
6316 
6317   assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
6318 
6319   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
6320                                  DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
6321   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
6322   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
6323                                    DAG.getConstant(VTBits, dl, MVT::i32));
6324   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
6325   SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
6326   SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
6327   SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
6328                             ISD::SETGE, ARMcc, DAG, dl);
6329   SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift,
6330                            ARMcc, CCR, CmpLo);
6331 
6332   SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
6333   SDValue HiBigShift = Opc == ISD::SRA
6334                            ? DAG.getNode(Opc, dl, VT, ShOpHi,
6335                                          DAG.getConstant(VTBits - 1, dl, VT))
6336                            : DAG.getConstant(0, dl, VT);
6337   SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
6338                             ISD::SETGE, ARMcc, DAG, dl);
6339   SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
6340                            ARMcc, CCR, CmpHi);
6341 
6342   SDValue Ops[2] = { Lo, Hi };
6343   return DAG.getMergeValues(Ops, dl);
6344 }
6345 
6346 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
6347 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
6348 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
6349                                                SelectionDAG &DAG) const {
6350   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
6351   EVT VT = Op.getValueType();
6352   unsigned VTBits = VT.getSizeInBits();
6353   SDLoc dl(Op);
6354   SDValue ShOpLo = Op.getOperand(0);
6355   SDValue ShOpHi = Op.getOperand(1);
6356   SDValue ShAmt  = Op.getOperand(2);
6357   SDValue ARMcc;
6358   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
6359 
6360   assert(Op.getOpcode() == ISD::SHL_PARTS);
6361   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
6362                                  DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
6363   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
6364   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
6365   SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
6366 
6367   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
6368                                    DAG.getConstant(VTBits, dl, MVT::i32));
6369   SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
6370   SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
6371                             ISD::SETGE, ARMcc, DAG, dl);
6372   SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
6373                            ARMcc, CCR, CmpHi);
6374 
6375   SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
6376                           ISD::SETGE, ARMcc, DAG, dl);
6377   SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
6378   SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift,
6379                            DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo);
6380 
6381   SDValue Ops[2] = { Lo, Hi };
6382   return DAG.getMergeValues(Ops, dl);
6383 }
6384 
6385 SDValue ARMTargetLowering::LowerGET_ROUNDING(SDValue Op,
6386                                              SelectionDAG &DAG) const {
6387   // The rounding mode is in bits 23:22 of the FPSCR.
6388   // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
6389   // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3,
6390   // so that the shift + and get folded into a bitfield extract.
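  // Concretely: RN (0b00) -> 1, RP (0b01) -> 2, RM (0b10) -> 3, RZ (0b11) -> 0,
  // matching the C FLT_ROUNDS convention (0 = toward zero, 1 = to nearest,
  // 2 = toward +infinity, 3 = toward -infinity).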
6391   SDLoc dl(Op);
6392   SDValue Chain = Op.getOperand(0);
6393   SDValue Ops[] = {Chain,
6394                    DAG.getConstant(Intrinsic::arm_get_fpscr, dl, MVT::i32)};
6395 
6396   SDValue FPSCR =
6397       DAG.getNode(ISD::INTRINSIC_W_CHAIN, dl, {MVT::i32, MVT::Other}, Ops);
6398   Chain = FPSCR.getValue(1);
6399   SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
6400                                   DAG.getConstant(1U << 22, dl, MVT::i32));
6401   SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
6402                               DAG.getConstant(22, dl, MVT::i32));
6403   SDValue And = DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
6404                             DAG.getConstant(3, dl, MVT::i32));
6405   return DAG.getMergeValues({And, Chain}, dl);
6406 }
6407 
6408 SDValue ARMTargetLowering::LowerSET_ROUNDING(SDValue Op,
6409                                              SelectionDAG &DAG) const {
6410   SDLoc DL(Op);
6411   SDValue Chain = Op->getOperand(0);
6412   SDValue RMValue = Op->getOperand(1);
6413 
6414   // The rounding mode is in bits 23:22 of the FPSCR.
6415   // The llvm.set.rounding argument value to ARM rounding mode value mapping
6416   // is 0->3, 1->0, 2->1, 3->2. The formula we use to implement this is
6417   // (((arg - 1) & 3) << 22).
6418   //
6419   // It is expected that the argument of llvm.set.rounding is within the
6420   // interval [0, 3], so NearestTiesToAway (4) is not handled here. It is the
6421   // responsibility of the code that generates llvm.set.rounding to ensure this
6422   // condition.
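  // For example, llvm.set.rounding(2) (round toward +infinity) yields
  // ((2 - 1) & 3) = 1, i.e. FPSCR RMode = 0b01.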
6423 
6424   // Calculate new value of FPSCR[23:22].
6425   RMValue = DAG.getNode(ISD::SUB, DL, MVT::i32, RMValue,
6426                         DAG.getConstant(1, DL, MVT::i32));
6427   RMValue = DAG.getNode(ISD::AND, DL, MVT::i32, RMValue,
6428                         DAG.getConstant(0x3, DL, MVT::i32));
6429   RMValue = DAG.getNode(ISD::SHL, DL, MVT::i32, RMValue,
6430                         DAG.getConstant(ARM::RoundingBitsPos, DL, MVT::i32));
6431 
6432   // Get current value of FPSCR.
6433   SDValue Ops[] = {Chain,
6434                    DAG.getConstant(Intrinsic::arm_get_fpscr, DL, MVT::i32)};
6435   SDValue FPSCR =
6436       DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, {MVT::i32, MVT::Other}, Ops);
6437   Chain = FPSCR.getValue(1);
6438   FPSCR = FPSCR.getValue(0);
6439 
6440   // Put new rounding mode into FPSCR[23:22].
6441   const unsigned RMMask = ~(ARM::Rounding::rmMask << ARM::RoundingBitsPos);
6442   FPSCR = DAG.getNode(ISD::AND, DL, MVT::i32, FPSCR,
6443                       DAG.getConstant(RMMask, DL, MVT::i32));
6444   FPSCR = DAG.getNode(ISD::OR, DL, MVT::i32, FPSCR, RMValue);
6445   SDValue Ops2[] = {
6446       Chain, DAG.getConstant(Intrinsic::arm_set_fpscr, DL, MVT::i32), FPSCR};
6447   return DAG.getNode(ISD::INTRINSIC_VOID, DL, MVT::Other, Ops2);
6448 }
6449 
6450 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
6451                          const ARMSubtarget *ST) {
6452   SDLoc dl(N);
6453   EVT VT = N->getValueType(0);
6454   if (VT.isVector() && ST->hasNEON()) {
6455 
6456     // Compute the least significant set bit: LSB = X & -X
6457     SDValue X = N->getOperand(0);
6458     SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X);
6459     SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX);
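    // For example, X = 0b0001'1000 gives -X = ...1110'1000 and LSB = 0b0000'1000.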
6460 
6461     EVT ElemTy = VT.getVectorElementType();
6462 
6463     if (ElemTy == MVT::i8) {
6464       // Compute with: cttz(x) = ctpop(lsb - 1)
6465       SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
6466                                 DAG.getTargetConstant(1, dl, ElemTy));
6467       SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
6468       return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
6469     }
6470 
6471     if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) &&
6472         (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) {
6473       // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0
6474       unsigned NumBits = ElemTy.getSizeInBits();
6475       SDValue WidthMinus1 =
6476           DAG.getNode(ARMISD::VMOVIMM, dl, VT,
6477                       DAG.getTargetConstant(NumBits - 1, dl, ElemTy));
6478       SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB);
6479       return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ);
6480     }
6481 
6482     // Compute with: cttz(x) = ctpop(lsb - 1)
6483 
6484     // Compute LSB - 1.
6485     SDValue Bits;
6486     if (ElemTy == MVT::i64) {
6487       // Load constant 0xffff'ffff'ffff'ffff to register.
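      // 0x1eff is the VMOV modified-immediate encoding of that value (Op=1,
      // Cmode=1110, Imm=0xff; every Imm bit expands to a 0xff byte).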
6488       SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
6489                                DAG.getTargetConstant(0x1eff, dl, MVT::i32));
6490       Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF);
6491     } else {
6492       SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
6493                                 DAG.getTargetConstant(1, dl, ElemTy));
6494       Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
6495     }
6496     return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
6497   }
6498 
6499   if (!ST->hasV6T2Ops())
6500     return SDValue();
6501 
6502   SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0));
6503   return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
6504 }
6505 
6506 static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
6507                           const ARMSubtarget *ST) {
6508   EVT VT = N->getValueType(0);
6509   SDLoc DL(N);
6510 
6511   assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
6512   assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
6513           VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
6514          "Unexpected type for custom ctpop lowering");
6515 
6516   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6517   EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
6518   SDValue Res = DAG.getBitcast(VT8Bit, N->getOperand(0));
6519   Res = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Res);
6520 
6521   // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds.
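  // For example, for VT = v4i32 this performs: ctpop as v16i8, then
  // VPADDL.U8 -> v8i16, then VPADDL.U16 -> v4i32.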
6522   unsigned EltSize = 8;
6523   unsigned NumElts = VT.is64BitVector() ? 8 : 16;
6524   while (EltSize != VT.getScalarSizeInBits()) {
6525     SmallVector<SDValue, 8> Ops;
6526     Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddlu, DL,
6527                                   TLI.getPointerTy(DAG.getDataLayout())));
6528     Ops.push_back(Res);
6529 
6530     EltSize *= 2;
6531     NumElts /= 2;
6532     MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);
6533     Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WidenVT, Ops);
6534   }
6535 
6536   return Res;
6537 }
6538 
6539 /// getVShiftImm - Check if this is a valid build_vector for the immediate
6540 /// operand of a vector shift operation, where all the elements of the
6541 /// build_vector must have the same constant integer value.
6542 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
6543   // Ignore bit_converts.
6544   while (Op.getOpcode() == ISD::BITCAST)
6545     Op = Op.getOperand(0);
6546   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
6547   APInt SplatBits, SplatUndef;
6548   unsigned SplatBitSize;
6549   bool HasAnyUndefs;
6550   if (!BVN ||
6551       !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
6552                             ElementBits) ||
6553       SplatBitSize > ElementBits)
6554     return false;
6555   Cnt = SplatBits.getSExtValue();
6556   return true;
6557 }
6558 
6559 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
6560 /// operand of a vector shift left operation.  That value must be in the range:
6561 ///   0 <= Value < ElementBits for a left shift; or
6562 ///   0 <= Value <= ElementBits for a long left shift.
6563 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
6564   assert(VT.isVector() && "vector shift count is not a vector type");
6565   int64_t ElementBits = VT.getScalarSizeInBits();
6566   if (!getVShiftImm(Op, ElementBits, Cnt))
6567     return false;
6568   return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
6569 }
6570 
6571 /// isVShiftRImm - Check if this is a valid build_vector for the immediate
6572 /// operand of a vector shift right operation.  For a shift opcode, the value
6573 /// is positive, but for an intrinsic the shift count must be negative. The
6574 /// absolute value must be in the range:
6575 ///   1 <= |Value| <= ElementBits for a right shift; or
6576 ///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
6577 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
6578                          int64_t &Cnt) {
6579   assert(VT.isVector() && "vector shift count is not a vector type");
6580   int64_t ElementBits = VT.getScalarSizeInBits();
6581   if (!getVShiftImm(Op, ElementBits, Cnt))
6582     return false;
6583   if (!isIntrinsic)
6584     return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
6585   if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) {
6586     Cnt = -Cnt;
6587     return true;
6588   }
6589   return false;
6590 }
6591 
6592 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
6593                           const ARMSubtarget *ST) {
6594   EVT VT = N->getValueType(0);
6595   SDLoc dl(N);
6596   int64_t Cnt;
6597 
6598   if (!VT.isVector())
6599     return SDValue();
6600 
6601   // We essentially have two forms here. Shift by an immediate and shift by a
6602   // vector register (there is also a shift by a GPR, but that is just handled
6603   // with a tablegen pattern). We cannot easily match shift by an immediate in
6604   // tablegen so we do that here and generate a VSHLIMM/VSHRsIMM/VSHRuIMM.
6605   // For shifting by a vector, we don't have VSHR, only VSHL (which can be
6606   // signed or unsigned, and a negative shift indicates a shift right).
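  // For example, an SRL by a non-constant vector amount %n is emitted below as
  // a VSHLu of the value by (0 - %n).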
6607   if (N->getOpcode() == ISD::SHL) {
6608     if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
6609       return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0),
6610                          DAG.getConstant(Cnt, dl, MVT::i32));
6611     return DAG.getNode(ARMISD::VSHLu, dl, VT, N->getOperand(0),
6612                        N->getOperand(1));
6613   }
6614 
6615   assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
6616          "unexpected vector shift opcode");
6617 
6618   if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
6619     unsigned VShiftOpc =
6620         (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM);
6621     return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
6622                        DAG.getConstant(Cnt, dl, MVT::i32));
6623   }
6624 
6625   // We don't have dedicated operations for the remaining right shifts, so use
6626   // a shift left by the negated shift amount instead.
6627   EVT ShiftVT = N->getOperand(1).getValueType();
6628   SDValue NegatedCount = DAG.getNode(
6629       ISD::SUB, dl, ShiftVT, getZeroVector(ShiftVT, DAG, dl), N->getOperand(1));
6630   unsigned VShiftOpc =
6631       (N->getOpcode() == ISD::SRA ? ARMISD::VSHLs : ARMISD::VSHLu);
6632   return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), NegatedCount);
6633 }
6634 
6635 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
6636                                 const ARMSubtarget *ST) {
6637   EVT VT = N->getValueType(0);
6638   SDLoc dl(N);
6639 
6640   // We can get here for a node like i32 = ISD::SHL i32, i64
6641   if (VT != MVT::i64)
6642     return SDValue();
6643 
6644   assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA ||
6645           N->getOpcode() == ISD::SHL) &&
6646          "Unknown shift to lower!");
6647 
6648   unsigned ShOpc = N->getOpcode();
6649   if (ST->hasMVEIntegerOps()) {
6650     SDValue ShAmt = N->getOperand(1);
6651     unsigned ShPartsOpc = ARMISD::LSLL;
6652     ConstantSDNode *Con = dyn_cast<ConstantSDNode>(ShAmt);
6653 
6654     // If the shift amount is a constant equal to zero or at least 32, or if
6655     // its type is wider than 64 bits, fall back to the default expansion.
6656     if (ShAmt->getValueType(0).getSizeInBits() > 64 ||
6657         (Con && (Con->getZExtValue() == 0 || Con->getZExtValue() >= 32)))
6658       return SDValue();
6659 
6660     // Extract the lower 32 bits of the shift amount if it's not an i32
6661     if (ShAmt->getValueType(0) != MVT::i32)
6662       ShAmt = DAG.getZExtOrTrunc(ShAmt, dl, MVT::i32);
6663 
6664     if (ShOpc == ISD::SRL) {
6665       if (!Con)
6666         // There is no t2LSRLr instruction, so if the shift amount is in a
6667         // register, negate it and use an LSLL, emulating a right shift.
6668         ShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
6669                             DAG.getConstant(0, dl, MVT::i32), ShAmt);
6670       else
6671         // Else generate an lsrl on the immediate shift amount
6672         ShPartsOpc = ARMISD::LSRL;
6673     } else if (ShOpc == ISD::SRA)
6674       ShPartsOpc = ARMISD::ASRL;
6675 
6676     // Split Lower/Upper 32 bits of the destination/source
6677     SDValue Lo, Hi;
6678     std::tie(Lo, Hi) =
6679         DAG.SplitScalar(N->getOperand(0), dl, MVT::i32, MVT::i32);
6680     // Generate the shift operation as computed above
6681     Lo = DAG.getNode(ShPartsOpc, dl, DAG.getVTList(MVT::i32, MVT::i32), Lo, Hi,
6682                      ShAmt);
6683     // The upper 32 bits come from the second return value of lsll
6684     Hi = SDValue(Lo.getNode(), 1);
6685     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6686   }
6687 
6688   // We only lower SRA, SRL of 1 here, all others use generic lowering.
6689   if (!isOneConstant(N->getOperand(1)) || N->getOpcode() == ISD::SHL)
6690     return SDValue();
6691 
6692   // If we are in Thumb1 mode, we don't have RRX.
6693   if (ST->isThumb1Only())
6694     return SDValue();
6695 
6696   // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
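  // For an SRL of one, SRL_GLUE shifts the high word right by one and places
  // the bit shifted out in the carry flag; RRX then shifts the low word right
  // by one and inserts that carry into its top bit.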
6697   SDValue Lo, Hi;
6698   std::tie(Lo, Hi) = DAG.SplitScalar(N->getOperand(0), dl, MVT::i32, MVT::i32);
6699 
6700   // First, build a SRA_GLUE/SRL_GLUE op, which shifts the top part by one and
6701   // puts the bit shifted out into the carry flag.
6702   unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_GLUE:ARMISD::SRA_GLUE;
6703   Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);
6704 
6705   // The low part is an ARMISD::RRX operand, which shifts the carry in.
6706   Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
6707 
6708   // Merge the pieces into a single i64 value.
6709   return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6710 }
6711 
6712 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG,
6713                            const ARMSubtarget *ST) {
6714   bool Invert = false;
6715   bool Swap = false;
6716   unsigned Opc = ARMCC::AL;
6717 
6718   SDValue Op0 = Op.getOperand(0);
6719   SDValue Op1 = Op.getOperand(1);
6720   SDValue CC = Op.getOperand(2);
6721   EVT VT = Op.getValueType();
6722   ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
6723   SDLoc dl(Op);
6724 
6725   EVT CmpVT;
6726   if (ST->hasNEON())
6727     CmpVT = Op0.getValueType().changeVectorElementTypeToInteger();
6728   else {
6729     assert(ST->hasMVEIntegerOps() &&
6730            "No hardware support for integer vector comparison!");
6731 
6732     if (Op.getValueType().getVectorElementType() != MVT::i1)
6733       return SDValue();
6734 
6735     // Make sure we expand floating point setcc to scalar if we do not have
6736     // mve.fp, so that we can handle them from there.
6737     if (Op0.getValueType().isFloatingPoint() && !ST->hasMVEFloatOps())
6738       return SDValue();
6739 
6740     CmpVT = VT;
6741   }
6742 
6743   if (Op0.getValueType().getVectorElementType() == MVT::i64 &&
6744       (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) {
6745     // Special-case integer 64-bit equality comparisons. They aren't legal,
6746     // but they can be lowered with a few vector instructions.
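    // Viewing each i64 lane as two i32 lanes: compare all i32 lanes for
    // equality, then AND the result with its VREV64-swapped self so a lane is
    // all-ones only if both halves of the i64 matched.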
6747     unsigned CmpElements = CmpVT.getVectorNumElements() * 2;
6748     EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements);
6749     SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0);
6750     SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1);
6751     SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1,
6752                               DAG.getCondCode(ISD::SETEQ));
6753     SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp);
6754     SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed);
6755     Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged);
6756     if (SetCCOpcode == ISD::SETNE)
6757       Merged = DAG.getNOT(dl, Merged, CmpVT);
6758     Merged = DAG.getSExtOrTrunc(Merged, dl, VT);
6759     return Merged;
6760   }
6761 
6762   if (CmpVT.getVectorElementType() == MVT::i64)
6763     // 64-bit comparisons are not legal in general.
6764     return SDValue();
6765 
6766   if (Op1.getValueType().isFloatingPoint()) {
6767     switch (SetCCOpcode) {
6768     default: llvm_unreachable("Illegal FP comparison");
6769     case ISD::SETUNE:
6770     case ISD::SETNE:
6771       if (ST->hasMVEFloatOps()) {
6772         Opc = ARMCC::NE; break;
6773       } else {
6774         Invert = true; [[fallthrough]];
6775       }
6776     case ISD::SETOEQ:
6777     case ISD::SETEQ:  Opc = ARMCC::EQ; break;
6778     case ISD::SETOLT:
6779     case ISD::SETLT: Swap = true; [[fallthrough]];
6780     case ISD::SETOGT:
6781     case ISD::SETGT:  Opc = ARMCC::GT; break;
6782     case ISD::SETOLE:
6783     case ISD::SETLE:  Swap = true; [[fallthrough]];
6784     case ISD::SETOGE:
6785     case ISD::SETGE: Opc = ARMCC::GE; break;
6786     case ISD::SETUGE: Swap = true; [[fallthrough]];
6787     case ISD::SETULE: Invert = true; Opc = ARMCC::GT; break;
6788     case ISD::SETUGT: Swap = true; [[fallthrough]];
6789     case ISD::SETULT: Invert = true; Opc = ARMCC::GE; break;
6790     case ISD::SETUEQ: Invert = true; [[fallthrough]];
6791     case ISD::SETONE: {
6792       // Expand this to (OLT | OGT).
6793       SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0,
6794                                    DAG.getConstant(ARMCC::GT, dl, MVT::i32));
6795       SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1,
6796                                    DAG.getConstant(ARMCC::GT, dl, MVT::i32));
6797       SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1);
6798       if (Invert)
6799         Result = DAG.getNOT(dl, Result, VT);
6800       return Result;
6801     }
6802     case ISD::SETUO: Invert = true; [[fallthrough]];
6803     case ISD::SETO: {
6804       // Expand this to (OLT | OGE).
6805       SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0,
6806                                    DAG.getConstant(ARMCC::GT, dl, MVT::i32));
6807       SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1,
6808                                    DAG.getConstant(ARMCC::GE, dl, MVT::i32));
6809       SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1);
6810       if (Invert)
6811         Result = DAG.getNOT(dl, Result, VT);
6812       return Result;
6813     }
6814     }
6815   } else {
6816     // Integer comparisons.
6817     switch (SetCCOpcode) {
6818     default: llvm_unreachable("Illegal integer comparison");
6819     case ISD::SETNE:
6820       if (ST->hasMVEIntegerOps()) {
6821         Opc = ARMCC::NE; break;
6822       } else {
6823         Invert = true; [[fallthrough]];
6824       }
6825     case ISD::SETEQ:  Opc = ARMCC::EQ; break;
6826     case ISD::SETLT:  Swap = true; [[fallthrough]];
6827     case ISD::SETGT:  Opc = ARMCC::GT; break;
6828     case ISD::SETLE:  Swap = true; [[fallthrough]];
6829     case ISD::SETGE:  Opc = ARMCC::GE; break;
6830     case ISD::SETULT: Swap = true; [[fallthrough]];
6831     case ISD::SETUGT: Opc = ARMCC::HI; break;
6832     case ISD::SETULE: Swap = true; [[fallthrough]];
6833     case ISD::SETUGE: Opc = ARMCC::HS; break;
6834     }
6835 
6836     // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
6837     if (ST->hasNEON() && Opc == ARMCC::EQ) {
6838       SDValue AndOp;
6839       if (ISD::isBuildVectorAllZeros(Op1.getNode()))
6840         AndOp = Op0;
6841       else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
6842         AndOp = Op1;
6843 
6844       // Ignore bitconvert.
6845       if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
6846         AndOp = AndOp.getOperand(0);
6847 
6848       if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
6849         Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0));
6850         Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1));
6851         SDValue Result = DAG.getNode(ARMISD::VTST, dl, CmpVT, Op0, Op1);
6852         if (!Invert)
6853           Result = DAG.getNOT(dl, Result, VT);
6854         return Result;
6855       }
6856     }
6857   }
6858 
6859   if (Swap)
6860     std::swap(Op0, Op1);
6861 
6862   // If one of the operands is a constant vector zero, attempt to fold the
6863   // comparison to a specialized compare-against-zero form.
6864   if (ISD::isBuildVectorAllZeros(Op0.getNode()) &&
6865       (Opc == ARMCC::GE || Opc == ARMCC::GT || Opc == ARMCC::EQ ||
6866        Opc == ARMCC::NE)) {
6867     if (Opc == ARMCC::GE)
6868       Opc = ARMCC::LE;
6869     else if (Opc == ARMCC::GT)
6870       Opc = ARMCC::LT;
6871     std::swap(Op0, Op1);
6872   }
6873 
6874   SDValue Result;
6875   if (ISD::isBuildVectorAllZeros(Op1.getNode()) &&
6876       (Opc == ARMCC::GE || Opc == ARMCC::GT || Opc == ARMCC::LE ||
6877        Opc == ARMCC::LT || Opc == ARMCC::NE || Opc == ARMCC::EQ))
6878     Result = DAG.getNode(ARMISD::VCMPZ, dl, CmpVT, Op0,
6879                          DAG.getConstant(Opc, dl, MVT::i32));
6880   else
6881     Result = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1,
6882                          DAG.getConstant(Opc, dl, MVT::i32));
6883 
6884   Result = DAG.getSExtOrTrunc(Result, dl, VT);
6885 
6886   if (Invert)
6887     Result = DAG.getNOT(dl, Result, VT);
6888 
6889   return Result;
6890 }
6891 
6892 static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) {
6893   SDValue LHS = Op.getOperand(0);
6894   SDValue RHS = Op.getOperand(1);
6895   SDValue Carry = Op.getOperand(2);
6896   SDValue Cond = Op.getOperand(3);
6897   SDLoc DL(Op);
6898 
6899   assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
6900 
6901   // ARMISD::SUBE expects a carry, not a borrow as ISD::USUBO_CARRY provides,
6902   // so we have to invert the incoming carry first.
6903   Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
6904                       DAG.getConstant(1, DL, MVT::i32), Carry);
6905   // This converts the boolean value carry into the carry flag.
6906   Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);
6907 
6908   SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
6909   SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry);
6910 
6911   SDValue FVal = DAG.getConstant(0, DL, MVT::i32);
6912   SDValue TVal = DAG.getConstant(1, DL, MVT::i32);
6913   SDValue ARMcc = DAG.getConstant(
6914       IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32);
6915   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
6916   SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR,
6917                                    Cmp.getValue(1), SDValue());
6918   return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc,
6919                      CCR, Chain.getValue(1));
6920 }
6921 
6922 /// isVMOVModifiedImm - Check if the specified splat value corresponds to a
6923 /// valid vector constant for a NEON or MVE instruction with a "modified
6924 /// immediate" operand (e.g., VMOV).  If so, return the encoded value.
6925 static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
6926                                  unsigned SplatBitSize, SelectionDAG &DAG,
6927                                  const SDLoc &dl, EVT &VT, EVT VectorVT,
6928                                  VMOVModImmType type) {
6929   unsigned OpCmode, Imm;
6930   bool is128Bits = VectorVT.is128BitVector();
6931 
6932   // SplatBitSize is set to the smallest size that splats the vector, so a
6933   // zero vector will always have SplatBitSize == 8.  However, NEON modified
6934   // immediate instructions other than VMOV do not support the 8-bit encoding
6935   // of a zero vector, and the default encoding of zero is supposed to be the
6936   // 32-bit version.
6937   if (SplatBits == 0)
6938     SplatBitSize = 32;
6939 
6940   switch (SplatBitSize) {
6941   case 8:
6942     if (type != VMOVModImm)
6943       return SDValue();
6944     // Any 1-byte value is OK.  Op=0, Cmode=1110.
6945     assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
6946     OpCmode = 0xe;
6947     Imm = SplatBits;
6948     VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
6949     break;
6950 
6951   case 16:
6952     // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
6953     VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
6954     if ((SplatBits & ~0xff) == 0) {
6955       // Value = 0x00nn: Op=x, Cmode=100x.
6956       OpCmode = 0x8;
6957       Imm = SplatBits;
6958       break;
6959     }
6960     if ((SplatBits & ~0xff00) == 0) {
6961       // Value = 0xnn00: Op=x, Cmode=101x.
6962       OpCmode = 0xa;
6963       Imm = SplatBits >> 8;
6964       break;
6965     }
6966     return SDValue();
6967 
6968   case 32:
6969     // NEON's 32-bit VMOV supports splat values where:
6970     // * only one byte is nonzero, or
6971     // * the least significant byte is 0xff and the second byte is nonzero, or
6972     // * the least significant 2 bytes are 0xff and the third is nonzero.
6973     VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
6974     if ((SplatBits & ~0xff) == 0) {
6975       // Value = 0x000000nn: Op=x, Cmode=000x.
6976       OpCmode = 0;
6977       Imm = SplatBits;
6978       break;
6979     }
6980     if ((SplatBits & ~0xff00) == 0) {
6981       // Value = 0x0000nn00: Op=x, Cmode=001x.
6982       OpCmode = 0x2;
6983       Imm = SplatBits >> 8;
6984       break;
6985     }
6986     if ((SplatBits & ~0xff0000) == 0) {
6987       // Value = 0x00nn0000: Op=x, Cmode=010x.
6988       OpCmode = 0x4;
6989       Imm = SplatBits >> 16;
6990       break;
6991     }
6992     if ((SplatBits & ~0xff000000) == 0) {
6993       // Value = 0xnn000000: Op=x, Cmode=011x.
6994       OpCmode = 0x6;
6995       Imm = SplatBits >> 24;
6996       break;
6997     }
6998 
6999     // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
7000     if (type == OtherModImm) return SDValue();
7001 
7002     if ((SplatBits & ~0xffff) == 0 &&
7003         ((SplatBits | SplatUndef) & 0xff) == 0xff) {
7004       // Value = 0x0000nnff: Op=x, Cmode=1100.
7005       OpCmode = 0xc;
7006       Imm = SplatBits >> 8;
7007       break;
7008     }
7009 
7010     // cmode == 0b1101 is not supported for MVE VMVN
7011     if (type == MVEVMVNModImm)
7012       return SDValue();
7013 
7014     if ((SplatBits & ~0xffffff) == 0 &&
7015         ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
7016       // Value = 0x00nnffff: Op=x, Cmode=1101.
7017       OpCmode = 0xd;
7018       Imm = SplatBits >> 16;
7019       break;
7020     }
7021 
7022     // Note: there are a few 32-bit splat values (specifically: 00ffff00,
7023     // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
7024     // VMOV.I32.  A (very) minor optimization would be to replicate the value
7025     // and fall through here to test for a valid 64-bit splat.  But, then the
7026     // caller would also need to check and handle the change in size.
7027     return SDValue();
7028 
7029   case 64: {
7030     if (type != VMOVModImm)
7031       return SDValue();
7032     // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
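    // Each bit of the 8-bit Imm selects one byte; e.g. the splat value
    // 0xff00ff00ff00ff00 is encoded as Imm = 0b10101010.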
7033     uint64_t BitMask = 0xff;
7034     unsigned ImmMask = 1;
7035     Imm = 0;
7036     for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
7037       if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
7038         Imm |= ImmMask;
7039       } else if ((SplatBits & BitMask) != 0) {
7040         return SDValue();
7041       }
7042       BitMask <<= 8;
7043       ImmMask <<= 1;
7044     }
7045 
7046     if (DAG.getDataLayout().isBigEndian()) {
7047       // Reverse the order of elements within the vector.
7048       unsigned BytesPerElem = VectorVT.getScalarSizeInBits() / 8;
7049       unsigned Mask = (1 << BytesPerElem) - 1;
7050       unsigned NumElems = 8 / BytesPerElem;
7051       unsigned NewImm = 0;
7052       for (unsigned ElemNum = 0; ElemNum < NumElems; ++ElemNum) {
7053         unsigned Elem = ((Imm >> ElemNum * BytesPerElem) & Mask);
7054         NewImm |= Elem << (NumElems - ElemNum - 1) * BytesPerElem;
7055       }
7056       Imm = NewImm;
7057     }
7058 
7059     // Op=1, Cmode=1110.
7060     OpCmode = 0x1e;
7061     VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
7062     break;
7063   }
7064 
7065   default:
7066     llvm_unreachable("unexpected size for isVMOVModifiedImm");
7067   }
7068 
7069   unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode, Imm);
7070   return DAG.getTargetConstant(EncodedVal, dl, MVT::i32);
7071 }
7072 
7073 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
7074                                            const ARMSubtarget *ST) const {
7075   EVT VT = Op.getValueType();
7076   bool IsDouble = (VT == MVT::f64);
7077   ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);
7078   const APFloat &FPVal = CFP->getValueAPF();
7079 
7080   // Prevent floating-point constants from using literal loads
7081   // when execute-only is enabled.
7082   if (ST->genExecuteOnly()) {
7083     // We shouldn't trigger this for v6m execute-only
7084     assert((!ST->isThumb1Only() || ST->hasV8MBaselineOps()) &&
7085            "Unexpected architecture");
7086 
7087     // If we can represent the constant as an immediate, don't lower it
7088     if (isFPImmLegal(FPVal, VT))
7089       return Op;
7090     // Otherwise, construct as integer, and move to float register
7091     APInt INTVal = FPVal.bitcastToAPInt();
7092     SDLoc DL(CFP);
7093     switch (VT.getSimpleVT().SimpleTy) {
7094       default:
7095         llvm_unreachable("Unknown floating point type!");
7096         break;
7097       case MVT::f64: {
7098         SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32);
7099         SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32);
7100         return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi);
7101       }
7102       case MVT::f32:
7103         return DAG.getNode(ARMISD::VMOVSR, DL, VT,
7104                            DAG.getConstant(INTVal, DL, MVT::i32));
7105     }
7106   }
7107 
7108   if (!ST->hasVFP3Base())
7109     return SDValue();
7110 
7111   // Use the default (constant pool) lowering for double constants when we have
7112   // an SP-only FPU
7113   if (IsDouble && !Subtarget->hasFP64())
7114     return SDValue();
7115 
7116   // Try splatting with a VMOV.f32...
7117   int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);
7118 
7119   if (ImmVal != -1) {
7120     if (IsDouble || !ST->useNEONForSinglePrecisionFP()) {
7121       // We have code in place to select a valid ConstantFP already, no need to
7122       // do any mangling.
7123       return Op;
7124     }
7125 
7126     // It's a float and we are trying to use NEON operations where
7127     // possible. Lower it to a splat followed by an extract.
7128     SDLoc DL(Op);
7129     SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32);
7130     SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
7131                                       NewVal);
7132     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
7133                        DAG.getConstant(0, DL, MVT::i32));
7134   }
7135 
7136   // The rest of our options are NEON only; make sure that's allowed before
7137   // proceeding.
7138   if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP()))
7139     return SDValue();
7140 
7141   EVT VMovVT;
7142   uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();
7143 
7144   // It wouldn't really be worth bothering for doubles except for one very
7145   // important value, which does happen to match: 0.0. So make sure we don't do
7146   // anything stupid.
7147   if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
7148     return SDValue();
7149 
7150   // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
7151   SDValue NewVal = isVMOVModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op),
7152                                      VMovVT, VT, VMOVModImm);
7153   if (NewVal != SDValue()) {
7154     SDLoc DL(Op);
7155     SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
7156                                       NewVal);
7157     if (IsDouble)
7158       return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
7159 
7160     // It's a float: cast and extract a vector element.
7161     SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
7162                                        VecConstant);
7163     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
7164                        DAG.getConstant(0, DL, MVT::i32));
7165   }
7166 
7167   // Finally, try a VMVN.i32
7168   NewVal = isVMOVModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT,
7169                              VT, VMVNModImm);
7170   if (NewVal != SDValue()) {
7171     SDLoc DL(Op);
7172     SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);
7173 
7174     if (IsDouble)
7175       return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
7176 
7177     // It's a float: cast and extract a vector element.
7178     SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
7179                                        VecConstant);
7180     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
7181                        DAG.getConstant(0, DL, MVT::i32));
7182   }
7183 
7184   return SDValue();
7185 }
7186 
7187 // Check if a VEXT instruction can handle the shuffle mask when the
7188 // vector sources of the shuffle are the same.
7189 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
7190   unsigned NumElts = VT.getVectorNumElements();
7191 
7192   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
7193   if (M[0] < 0)
7194     return false;
7195 
7196   Imm = M[0];
7197 
7198   // If this is a VEXT shuffle, the immediate value is the index of the first
7199   // element.  The other shuffle indices must be the successive elements after
7200   // the first one.
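  // For example, for v8i8 the mask <3, 4, 5, 6, 7, 0, 1, 2> is accepted with
  // Imm = 3 (indices wrap around within the single source).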
7201   unsigned ExpectedElt = Imm;
7202   for (unsigned i = 1; i < NumElts; ++i) {
7203     // Increment the expected index.  If it wraps around, just follow it
7204     // back to index zero and keep going.
7205     ++ExpectedElt;
7206     if (ExpectedElt == NumElts)
7207       ExpectedElt = 0;
7208 
7209     if (M[i] < 0) continue; // ignore UNDEF indices
7210     if (ExpectedElt != static_cast<unsigned>(M[i]))
7211       return false;
7212   }
7213 
7214   return true;
7215 }
7216 
7217 static bool isVEXTMask(ArrayRef<int> M, EVT VT,
7218                        bool &ReverseVEXT, unsigned &Imm) {
7219   unsigned NumElts = VT.getVectorNumElements();
7220   ReverseVEXT = false;
7221 
7222   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
7223   if (M[0] < 0)
7224     return false;
7225 
7226   Imm = M[0];
7227 
7228   // If this is a VEXT shuffle, the immediate value is the index of the first
7229   // element.  The other shuffle indices must be the successive elements after
7230   // the first one.
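  // For example, for v8i8 the mask <13, 14, 15, 0, 1, 2, 3, 4> wraps past the
  // end of the concatenated sources, so ReverseVEXT is set and Imm becomes
  // 13 - 8 = 5.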
7231   unsigned ExpectedElt = Imm;
7232   for (unsigned i = 1; i < NumElts; ++i) {
7233     // Increment the expected index.  If it wraps around, it may still be
7234     // a VEXT but the source vectors must be swapped.
7235     ExpectedElt += 1;
7236     if (ExpectedElt == NumElts * 2) {
7237       ExpectedElt = 0;
7238       ReverseVEXT = true;
7239     }
7240 
7241     if (M[i] < 0) continue; // ignore UNDEF indices
7242     if (ExpectedElt != static_cast<unsigned>(M[i]))
7243       return false;
7244   }
7245 
7246   // Adjust the index value if the source operands will be swapped.
7247   if (ReverseVEXT)
7248     Imm -= NumElts;
7249 
7250   return true;
7251 }
7252 
7253 static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
7254   // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
7255   // range, then 0 is placed into the resulting vector. So pretty much any mask
7256   // of 8 elements can work here.
7257   return VT == MVT::v8i8 && M.size() == 8;
7258 }
7259 
7260 static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask,
7261                                unsigned Index) {
7262   if (Mask.size() == Elements * 2)
7263     return Index / Elements;
7264   return Mask[Index] == 0 ? 0 : 1;
7265 }
7266 
7267 // Checks whether the shuffle mask represents a vector transpose (VTRN) by
7268 // checking that pairs of elements in the shuffle mask represent the same index
7269 // in each vector, incrementing the expected index by 2 at each step.
7270 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6]
7271 //  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g}
7272 //  v2={e,f,g,h}
7273 // WhichResult gives the offset for each element in the mask based on which
7274 // of the two results it belongs to.
7275 //
7276 // The transpose can be represented either as:
7277 // result1 = shufflevector v1, v2, result1_shuffle_mask
7278 // result2 = shufflevector v1, v2, result2_shuffle_mask
7279 // where v1/v2 and the shuffle masks have the same number of elements
7280 // (here WhichResult (see below) indicates which result is being checked)
7281 //
7282 // or as:
7283 // results = shufflevector v1, v2, shuffle_mask
7284 // where both results are returned in one vector and the shuffle mask has twice
7285 // as many elements as v1/v2 (here WhichResult will always be 0 if true) here we
7286 // want to check the low half and high half of the shuffle mask as if it were
7287 // the other case
7288 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
7289   unsigned EltSz = VT.getScalarSizeInBits();
7290   if (EltSz == 64)
7291     return false;
7292 
7293   unsigned NumElts = VT.getVectorNumElements();
7294   if (M.size() != NumElts && M.size() != NumElts*2)
7295     return false;
7296 
7297   // If the mask is twice as long as the input vector then we need to check the
7298   // upper and lower parts of the mask with a matching value for WhichResult
7299   // FIXME: A mask with only even values will be rejected in case the first
7300   // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only
7301   // M[0] is used to determine WhichResult
7302   for (unsigned i = 0; i < M.size(); i += NumElts) {
7303     WhichResult = SelectPairHalf(NumElts, M, i);
7304     for (unsigned j = 0; j < NumElts; j += 2) {
7305       if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
7306           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult))
7307         return false;
7308     }
7309   }
7310 
7311   if (M.size() == NumElts*2)
7312     WhichResult = 0;
7313 
7314   return true;
7315 }
7316 
7317 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
7318 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
7319 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
7320 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
7321   unsigned EltSz = VT.getScalarSizeInBits();
7322   if (EltSz == 64)
7323     return false;
7324 
7325   unsigned NumElts = VT.getVectorNumElements();
7326   if (M.size() != NumElts && M.size() != NumElts*2)
7327     return false;
7328 
7329   for (unsigned i = 0; i < M.size(); i += NumElts) {
7330     WhichResult = SelectPairHalf(NumElts, M, i);
7331     for (unsigned j = 0; j < NumElts; j += 2) {
7332       if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
7333           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult))
7334         return false;
7335     }
7336   }
7337 
7338   if (M.size() == NumElts*2)
7339     WhichResult = 0;
7340 
7341   return true;
7342 }
7343 
7344 // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking
7345 // that the mask elements are either all even and in steps of size 2 or all odd
7346 // and in steps of size 2.
7347 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6]
7348 //  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g}
7349 //  v2={e,f,g,h}
7350 // Requires similar checks to those of isVTRNMask with
7351 // respect to how the results are returned.
7352 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
7353   unsigned EltSz = VT.getScalarSizeInBits();
7354   if (EltSz == 64)
7355     return false;
7356 
7357   unsigned NumElts = VT.getVectorNumElements();
7358   if (M.size() != NumElts && M.size() != NumElts*2)
7359     return false;
7360 
7361   for (unsigned i = 0; i < M.size(); i += NumElts) {
7362     WhichResult = SelectPairHalf(NumElts, M, i);
7363     for (unsigned j = 0; j < NumElts; ++j) {
7364       if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult)
7365         return false;
7366     }
7367   }
7368 
7369   if (M.size() == NumElts*2)
7370     WhichResult = 0;
7371 
7372   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
7373   if (VT.is64BitVector() && EltSz == 32)
7374     return false;
7375 
7376   return true;
7377 }
7378 
7379 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
7380 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
7381 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
7382 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
7383   unsigned EltSz = VT.getScalarSizeInBits();
7384   if (EltSz == 64)
7385     return false;
7386 
7387   unsigned NumElts = VT.getVectorNumElements();
7388   if (M.size() != NumElts && M.size() != NumElts*2)
7389     return false;
7390 
7391   unsigned Half = NumElts / 2;
7392   for (unsigned i = 0; i < M.size(); i += NumElts) {
7393     WhichResult = SelectPairHalf(NumElts, M, i);
7394     for (unsigned j = 0; j < NumElts; j += Half) {
7395       unsigned Idx = WhichResult;
7396       for (unsigned k = 0; k < Half; ++k) {
7397         int MIdx = M[i + j + k];
7398         if (MIdx >= 0 && (unsigned) MIdx != Idx)
7399           return false;
7400         Idx += 2;
7401       }
7402     }
7403   }
7404 
7405   if (M.size() == NumElts*2)
7406     WhichResult = 0;
7407 
7408   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
7409   if (VT.is64BitVector() && EltSz == 32)
7410     return false;
7411 
7412   return true;
7413 }
7414 
7415 // Checks whether the shuffle mask represents a vector zip (VZIP) by checking
7416 // that pairs of elements of the shufflemask represent the same index in each
7417 // vector incrementing sequentially through the vectors.
7418 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5]
7419 //  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f}
7420 //  v2={e,f,g,h}
7421 // Requires similar checks to those of isVTRNMask with respect to how the
7422 // results are returned.
7423 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
7424   unsigned EltSz = VT.getScalarSizeInBits();
7425   if (EltSz == 64)
7426     return false;
7427 
7428   unsigned NumElts = VT.getVectorNumElements();
7429   if (M.size() != NumElts && M.size() != NumElts*2)
7430     return false;
7431 
7432   for (unsigned i = 0; i < M.size(); i += NumElts) {
7433     WhichResult = SelectPairHalf(NumElts, M, i);
7434     unsigned Idx = WhichResult * NumElts / 2;
7435     for (unsigned j = 0; j < NumElts; j += 2) {
7436       if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
7437           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts))
7438         return false;
7439       Idx += 1;
7440     }
7441   }
7442 
7443   if (M.size() == NumElts*2)
7444     WhichResult = 0;
7445 
7446   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
7447   if (VT.is64BitVector() && EltSz == 32)
7448     return false;
7449 
7450   return true;
7451 }
7452 
7453 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
7454 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
7455 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
7456 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
7457   unsigned EltSz = VT.getScalarSizeInBits();
7458   if (EltSz == 64)
7459     return false;
7460 
7461   unsigned NumElts = VT.getVectorNumElements();
7462   if (M.size() != NumElts && M.size() != NumElts*2)
7463     return false;
7464 
7465   for (unsigned i = 0; i < M.size(); i += NumElts) {
7466     WhichResult = SelectPairHalf(NumElts, M, i);
7467     unsigned Idx = WhichResult * NumElts / 2;
7468     for (unsigned j = 0; j < NumElts; j += 2) {
7469       if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
7470           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx))
7471         return false;
7472       Idx += 1;
7473     }
7474   }
7475 
7476   if (M.size() == NumElts*2)
7477     WhichResult = 0;
7478 
7479   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
7480   if (VT.is64BitVector() && EltSz == 32)
7481     return false;
7482 
7483   return true;
7484 }
7485 
7486 /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN),
7487 /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't.
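/// For example (illustrative): for a v8i16 shuffle the mask
/// <0, 8, 1, 9, 2, 10, 3, 11> is recognised as VZIP with WhichResult == 0,
/// while <0, 8, 2, 10, 4, 12, 6, 14> is recognised as VTRN with
/// WhichResult == 0.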
7488 static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT,
7489                                            unsigned &WhichResult,
7490                                            bool &isV_UNDEF) {
7491   isV_UNDEF = false;
7492   if (isVTRNMask(ShuffleMask, VT, WhichResult))
7493     return ARMISD::VTRN;
7494   if (isVUZPMask(ShuffleMask, VT, WhichResult))
7495     return ARMISD::VUZP;
7496   if (isVZIPMask(ShuffleMask, VT, WhichResult))
7497     return ARMISD::VZIP;
7498 
7499   isV_UNDEF = true;
7500   if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
7501     return ARMISD::VTRN;
7502   if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
7503     return ARMISD::VUZP;
7504   if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
7505     return ARMISD::VZIP;
7506 
7507   return 0;
7508 }
7509 
7510 /// \return true if this is a reverse operation on a vector.
7511 static bool isReverseMask(ArrayRef<int> M, EVT VT) {
7512   unsigned NumElts = VT.getVectorNumElements();
7513   // Make sure the mask has the right size.
7514   if (NumElts != M.size())
7515       return false;
7516 
7517   // Look for <15, ..., 3, -1, 1, 0>.
7518   for (unsigned i = 0; i != NumElts; ++i)
7519     if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
7520       return false;
7521 
7522   return true;
7523 }
7524 
7525 static bool isTruncMask(ArrayRef<int> M, EVT VT, bool Top, bool SingleSource) {
7526   unsigned NumElts = VT.getVectorNumElements();
7527   // Make sure the mask has the right size.
7528   if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8))
7529     return false;
7530 
7531   // Half-width truncation patterns (e.g. v4i32 -> v8i16):
7532   // !Top &&  SingleSource: <0, 2, 4, 6, 0, 2, 4, 6>
7533   // !Top && !SingleSource: <0, 2, 4, 6, 8, 10, 12, 14>
7534   //  Top &&  SingleSource: <1, 3, 5, 7, 1, 3, 5, 7>
7535   //  Top && !SingleSource: <1, 3, 5, 7, 9, 11, 13, 15>
7536   int Ofs = Top ? 1 : 0;
7537   int Upper = SingleSource ? 0 : NumElts;
7538   for (int i = 0, e = NumElts / 2; i != e; ++i) {
7539     if (M[i] >= 0 && M[i] != (i * 2) + Ofs)
7540       return false;
7541     if (M[i + e] >= 0 && M[i + e] != (i * 2) + Ofs + Upper)
7542       return false;
7543   }
7544   return true;
7545 }
7546 
7547 static bool isVMOVNMask(ArrayRef<int> M, EVT VT, bool Top, bool SingleSource) {
7548   unsigned NumElts = VT.getVectorNumElements();
7549   // Make sure the mask has the right size.
7550   if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8))
7551     return false;
7552 
7553   // If Top
7554   //   Look for <0, N, 2, N+2, 4, N+4, ..>.
7555   //   This inserts Input2 into Input1
7556   // else if not Top
7557   //   Look for <0, N+1, 2, N+3, 4, N+5, ..>
7558   //   This inserts Input1 into Input2
7559   unsigned Offset = Top ? 0 : 1;
7560   unsigned N = SingleSource ? 0 : NumElts;
7561   for (unsigned i = 0; i < NumElts; i += 2) {
7562     if (M[i] >= 0 && M[i] != (int)i)
7563       return false;
7564     if (M[i + 1] >= 0 && M[i + 1] != (int)(N + i + Offset))
7565       return false;
7566   }
7567 
7568   return true;
7569 }
7570 
7571 static bool isVMOVNTruncMask(ArrayRef<int> M, EVT ToVT, bool rev) {
7572   unsigned NumElts = ToVT.getVectorNumElements();
7573   if (NumElts != M.size())
7574     return false;
7575 
7576   // Test if the Trunc can be converted to a VMOVN with this shuffle. We are
7577   // looking for patterns of:
7578   // !rev: 0 N/2 1 N/2+1 2 N/2+2 ...
7579   //  rev: N/2 0 N/2+1 1 N/2+2 2 ...
7580 
7581   unsigned Off0 = rev ? NumElts / 2 : 0;
7582   unsigned Off1 = rev ? 0 : NumElts / 2;
7583   for (unsigned i = 0; i < NumElts; i += 2) {
7584     if (M[i] >= 0 && M[i] != (int)(Off0 + i / 2))
7585       return false;
7586     if (M[i + 1] >= 0 && M[i + 1] != (int)(Off1 + i / 2))
7587       return false;
7588   }
7589 
7590   return true;
7591 }
7592 
7593 // Reconstruct an MVE VCVT from a BuildVector of scalar fptrunc, all extracted
7594 // from a pair of inputs. For example:
7595 // BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0),
7596 //             FP_ROUND(EXTRACT_ELT(Y, 0),
7597 //             FP_ROUND(EXTRACT_ELT(X, 1),
7598 //             FP_ROUND(EXTRACT_ELT(Y, 1), ...)
7599 static SDValue LowerBuildVectorOfFPTrunc(SDValue BV, SelectionDAG &DAG,
7600                                          const ARMSubtarget *ST) {
7601   assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
7602   if (!ST->hasMVEFloatOps())
7603     return SDValue();
7604 
7605   SDLoc dl(BV);
7606   EVT VT = BV.getValueType();
7607   if (VT != MVT::v8f16)
7608     return SDValue();
7609 
7610   // We are looking for a buildvector of fptrunc elements, where all the
7611   // elements are extracted alternately from two sources. Check that the first
7612   // two items look plausible and extract some info from them (they are checked
7613   // properly in the loop below).
7614   if (BV.getOperand(0).getOpcode() != ISD::FP_ROUND ||
7615       BV.getOperand(0).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7616       BV.getOperand(0).getOperand(0).getConstantOperandVal(1) != 0)
7617     return SDValue();
7618   if (BV.getOperand(1).getOpcode() != ISD::FP_ROUND ||
7619       BV.getOperand(1).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7620       BV.getOperand(1).getOperand(0).getConstantOperandVal(1) != 0)
7621     return SDValue();
7622   SDValue Op0 = BV.getOperand(0).getOperand(0).getOperand(0);
7623   SDValue Op1 = BV.getOperand(1).getOperand(0).getOperand(0);
7624   if (Op0.getValueType() != MVT::v4f32 || Op1.getValueType() != MVT::v4f32)
7625     return SDValue();
7626 
7627   // Check all the values in the BuildVector line up with our expectations.
7628   for (unsigned i = 1; i < 4; i++) {
7629     auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) {
7630       return Trunc.getOpcode() == ISD::FP_ROUND &&
7631              Trunc.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7632              Trunc.getOperand(0).getOperand(0) == Op &&
7633              Trunc.getOperand(0).getConstantOperandVal(1) == Idx;
7634     };
7635     if (!Check(BV.getOperand(i * 2 + 0), Op0, i))
7636       return SDValue();
7637     if (!Check(BV.getOperand(i * 2 + 1), Op1, i))
7638       return SDValue();
7639   }
7640 
7641   SDValue N1 = DAG.getNode(ARMISD::VCVTN, dl, VT, DAG.getUNDEF(VT), Op0,
7642                            DAG.getConstant(0, dl, MVT::i32));
7643   return DAG.getNode(ARMISD::VCVTN, dl, VT, N1, Op1,
7644                      DAG.getConstant(1, dl, MVT::i32));
7645 }
7646 
7647 // Reconstruct an MVE VCVT from a BuildVector of scalar fpext, all extracted
7648 // from a single input on alternating lanes. For example:
7649 // BUILDVECTOR(FP_EXTEND(EXTRACT_ELT(X, 0),
7650 //             FP_EXTEND(EXTRACT_ELT(X, 2),
7651 //             FP_EXTEND(EXTRACT_ELT(X, 4), ...)
7652 static SDValue LowerBuildVectorOfFPExt(SDValue BV, SelectionDAG &DAG,
7653                                        const ARMSubtarget *ST) {
7654   assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
7655   if (!ST->hasMVEFloatOps())
7656     return SDValue();
7657 
7658   SDLoc dl(BV);
7659   EVT VT = BV.getValueType();
7660   if (VT != MVT::v4f32)
7661     return SDValue();
7662 
7663   // We are looking for a buildvector of fpext elements, where all the
7664   // elements are alternating lanes from a single source. For example <0,2,4,6>
7665   // or <1,3,5,7>. Check the first two items are valid enough and extract some
7666   // info from them (they are checked properly in the loop below).
7667   if (BV.getOperand(0).getOpcode() != ISD::FP_EXTEND ||
7668       BV.getOperand(0).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
7669     return SDValue();
7670   SDValue Op0 = BV.getOperand(0).getOperand(0).getOperand(0);
7671   int Offset = BV.getOperand(0).getOperand(0).getConstantOperandVal(1);
7672   if (Op0.getValueType() != MVT::v8f16 || (Offset != 0 && Offset != 1))
7673     return SDValue();
7674 
7675   // Check all the values in the BuildVector line up with our expectations.
7676   for (unsigned i = 1; i < 4; i++) {
7677     auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) {
7678       return Trunc.getOpcode() == ISD::FP_EXTEND &&
7679              Trunc.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7680              Trunc.getOperand(0).getOperand(0) == Op &&
7681              Trunc.getOperand(0).getConstantOperandVal(1) == Idx;
7682     };
7683     if (!Check(BV.getOperand(i), Op0, 2 * i + Offset))
7684       return SDValue();
7685   }
7686 
7687   return DAG.getNode(ARMISD::VCVTL, dl, VT, Op0,
7688                      DAG.getConstant(Offset, dl, MVT::i32));
7689 }
7690 
7691 // If N is an integer constant that can be moved into a register in one
7692 // instruction, return an SDValue of such a constant (will become a MOV
7693 // instruction).  Otherwise return null.
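// For instance (illustrative): on Thumb1, only values where Val or ~Val fits in
// 8 bits qualify (e.g. 0x000000ff); on ARM/Thumb2, modified immediates such as
// 0x00ff0000 (0xff rotated right by 16) qualify as well.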
7694 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
7695                                      const ARMSubtarget *ST, const SDLoc &dl) {
7696   uint64_t Val;
7697   if (!isa<ConstantSDNode>(N))
7698     return SDValue();
7699   Val = cast<ConstantSDNode>(N)->getZExtValue();
7700 
7701   if (ST->isThumb1Only()) {
7702     if (Val <= 255 || ~Val <= 255)
7703       return DAG.getConstant(Val, dl, MVT::i32);
7704   } else {
7705     if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
7706       return DAG.getConstant(Val, dl, MVT::i32);
7707   }
7708   return SDValue();
7709 }
7710 
7711 static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
7712                                     const ARMSubtarget *ST) {
7713   SDLoc dl(Op);
7714   EVT VT = Op.getValueType();
7715 
7716   assert(ST->hasMVEIntegerOps() && "LowerBUILD_VECTOR_i1 called without MVE!");
7717 
7718   unsigned NumElts = VT.getVectorNumElements();
7719   unsigned BoolMask;
7720   unsigned BitsPerBool;
7721   if (NumElts == 2) {
7722     BitsPerBool = 8;
7723     BoolMask = 0xff;
7724   } else if (NumElts == 4) {
7725     BitsPerBool = 4;
7726     BoolMask = 0xf;
7727   } else if (NumElts == 8) {
7728     BitsPerBool = 2;
7729     BoolMask = 0x3;
7730   } else if (NumElts == 16) {
7731     BitsPerBool = 1;
7732     BoolMask = 0x1;
7733   } else
7734     return SDValue();
7735 
7736   // If this is a single value copied into all lanes (a splat), we can just sign
7737   // extend that single value
7738   SDValue FirstOp = Op.getOperand(0);
7739   if (!isa<ConstantSDNode>(FirstOp) &&
7740       llvm::all_of(llvm::drop_begin(Op->ops()), [&FirstOp](const SDUse &U) {
7741         return U.get().isUndef() || U.get() == FirstOp;
7742       })) {
7743     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32, FirstOp,
7744                               DAG.getValueType(MVT::i1));
7745     return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), Ext);
7746   }
7747 
7748   // First create base with bits set where known
7749   unsigned Bits32 = 0;
7750   for (unsigned i = 0; i < NumElts; ++i) {
7751     SDValue V = Op.getOperand(i);
7752     if (!isa<ConstantSDNode>(V) && !V.isUndef())
7753       continue;
7754     bool BitSet = V.isUndef() ? false : cast<ConstantSDNode>(V)->getZExtValue();
7755     if (BitSet)
7756       Bits32 |= BoolMask << (i * BitsPerBool);
7757   }
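  // Illustrative example: for a v4i1 constant <1, 0, 1, 1>, BitsPerBool is 4,
  // so the loop above produces Bits32 == 0xff0f (lanes 0, 2 and 3 each set
  // their 4-bit group of the 16-bit predicate).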
7758 
7759   // Add in unknown nodes
7760   SDValue Base = DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT,
7761                              DAG.getConstant(Bits32, dl, MVT::i32));
7762   for (unsigned i = 0; i < NumElts; ++i) {
7763     SDValue V = Op.getOperand(i);
7764     if (isa<ConstantSDNode>(V) || V.isUndef())
7765       continue;
7766     Base = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Base, V,
7767                        DAG.getConstant(i, dl, MVT::i32));
7768   }
7769 
7770   return Base;
7771 }
7772 
7773 static SDValue LowerBUILD_VECTORToVIDUP(SDValue Op, SelectionDAG &DAG,
7774                                         const ARMSubtarget *ST) {
7775   if (!ST->hasMVEIntegerOps())
7776     return SDValue();
7777 
7778   // We are looking for a buildvector where each element is Op[0] + i*N
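  // For example (illustrative): a v4i32 of <X, X+4, X+8, X+12> becomes a VIDUP
  // of X with step 4.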
7779   EVT VT = Op.getValueType();
7780   SDValue Op0 = Op.getOperand(0);
7781   unsigned NumElts = VT.getVectorNumElements();
7782 
7783   // Get the increment value from operand 1
7784   SDValue Op1 = Op.getOperand(1);
7785   if (Op1.getOpcode() != ISD::ADD || Op1.getOperand(0) != Op0 ||
7786       !isa<ConstantSDNode>(Op1.getOperand(1)))
7787     return SDValue();
7788   unsigned N = Op1.getConstantOperandVal(1);
7789   if (N != 1 && N != 2 && N != 4 && N != 8)
7790     return SDValue();
7791 
7792   // Check that each other operand matches
7793   for (unsigned I = 2; I < NumElts; I++) {
7794     SDValue OpI = Op.getOperand(I);
7795     if (OpI.getOpcode() != ISD::ADD || OpI.getOperand(0) != Op0 ||
7796         !isa<ConstantSDNode>(OpI.getOperand(1)) ||
7797         OpI.getConstantOperandVal(1) != I * N)
7798       return SDValue();
7799   }
7800 
7801   SDLoc DL(Op);
7802   return DAG.getNode(ARMISD::VIDUP, DL, DAG.getVTList(VT, MVT::i32), Op0,
7803                      DAG.getConstant(N, DL, MVT::i32));
7804 }
7805 
7806 // Returns true if the operation N can be treated as a qr instruction variant
7807 // at operand Op.
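// The "qr" variants are the MVE vector-by-scalar forms (e.g. VADD.i32 Qd, Qn,
// Rm), which take one operand from a GPR instead of a vector register.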
7808 static bool IsQRMVEInstruction(const SDNode *N, const SDNode *Op) {
7809   switch (N->getOpcode()) {
7810   case ISD::ADD:
7811   case ISD::MUL:
7812   case ISD::SADDSAT:
7813   case ISD::UADDSAT:
7814     return true;
7815   case ISD::SUB:
7816   case ISD::SSUBSAT:
7817   case ISD::USUBSAT:
7818     return N->getOperand(1).getNode() == Op;
7819   case ISD::INTRINSIC_WO_CHAIN:
7820     switch (N->getConstantOperandVal(0)) {
7821     case Intrinsic::arm_mve_add_predicated:
7822     case Intrinsic::arm_mve_mul_predicated:
7823     case Intrinsic::arm_mve_qadd_predicated:
7824     case Intrinsic::arm_mve_vhadd:
7825     case Intrinsic::arm_mve_hadd_predicated:
7826     case Intrinsic::arm_mve_vqdmulh:
7827     case Intrinsic::arm_mve_qdmulh_predicated:
7828     case Intrinsic::arm_mve_vqrdmulh:
7829     case Intrinsic::arm_mve_qrdmulh_predicated:
7830     case Intrinsic::arm_mve_vqdmull:
7831     case Intrinsic::arm_mve_vqdmull_predicated:
7832       return true;
7833     case Intrinsic::arm_mve_sub_predicated:
7834     case Intrinsic::arm_mve_qsub_predicated:
7835     case Intrinsic::arm_mve_vhsub:
7836     case Intrinsic::arm_mve_hsub_predicated:
7837       return N->getOperand(2).getNode() == Op;
7838     default:
7839       return false;
7840     }
7841   default:
7842     return false;
7843   }
7844 }
7845 
7846 // If this is a case we can't handle, return null and let the default
7847 // expansion code take care of it.
7848 SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
7849                                              const ARMSubtarget *ST) const {
7850   BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
7851   SDLoc dl(Op);
7852   EVT VT = Op.getValueType();
7853 
7854   if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
7855     return LowerBUILD_VECTOR_i1(Op, DAG, ST);
7856 
7857   if (SDValue R = LowerBUILD_VECTORToVIDUP(Op, DAG, ST))
7858     return R;
7859 
7860   APInt SplatBits, SplatUndef;
7861   unsigned SplatBitSize;
7862   bool HasAnyUndefs;
7863   if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
7864     if (SplatUndef.isAllOnes())
7865       return DAG.getUNDEF(VT);
7866 
7867     // If all the users of this constant splat are qr instruction variants,
7868     // generate a vdup of the constant.
7869     if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == SplatBitSize &&
7870         (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32) &&
7871         all_of(BVN->uses(),
7872                [BVN](const SDNode *U) { return IsQRMVEInstruction(U, BVN); })) {
7873       EVT DupVT = SplatBitSize == 32   ? MVT::v4i32
7874                   : SplatBitSize == 16 ? MVT::v8i16
7875                                        : MVT::v16i8;
7876       SDValue Const = DAG.getConstant(SplatBits.getZExtValue(), dl, MVT::i32);
7877       SDValue VDup = DAG.getNode(ARMISD::VDUP, dl, DupVT, Const);
7878       return DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, VDup);
7879     }
7880 
7881     if ((ST->hasNEON() && SplatBitSize <= 64) ||
7882         (ST->hasMVEIntegerOps() && SplatBitSize <= 64)) {
7883       // Check if an immediate VMOV works.
7884       EVT VmovVT;
7885       SDValue Val =
7886           isVMOVModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
7887                             SplatBitSize, DAG, dl, VmovVT, VT, VMOVModImm);
7888 
7889       if (Val.getNode()) {
7890         SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
7891         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
7892       }
7893 
7894       // Try an immediate VMVN.
7895       uint64_t NegatedImm = (~SplatBits).getZExtValue();
7896       Val = isVMOVModifiedImm(
7897           NegatedImm, SplatUndef.getZExtValue(), SplatBitSize, DAG, dl, VmovVT,
7898           VT, ST->hasMVEIntegerOps() ? MVEVMVNModImm : VMVNModImm);
7899       if (Val.getNode()) {
7900         SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
7901         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
7902       }
7903 
7904       // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
7905       if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
7906         int ImmVal = ARM_AM::getFP32Imm(SplatBits);
7907         if (ImmVal != -1) {
7908           SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32);
7909           return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
7910         }
7911       }
7912 
7913       // If we are under MVE, generate a VDUP(constant), bitcast to the original
7914       // type.
7915       if (ST->hasMVEIntegerOps() &&
7916           (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32)) {
7917         EVT DupVT = SplatBitSize == 32   ? MVT::v4i32
7918                     : SplatBitSize == 16 ? MVT::v8i16
7919                                          : MVT::v16i8;
7920         SDValue Const = DAG.getConstant(SplatBits.getZExtValue(), dl, MVT::i32);
7921         SDValue VDup = DAG.getNode(ARMISD::VDUP, dl, DupVT, Const);
7922         return DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, VDup);
7923       }
7924     }
7925   }
7926 
7927   // Scan through the operands to see if only one value is used.
7928   //
7929   // As an optimisation, even if more than one value is used it may be more
7930   // profitable to splat with one value and then change some lanes.
7931   //
7932   // Heuristically we decide to do this if the vector has a "dominant" value,
7933   // defined as splatted to more than half of the lanes.
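  // For example (illustrative): <X, X, X, Y> with non-constant X is lowered as
  // a VDUP (or VDUPLANE) of X followed by an insert of Y into lane 3.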
7934   unsigned NumElts = VT.getVectorNumElements();
7935   bool isOnlyLowElement = true;
7936   bool usesOnlyOneValue = true;
7937   bool hasDominantValue = false;
7938   bool isConstant = true;
7939 
7940   // Map of the number of times a particular SDValue appears in the
7941   // element list.
7942   DenseMap<SDValue, unsigned> ValueCounts;
7943   SDValue Value;
7944   for (unsigned i = 0; i < NumElts; ++i) {
7945     SDValue V = Op.getOperand(i);
7946     if (V.isUndef())
7947       continue;
7948     if (i > 0)
7949       isOnlyLowElement = false;
7950     if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
7951       isConstant = false;
7952 
7953     ValueCounts.insert(std::make_pair(V, 0));
7954     unsigned &Count = ValueCounts[V];
7955 
7956     // Is this value dominant? (takes up more than half of the lanes)
7957     if (++Count > (NumElts / 2)) {
7958       hasDominantValue = true;
7959       Value = V;
7960     }
7961   }
7962   if (ValueCounts.size() != 1)
7963     usesOnlyOneValue = false;
7964   if (!Value.getNode() && !ValueCounts.empty())
7965     Value = ValueCounts.begin()->first;
7966 
7967   if (ValueCounts.empty())
7968     return DAG.getUNDEF(VT);
7969 
7970   // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
7971   // Keep going if we are hitting this case.
7972   if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
7973     return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
7974 
7975   unsigned EltSize = VT.getScalarSizeInBits();
7976 
7977   // Use VDUP for non-constant splats.  For f32 constant splats, reduce to
7978   // i32 and try again.
7979   if (hasDominantValue && EltSize <= 32) {
7980     if (!isConstant) {
7981       SDValue N;
7982 
7983       // If we are VDUPing a value that comes directly from a vector, that will
7984       // cause an unnecessary move to and from a GPR, where instead we could
7985       // just use VDUPLANE. We can only do this if the lane being extracted
7986       // is at a constant index, as the VDUP from lane instructions only have
7987       // constant-index forms.
7988       ConstantSDNode *constIndex;
7989       if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7990           (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) {
7991         // We need to create a new undef vector to use for the VDUPLANE if the
7992         // size of the vector from which we get the value is different than the
7993         // size of the vector that we need to create. We will insert the element
7994         // such that the register coalescer will remove unnecessary copies.
7995         if (VT != Value->getOperand(0).getValueType()) {
7996           unsigned index = constIndex->getAPIntValue().getLimitedValue() %
7997                              VT.getVectorNumElements();
7998           N =  DAG.getNode(ARMISD::VDUPLANE, dl, VT,
7999                  DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
8000                         Value, DAG.getConstant(index, dl, MVT::i32)),
8001                            DAG.getConstant(index, dl, MVT::i32));
8002         } else
8003           N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
8004                         Value->getOperand(0), Value->getOperand(1));
8005       } else
8006         N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);
8007 
8008       if (!usesOnlyOneValue) {
8009         // The dominant value was splatted as 'N', but we now have to insert
8010         // all differing elements.
8011         for (unsigned I = 0; I < NumElts; ++I) {
8012           if (Op.getOperand(I) == Value)
8013             continue;
8014           SmallVector<SDValue, 3> Ops;
8015           Ops.push_back(N);
8016           Ops.push_back(Op.getOperand(I));
8017           Ops.push_back(DAG.getConstant(I, dl, MVT::i32));
8018           N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
8019         }
8020       }
8021       return N;
8022     }
8023     if (VT.getVectorElementType().isFloatingPoint()) {
8024       SmallVector<SDValue, 8> Ops;
8025       MVT FVT = VT.getVectorElementType().getSimpleVT();
8026       assert(FVT == MVT::f32 || FVT == MVT::f16);
8027       MVT IVT = (FVT == MVT::f32) ? MVT::i32 : MVT::i16;
8028       for (unsigned i = 0; i < NumElts; ++i)
8029         Ops.push_back(DAG.getNode(ISD::BITCAST, dl, IVT,
8030                                   Op.getOperand(i)));
8031       EVT VecVT = EVT::getVectorVT(*DAG.getContext(), IVT, NumElts);
8032       SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
8033       Val = LowerBUILD_VECTOR(Val, DAG, ST);
8034       if (Val.getNode())
8035         return DAG.getNode(ISD::BITCAST, dl, VT, Val);
8036     }
8037     if (usesOnlyOneValue) {
8038       SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
8039       if (isConstant && Val.getNode())
8040         return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
8041     }
8042   }
8043 
8044   // If all elements are constants and the case above didn't get hit, fall back
8045   // to the default expansion, which will generate a load from the constant
8046   // pool.
8047   if (isConstant)
8048     return SDValue();
8049 
8050   // Reconstruct the BUILDVECTOR to one of the legal shuffles (such as vext and
8051   // vmovn). Empirical tests suggest this is rarely worth it for vectors of
8052   // length <= 2.
8053   if (NumElts >= 4)
8054     if (SDValue shuffle = ReconstructShuffle(Op, DAG))
8055       return shuffle;
8056 
8057   // Attempt to turn a buildvector of scalar fptrunc's or fpext's back into
8058   // VCVT's
8059   if (SDValue VCVT = LowerBuildVectorOfFPTrunc(Op, DAG, Subtarget))
8060     return VCVT;
8061   if (SDValue VCVT = LowerBuildVectorOfFPExt(Op, DAG, Subtarget))
8062     return VCVT;
8063 
8064   if (ST->hasNEON() && VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) {
8065     // If we haven't found an efficient lowering, try splitting a 128-bit vector
8066     // into two 64-bit vectors; we might discover a better way to lower it.
8067     SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts);
8068     EVT ExtVT = VT.getVectorElementType();
8069     EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2);
8070     SDValue Lower = DAG.getBuildVector(HVT, dl, ArrayRef(&Ops[0], NumElts / 2));
8071     if (Lower.getOpcode() == ISD::BUILD_VECTOR)
8072       Lower = LowerBUILD_VECTOR(Lower, DAG, ST);
8073     SDValue Upper =
8074         DAG.getBuildVector(HVT, dl, ArrayRef(&Ops[NumElts / 2], NumElts / 2));
8075     if (Upper.getOpcode() == ISD::BUILD_VECTOR)
8076       Upper = LowerBUILD_VECTOR(Upper, DAG, ST);
8077     if (Lower && Upper)
8078       return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper);
8079   }
8080 
8081   // Vectors with 32- or 64-bit elements can be built by directly assigning
8082   // the subregisters.  Lower it to an ARMISD::BUILD_VECTOR so the operands
8083   // will be legalized.
8084   if (EltSize >= 32) {
8085     // Do the expansion with floating-point types, since that is what the VFP
8086     // registers are defined to use, and since i64 is not legal.
8087     EVT EltVT = EVT::getFloatingPointVT(EltSize);
8088     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
8089     SmallVector<SDValue, 8> Ops;
8090     for (unsigned i = 0; i < NumElts; ++i)
8091       Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
8092     SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
8093     return DAG.getNode(ISD::BITCAST, dl, VT, Val);
8094   }
8095 
8096   // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
8097   // know the default expansion would otherwise fall back on something even
8098   // worse. For a vector with one or two non-undef values, that's
8099   // scalar_to_vector for the elements followed by a shuffle (provided the
8100   // shuffle is valid for the target) and materialization element by element
8101   // on the stack followed by a load for everything else.
8102   if (!isConstant && !usesOnlyOneValue) {
8103     SDValue Vec = DAG.getUNDEF(VT);
8104     for (unsigned i = 0 ; i < NumElts; ++i) {
8105       SDValue V = Op.getOperand(i);
8106       if (V.isUndef())
8107         continue;
8108       SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
8109       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
8110     }
8111     return Vec;
8112   }
8113 
8114   return SDValue();
8115 }
8116 
8117 // Gather data to see if the operation can be modelled as a
8118 // shuffle in combination with VEXTs.
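// For example (illustrative): a v4i32 BUILD_VECTOR of EXTRACT_VECTOR_ELTs
// taking lanes 1..3 of A and lane 0 of B is rebuilt as a vector shuffle of A
// and B with mask <1, 2, 3, 4>, which can later be matched to a VEXT.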
8119 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
8120                                               SelectionDAG &DAG) const {
8121   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
8122   SDLoc dl(Op);
8123   EVT VT = Op.getValueType();
8124   unsigned NumElts = VT.getVectorNumElements();
8125 
8126   struct ShuffleSourceInfo {
8127     SDValue Vec;
8128     unsigned MinElt = std::numeric_limits<unsigned>::max();
8129     unsigned MaxElt = 0;
8130 
8131     // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
8132     // be compatible with the shuffle we intend to construct. As a result
8133     // ShuffleVec will be some sliding window into the original Vec.
8134     SDValue ShuffleVec;
8135 
8136     // Code should guarantee that element i in Vec starts at element
8137     // "WindowBase + i * WindowScale" in ShuffleVec.
8138     int WindowBase = 0;
8139     int WindowScale = 1;
8140 
8141     ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {}
8142 
8143     bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
8144   };
8145 
8146   // First gather all vectors used as an immediate source for this BUILD_VECTOR
8147   // node.
8148   SmallVector<ShuffleSourceInfo, 2> Sources;
8149   for (unsigned i = 0; i < NumElts; ++i) {
8150     SDValue V = Op.getOperand(i);
8151     if (V.isUndef())
8152       continue;
8153     else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
8154       // A shuffle can only come from building a vector from various
8155       // elements of other vectors.
8156       return SDValue();
8157     } else if (!isa<ConstantSDNode>(V.getOperand(1))) {
8158       // Furthermore, shuffles require a constant mask, whereas extractelts
8159       // accept variable indices.
8160       return SDValue();
8161     }
8162 
8163     // Add this element source to the list if it's not already there.
8164     SDValue SourceVec = V.getOperand(0);
8165     auto Source = llvm::find(Sources, SourceVec);
8166     if (Source == Sources.end())
8167       Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));
8168 
8169     // Update the minimum and maximum lane number seen.
8170     unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
8171     Source->MinElt = std::min(Source->MinElt, EltNo);
8172     Source->MaxElt = std::max(Source->MaxElt, EltNo);
8173   }
8174 
8175   // Currently only do something sane when at most two source vectors
8176   // are involved.
8177   if (Sources.size() > 2)
8178     return SDValue();
8179 
8180   // Find out the smallest element size among result and two sources, and use
8181   // it as element size to build the shuffle_vector.
8182   EVT SmallestEltTy = VT.getVectorElementType();
8183   for (auto &Source : Sources) {
8184     EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
8185     if (SrcEltTy.bitsLT(SmallestEltTy))
8186       SmallestEltTy = SrcEltTy;
8187   }
8188   unsigned ResMultiplier =
8189       VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
8190   NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
8191   EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
8192 
8193   // If the source vector is too wide or too narrow, we may nevertheless be able
8194   // to construct a compatible shuffle either by concatenating it with UNDEF or
8195   // extracting a suitable range of elements.
8196   for (auto &Src : Sources) {
8197     EVT SrcVT = Src.ShuffleVec.getValueType();
8198 
8199     uint64_t SrcVTSize = SrcVT.getFixedSizeInBits();
8200     uint64_t VTSize = VT.getFixedSizeInBits();
8201     if (SrcVTSize == VTSize)
8202       continue;
8203 
8204     // This stage of the search produces a source with the same element type as
8205     // the original, but with a total width matching the BUILD_VECTOR output.
8206     EVT EltVT = SrcVT.getVectorElementType();
8207     unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits();
8208     EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
8209 
8210     if (SrcVTSize < VTSize) {
8211       if (2 * SrcVTSize != VTSize)
8212         return SDValue();
8213       // We can pad out the smaller vector for free, so if it's part of a
8214       // shuffle...
8215       Src.ShuffleVec =
8216           DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
8217                       DAG.getUNDEF(Src.ShuffleVec.getValueType()));
8218       continue;
8219     }
8220 
8221     if (SrcVTSize != 2 * VTSize)
8222       return SDValue();
8223 
8224     if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
8225       // Span too large for a VEXT to cope
8226       return SDValue();
8227     }
8228 
8229     if (Src.MinElt >= NumSrcElts) {
8230       // The extraction can just take the second half
8231       Src.ShuffleVec =
8232           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
8233                       DAG.getConstant(NumSrcElts, dl, MVT::i32));
8234       Src.WindowBase = -NumSrcElts;
8235     } else if (Src.MaxElt < NumSrcElts) {
8236       // The extraction can just take the first half
8237       Src.ShuffleVec =
8238           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
8239                       DAG.getConstant(0, dl, MVT::i32));
8240     } else {
8241       // An actual VEXT is needed
8242       SDValue VEXTSrc1 =
8243           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
8244                       DAG.getConstant(0, dl, MVT::i32));
8245       SDValue VEXTSrc2 =
8246           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
8247                       DAG.getConstant(NumSrcElts, dl, MVT::i32));
8248 
8249       Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1,
8250                                    VEXTSrc2,
8251                                    DAG.getConstant(Src.MinElt, dl, MVT::i32));
8252       Src.WindowBase = -Src.MinElt;
8253     }
8254   }
8255 
8256   // Another possible incompatibility occurs from the vector element types. We
8257   // can fix this by bitcasting the source vectors to the same type we intend
8258   // for the shuffle.
8259   for (auto &Src : Sources) {
8260     EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
8261     if (SrcEltTy == SmallestEltTy)
8262       continue;
8263     assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
8264     Src.ShuffleVec = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, ShuffleVT, Src.ShuffleVec);
8265     Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits();
8266     Src.WindowBase *= Src.WindowScale;
8267   }
8268 
8269   // Final check before we try to actually produce a shuffle.
8270   LLVM_DEBUG(for (auto Src
8271                   : Sources)
8272                  assert(Src.ShuffleVec.getValueType() == ShuffleVT););
8273 
8274   // The stars all align, our next step is to produce the mask for the shuffle.
8275   SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
8276   int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
8277   for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
8278     SDValue Entry = Op.getOperand(i);
8279     if (Entry.isUndef())
8280       continue;
8281 
8282     auto Src = llvm::find(Sources, Entry.getOperand(0));
8283     int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();
8284 
8285     // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
8286     // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
8287     // segment.
8288     EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
8289     int BitsDefined = std::min(OrigEltTy.getScalarSizeInBits(),
8290                                VT.getScalarSizeInBits());
8291     int LanesDefined = BitsDefined / BitsPerShuffleLane;
8292 
8293     // This source is expected to fill ResMultiplier lanes of the final shuffle,
8294     // starting at the appropriate offset.
8295     int *LaneMask = &Mask[i * ResMultiplier];
8296 
8297     int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
8298     ExtractBase += NumElts * (Src - Sources.begin());
8299     for (int j = 0; j < LanesDefined; ++j)
8300       LaneMask[j] = ExtractBase + j;
8301   }
8302 
8303 
8304   // We can't handle more than two sources. This should have already
8305   // been checked before this point.
8306   assert(Sources.size() <= 2 && "Too many sources!");
8307 
8308   SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
8309   for (unsigned i = 0; i < Sources.size(); ++i)
8310     ShuffleOps[i] = Sources[i].ShuffleVec;
8311 
8312   SDValue Shuffle = buildLegalVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
8313                                             ShuffleOps[1], Mask, DAG);
8314   if (!Shuffle)
8315     return SDValue();
8316   return DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, Shuffle);
8317 }
8318 
8319 enum ShuffleOpCodes {
8320   OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
8321   OP_VREV,
8322   OP_VDUP0,
8323   OP_VDUP1,
8324   OP_VDUP2,
8325   OP_VDUP3,
8326   OP_VEXT1,
8327   OP_VEXT2,
8328   OP_VEXT3,
8329   OP_VUZPL, // VUZP, left result
8330   OP_VUZPR, // VUZP, right result
8331   OP_VZIPL, // VZIP, left result
8332   OP_VZIPR, // VZIP, right result
8333   OP_VTRNL, // VTRN, left result
8334   OP_VTRNR  // VTRN, right result
8335 };
8336 
8337 static bool isLegalMVEShuffleOp(unsigned PFEntry) {
8338   unsigned OpNum = (PFEntry >> 26) & 0x0F;
8339   switch (OpNum) {
8340   case OP_COPY:
8341   case OP_VREV:
8342   case OP_VDUP0:
8343   case OP_VDUP1:
8344   case OP_VDUP2:
8345   case OP_VDUP3:
8346     return true;
8347   }
8348   return false;
8349 }
8350 
8351 /// isShuffleMaskLegal - Targets can use this to indicate that they only
8352 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
8353 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
8354 /// are assumed to be legal.
8355 bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
8356   if (VT.getVectorNumElements() == 4 &&
8357       (VT.is128BitVector() || VT.is64BitVector())) {
8358     unsigned PFIndexes[4];
8359     for (unsigned i = 0; i != 4; ++i) {
8360       if (M[i] < 0)
8361         PFIndexes[i] = 8;
8362       else
8363         PFIndexes[i] = M[i];
8364     }
8365 
8366     // Compute the index in the perfect shuffle table.
8367     unsigned PFTableIndex =
8368       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
8369     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
8370     unsigned Cost = (PFEntry >> 30);
8371 
8372     if (Cost <= 4 && (Subtarget->hasNEON() || isLegalMVEShuffleOp(PFEntry)))
8373       return true;
8374   }
8375 
8376   bool ReverseVEXT, isV_UNDEF;
8377   unsigned Imm, WhichResult;
8378 
8379   unsigned EltSize = VT.getScalarSizeInBits();
8380   if (EltSize >= 32 ||
8381       ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
8382       ShuffleVectorInst::isIdentityMask(M, M.size()) ||
8383       isVREVMask(M, VT, 64) ||
8384       isVREVMask(M, VT, 32) ||
8385       isVREVMask(M, VT, 16))
8386     return true;
8387   else if (Subtarget->hasNEON() &&
8388            (isVEXTMask(M, VT, ReverseVEXT, Imm) ||
8389             isVTBLMask(M, VT) ||
8390             isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF)))
8391     return true;
8392   else if ((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) &&
8393            isReverseMask(M, VT))
8394     return true;
8395   else if (Subtarget->hasMVEIntegerOps() &&
8396            (isVMOVNMask(M, VT, true, false) ||
8397             isVMOVNMask(M, VT, false, false) || isVMOVNMask(M, VT, true, true)))
8398     return true;
8399   else if (Subtarget->hasMVEIntegerOps() &&
8400            (isTruncMask(M, VT, false, false) ||
8401             isTruncMask(M, VT, false, true) ||
8402             isTruncMask(M, VT, true, false) || isTruncMask(M, VT, true, true)))
8403     return true;
8404   else
8405     return false;
8406 }
8407 
8408 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
8409 /// the specified operations to build the shuffle.
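/// Each 32-bit table entry packs the cost in bits [31:30], the opcode (one of
/// the ShuffleOpCodes above) in bits [29:26], and the LHS and RHS sub-entries
/// in bits [25:13] and [12:0] respectively, as decoded below.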
8410 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
8411                                       SDValue RHS, SelectionDAG &DAG,
8412                                       const SDLoc &dl) {
8413   unsigned OpNum = (PFEntry >> 26) & 0x0F;
8414   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
8415   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
8416 
8417   if (OpNum == OP_COPY) {
8418     if (LHSID == (1*9+2)*9+3) return LHS;
8419     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
8420     return RHS;
8421   }
8422 
8423   SDValue OpLHS, OpRHS;
8424   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
8425   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
8426   EVT VT = OpLHS.getValueType();
8427 
8428   switch (OpNum) {
8429   default: llvm_unreachable("Unknown shuffle opcode!");
8430   case OP_VREV:
8431     // VREV divides the vector in half and swaps within the half.
8432     if (VT.getScalarSizeInBits() == 32)
8433       return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
8434     // vrev <4 x i16> -> VREV32
8435     if (VT.getScalarSizeInBits() == 16)
8436       return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
8437     // vrev <4 x i8> -> VREV16
8438     assert(VT.getScalarSizeInBits() == 8);
8439     return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
8440   case OP_VDUP0:
8441   case OP_VDUP1:
8442   case OP_VDUP2:
8443   case OP_VDUP3:
8444     return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
8445                        OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32));
8446   case OP_VEXT1:
8447   case OP_VEXT2:
8448   case OP_VEXT3:
8449     return DAG.getNode(ARMISD::VEXT, dl, VT,
8450                        OpLHS, OpRHS,
8451                        DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32));
8452   case OP_VUZPL:
8453   case OP_VUZPR:
8454     return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
8455                        OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
8456   case OP_VZIPL:
8457   case OP_VZIPR:
8458     return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
8459                        OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
8460   case OP_VTRNL:
8461   case OP_VTRNR:
8462     return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
8463                        OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
8464   }
8465 }
8466 
8467 static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
8468                                        ArrayRef<int> ShuffleMask,
8469                                        SelectionDAG &DAG) {
8470   // Check to see if we can use the VTBL instruction.
8471   SDValue V1 = Op.getOperand(0);
8472   SDValue V2 = Op.getOperand(1);
8473   SDLoc DL(Op);
8474 
8475   SmallVector<SDValue, 8> VTBLMask;
8476   for (int I : ShuffleMask)
8477     VTBLMask.push_back(DAG.getConstant(I, DL, MVT::i32));
8478 
8479   if (V2.getNode()->isUndef())
8480     return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
8481                        DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
8482 
8483   return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
8484                      DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
8485 }
8486 
8487 static SDValue LowerReverse_VECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
8488   SDLoc DL(Op);
8489   EVT VT = Op.getValueType();
8490 
8491   assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) &&
8492          "Expected a v8i16/v8f16/v16i8 type");
8493   SDValue OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, Op.getOperand(0));
8494   // For a v16i8 type: After the VREV, we have got <7, ..., 0, 15, ..., 8>. Now,
8495   // extract the first 8 bytes into the top double word and the last 8 bytes
8496   // into the bottom double word, through a new vector shuffle that will be
8497   // turned into a VEXT on Neon, or a couple of VMOVDs on MVE.
8498   std::vector<int> NewMask;
8499   for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++)
8500     NewMask.push_back(VT.getVectorNumElements() / 2 + i);
8501   for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++)
8502     NewMask.push_back(i);
8503   return DAG.getVectorShuffle(VT, DL, OpLHS, OpLHS, NewMask);
8504 }
8505 
8506 static EVT getVectorTyFromPredicateVector(EVT VT) {
8507   switch (VT.getSimpleVT().SimpleTy) {
8508   case MVT::v2i1:
8509     return MVT::v2f64;
8510   case MVT::v4i1:
8511     return MVT::v4i32;
8512   case MVT::v8i1:
8513     return MVT::v8i16;
8514   case MVT::v16i1:
8515     return MVT::v16i8;
8516   default:
8517     llvm_unreachable("Unexpected vector predicate type");
8518   }
8519 }
8520 
8521 static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT,
8522                                     SelectionDAG &DAG) {
8523   // Converting from boolean predicates to integers involves creating a vector
8524   // of all ones or all zeroes and selecting the lanes based upon the real
8525   // predicate.
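  // For example (illustrative): a v4i1 predicate is promoted to a v4i32 whose
  // lanes are all-ones where the corresponding predicate lane is set and zero
  // otherwise.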
8526   SDValue AllOnes =
8527       DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff), dl, MVT::i32);
8528   AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllOnes);
8529 
8530   SDValue AllZeroes =
8531       DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0x0), dl, MVT::i32);
8532   AllZeroes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllZeroes);
8533 
8534   // Get full vector type from predicate type
8535   EVT NewVT = getVectorTyFromPredicateVector(VT);
8536 
8537   SDValue RecastV1;
8538   // If the real predicate is a v8i1 or v4i1 (not v16i1) then we need to recast
8539   // this to a v16i1. This cannot be done with an ordinary bitcast because the
8540   // sizes are not the same. We have to use an MVE-specific PREDICATE_CAST node,
8541   // since we know in hardware the sizes are really the same.
8542   if (VT != MVT::v16i1)
8543     RecastV1 = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Pred);
8544   else
8545     RecastV1 = Pred;
8546 
8547   // Select either all ones or zeroes depending upon the real predicate bits.
8548   SDValue PredAsVector =
8549       DAG.getNode(ISD::VSELECT, dl, MVT::v16i8, RecastV1, AllOnes, AllZeroes);
8550 
8551   // Recast our new predicate-as-integer v16i8 vector into something
8552   // appropriate for the shuffle, i.e. v4i32 for a real v4i1 predicate.
8553   return DAG.getNode(ISD::BITCAST, dl, NewVT, PredAsVector);
8554 }
8555 
8556 static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG,
8557                                       const ARMSubtarget *ST) {
8558   EVT VT = Op.getValueType();
8559   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
8560   ArrayRef<int> ShuffleMask = SVN->getMask();
8561 
8562   assert(ST->hasMVEIntegerOps() &&
8563          "No support for vector shuffle of boolean predicates");
8564 
8565   SDValue V1 = Op.getOperand(0);
8566   SDValue V2 = Op.getOperand(1);
8567   SDLoc dl(Op);
8568   if (isReverseMask(ShuffleMask, VT)) {
8569     SDValue cast = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, V1);
8570     SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, cast);
8571     SDValue srl = DAG.getNode(ISD::SRL, dl, MVT::i32, rbit,
8572                               DAG.getConstant(16, dl, MVT::i32));
8573     return DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, srl);
8574   }
8575 
8576   // Until we can come up with optimised cases for every single vector
8577   // shuffle in existence we have chosen the least painful strategy. This is
8578   // to essentially promote the boolean predicate to a 8-bit integer, where
8579   // each predicate represents a byte. Then we fall back on a normal integer
8580   // vector shuffle and convert the result back into a predicate vector. In
8581   // many cases the generated code might be even better than scalar code
8582   // operating on bits. Just imagine trying to shuffle 8 arbitrary 2-bit
8583   // fields in a register into 8 other arbitrary 2-bit fields!
8584   SDValue PredAsVector1 = PromoteMVEPredVector(dl, V1, VT, DAG);
8585   EVT NewVT = PredAsVector1.getValueType();
8586   SDValue PredAsVector2 = V2.isUndef() ? DAG.getUNDEF(NewVT)
8587                                        : PromoteMVEPredVector(dl, V2, VT, DAG);
8588   assert(PredAsVector2.getValueType() == NewVT &&
8589          "Expected identical vector type in expanded i1 shuffle!");
8590 
8591   // Do the shuffle!
8592   SDValue Shuffled = DAG.getVectorShuffle(NewVT, dl, PredAsVector1,
8593                                           PredAsVector2, ShuffleMask);
8594 
8595   // Now return the result of comparing the shuffled vector with zero,
8596   // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. For a v2i1
8597   // we convert to a v4i1 compare to fill in the two halves of the i64 as i32s.
8598   if (VT == MVT::v2i1) {
8599     SDValue BC = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Shuffled);
8600     SDValue Cmp = DAG.getNode(ARMISD::VCMPZ, dl, MVT::v4i1, BC,
8601                               DAG.getConstant(ARMCC::NE, dl, MVT::i32));
8602     return DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v2i1, Cmp);
8603   }
8604   return DAG.getNode(ARMISD::VCMPZ, dl, VT, Shuffled,
8605                      DAG.getConstant(ARMCC::NE, dl, MVT::i32));
8606 }
8607 
8608 static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op,
8609                                             ArrayRef<int> ShuffleMask,
8610                                             SelectionDAG &DAG) {
8611   // Attempt to lower the vector shuffle using as many whole register movs as
8612   // possible. This is useful for types smaller than 32 bits, which would
8613   // often otherwise become a series of GPR movs.
8614   SDLoc dl(Op);
8615   EVT VT = Op.getValueType();
8616   if (VT.getScalarSizeInBits() >= 32)
8617     return SDValue();
8618 
8619   assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) &&
8620          "Unexpected vector type");
8621   int NumElts = VT.getVectorNumElements();
8622   int QuarterSize = NumElts / 4;
8623   // The four final parts of the vector, as i32's
8624   SDValue Parts[4];
8625 
8626   // Look for full lane vmovs like <0,1,2,3> or <u,5,6,7> etc. (but not
8627   // <u,u,u,u>), returning the vmov lane index
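  // For example (illustrative): for a v16i8 shuffle, a quarter of the mask
  // equal to <4, 5, 6, 7> is a whole 32-bit mov from lane 1 of the first
  // input, so getMovIdx returns 1 for that quarter.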
8628   auto getMovIdx = [](ArrayRef<int> ShuffleMask, int Start, int Length) {
8629     // Detect which mov lane this would be from the first non-undef element.
8630     int MovIdx = -1;
8631     for (int i = 0; i < Length; i++) {
8632       if (ShuffleMask[Start + i] >= 0) {
8633         if (ShuffleMask[Start + i] % Length != i)
8634           return -1;
8635         MovIdx = ShuffleMask[Start + i] / Length;
8636         break;
8637       }
8638     }
8639     // If all items are undef, leave this for other combines
8640     if (MovIdx == -1)
8641       return -1;
8642     // Check the remaining values are the correct part of the same mov
8643     for (int i = 1; i < Length; i++) {
8644       if (ShuffleMask[Start + i] >= 0 &&
8645           (ShuffleMask[Start + i] / Length != MovIdx ||
8646            ShuffleMask[Start + i] % Length != i))
8647         return -1;
8648     }
8649     return MovIdx;
8650   };
8651 
8652   for (int Part = 0; Part < 4; ++Part) {
8653     // Does this part look like a mov
8654     int Elt = getMovIdx(ShuffleMask, Part * QuarterSize, QuarterSize);
8655     if (Elt != -1) {
8656       SDValue Input = Op->getOperand(0);
8657       if (Elt >= 4) {
8658         Input = Op->getOperand(1);
8659         Elt -= 4;
8660       }
8661       SDValue BitCast = DAG.getBitcast(MVT::v4f32, Input);
8662       Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, BitCast,
8663                                 DAG.getConstant(Elt, dl, MVT::i32));
8664     }
8665   }
8666 
8667   // Nothing interesting found, just return
8668   if (!Parts[0] && !Parts[1] && !Parts[2] && !Parts[3])
8669     return SDValue();
8670 
8671   // The other parts need to be built with the old shuffle vector, cast to a
8672   // v4i32 and extract_vector_elts
8673   if (!Parts[0] || !Parts[1] || !Parts[2] || !Parts[3]) {
8674     SmallVector<int, 16> NewShuffleMask;
8675     for (int Part = 0; Part < 4; ++Part)
8676       for (int i = 0; i < QuarterSize; i++)
8677         NewShuffleMask.push_back(
8678             Parts[Part] ? -1 : ShuffleMask[Part * QuarterSize + i]);
8679     SDValue NewShuffle = DAG.getVectorShuffle(
8680         VT, dl, Op->getOperand(0), Op->getOperand(1), NewShuffleMask);
8681     SDValue BitCast = DAG.getBitcast(MVT::v4f32, NewShuffle);
8682 
8683     for (int Part = 0; Part < 4; ++Part)
8684       if (!Parts[Part])
8685         Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32,
8686                                   BitCast, DAG.getConstant(Part, dl, MVT::i32));
8687   }
8688   // Build a vector out of the various parts and bitcast it back to the original
8689   // type.
8690   SDValue NewVec = DAG.getNode(ARMISD::BUILD_VECTOR, dl, MVT::v4f32, Parts);
8691   return DAG.getBitcast(VT, NewVec);
8692 }
8693 
8694 static SDValue LowerVECTOR_SHUFFLEUsingOneOff(SDValue Op,
8695                                               ArrayRef<int> ShuffleMask,
8696                                               SelectionDAG &DAG) {
8697   SDValue V1 = Op.getOperand(0);
8698   SDValue V2 = Op.getOperand(1);
8699   EVT VT = Op.getValueType();
8700   unsigned NumElts = VT.getVectorNumElements();
8701 
8702   // A One-Off Identity mask is one that is mostly an identity mask from a
8703   // single source but contains a single element out-of-place, either from a
8704   // different vector or from another position in the same vector. As opposed to
8705   // lowering this via an ARMISD::BUILD_VECTOR we can generate an extract/insert
8706   // pair directly.
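  // For example (illustrative): for v4i32 the mask <0, 1, 2, 7> is an identity
  // of V1 except for element 3, which is taken from lane 3 of V2, so it can be
  // lowered as an extract from V2 plus an insert into V1.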
8707   auto isOneOffIdentityMask = [](ArrayRef<int> Mask, EVT VT, int BaseOffset,
8708                                  int &OffElement) {
8709     OffElement = -1;
8710     int NonUndef = 0;
8711     for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
8712       if (Mask[i] == -1)
8713         continue;
8714       NonUndef++;
8715       if (Mask[i] != i + BaseOffset) {
8716         if (OffElement == -1)
8717           OffElement = i;
8718         else
8719           return false;
8720       }
8721     }
8722     return NonUndef > 2 && OffElement != -1;
8723   };
8724   int OffElement;
8725   SDValue VInput;
8726   if (isOneOffIdentityMask(ShuffleMask, VT, 0, OffElement))
8727     VInput = V1;
8728   else if (isOneOffIdentityMask(ShuffleMask, VT, NumElts, OffElement))
8729     VInput = V2;
8730   else
8731     return SDValue();
8732 
8733   SDLoc dl(Op);
8734   EVT SVT = VT.getScalarType() == MVT::i8 || VT.getScalarType() == MVT::i16
8735                 ? MVT::i32
8736                 : VT.getScalarType();
8737   SDValue Elt = DAG.getNode(
8738       ISD::EXTRACT_VECTOR_ELT, dl, SVT,
8739       ShuffleMask[OffElement] < (int)NumElts ? V1 : V2,
8740       DAG.getVectorIdxConstant(ShuffleMask[OffElement] % NumElts, dl));
8741   return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, VInput, Elt,
8742                      DAG.getVectorIdxConstant(OffElement % NumElts, dl));
8743 }
8744 
8745 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
8746                                    const ARMSubtarget *ST) {
8747   SDValue V1 = Op.getOperand(0);
8748   SDValue V2 = Op.getOperand(1);
8749   SDLoc dl(Op);
8750   EVT VT = Op.getValueType();
8751   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
8752   unsigned EltSize = VT.getScalarSizeInBits();
8753 
8754   if (ST->hasMVEIntegerOps() && EltSize == 1)
8755     return LowerVECTOR_SHUFFLE_i1(Op, DAG, ST);
8756 
8757   // Convert shuffles that are directly supported on NEON to target-specific
8758   // DAG nodes, instead of keeping them as shuffles and matching them again
8759   // during code selection.  This is more efficient and avoids the possibility
8760   // of inconsistencies between legalization and selection.
  // FIXME: floating-point vectors should be canonicalized to integer vectors
  // of the same size so that they get CSEd properly.
8763   ArrayRef<int> ShuffleMask = SVN->getMask();
8764 
8765   if (EltSize <= 32) {
8766     if (SVN->isSplat()) {
8767       int Lane = SVN->getSplatIndex();
      // If this is an undef splat, generate it via "just" vdup, if possible.
8769       if (Lane == -1) Lane = 0;
8770 
8771       // Test if V1 is a SCALAR_TO_VECTOR.
8772       if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
8773         return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
8774       }
8775       // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
8776       // (and probably will turn into a SCALAR_TO_VECTOR once legalization
8777       // reaches it).
8778       if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
8779           !isa<ConstantSDNode>(V1.getOperand(0))) {
8780         bool IsScalarToVector = true;
8781         for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
8782           if (!V1.getOperand(i).isUndef()) {
8783             IsScalarToVector = false;
8784             break;
8785           }
8786         if (IsScalarToVector)
8787           return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
8788       }
8789       return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
8790                          DAG.getConstant(Lane, dl, MVT::i32));
8791     }
8792 
8793     bool ReverseVEXT = false;
8794     unsigned Imm = 0;
8795     if (ST->hasNEON() && isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
8796       if (ReverseVEXT)
8797         std::swap(V1, V2);
8798       return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
8799                          DAG.getConstant(Imm, dl, MVT::i32));
8800     }
8801 
8802     if (isVREVMask(ShuffleMask, VT, 64))
8803       return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
8804     if (isVREVMask(ShuffleMask, VT, 32))
8805       return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
8806     if (isVREVMask(ShuffleMask, VT, 16))
8807       return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
8808 
    if (ST->hasNEON() && V2->isUndef() &&
        isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
8810       return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
8811                          DAG.getConstant(Imm, dl, MVT::i32));
8812     }
8813 
8814     // Check for Neon shuffles that modify both input vectors in place.
8815     // If both results are used, i.e., if there are two shuffles with the same
8816     // source operands and with masks corresponding to both results of one of
8817     // these operations, DAG memoization will ensure that a single node is
8818     // used for both shuffles.
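    // The operations matched here (VTRN, VUZP and VZIP) each produce two
    // result vectors from the same pair of inputs.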
8819     unsigned WhichResult = 0;
8820     bool isV_UNDEF = false;
8821     if (ST->hasNEON()) {
8822       if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
8823               ShuffleMask, VT, WhichResult, isV_UNDEF)) {
8824         if (isV_UNDEF)
8825           V2 = V1;
8826         return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2)
8827             .getValue(WhichResult);
8828       }
8829     }
8830     if (ST->hasMVEIntegerOps()) {
8831       if (isVMOVNMask(ShuffleMask, VT, false, false))
8832         return DAG.getNode(ARMISD::VMOVN, dl, VT, V2, V1,
8833                            DAG.getConstant(0, dl, MVT::i32));
8834       if (isVMOVNMask(ShuffleMask, VT, true, false))
8835         return DAG.getNode(ARMISD::VMOVN, dl, VT, V1, V2,
8836                            DAG.getConstant(1, dl, MVT::i32));
8837       if (isVMOVNMask(ShuffleMask, VT, true, true))
8838         return DAG.getNode(ARMISD::VMOVN, dl, VT, V1, V1,
8839                            DAG.getConstant(1, dl, MVT::i32));
8840     }
8841 
8842     // Also check for these shuffles through CONCAT_VECTORS: we canonicalize
8843     // shuffles that produce a result larger than their operands with:
8844     //   shuffle(concat(v1, undef), concat(v2, undef))
8845     // ->
8846     //   shuffle(concat(v1, v2), undef)
8847     // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine).
8848     //
8849     // This is useful in the general case, but there are special cases where
8850     // native shuffles produce larger results: the two-result ops.
8851     //
8852     // Look through the concat when lowering them:
8853     //   shuffle(concat(v1, v2), undef)
8854     // ->
8855     //   concat(VZIP(v1, v2):0, :1)
8856     //
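    // For example (illustrative), a v8i16 mask of <0, 4, 1, 5, 2, 6, 3, 7>
    // applied to concat(v1, v2) is exactly the interleave produced by
    // concat(VZIP(v1, v2):0, :1).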
    if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS &&
        V2->isUndef()) {
8858       SDValue SubV1 = V1->getOperand(0);
8859       SDValue SubV2 = V1->getOperand(1);
8860       EVT SubVT = SubV1.getValueType();
8861 
8862       // We expect these to have been canonicalized to -1.
8863       assert(llvm::all_of(ShuffleMask, [&](int i) {
8864         return i < (int)VT.getVectorNumElements();
8865       }) && "Unexpected shuffle index into UNDEF operand!");
8866 
8867       if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
8868               ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
8869         if (isV_UNDEF)
8870           SubV2 = SubV1;
8871         assert((WhichResult == 0) &&
8872                "In-place shuffle of concat can only have one result!");
8873         SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT),
8874                                   SubV1, SubV2);
8875         return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0),
8876                            Res.getValue(1));
8877       }
8878     }
8879   }
8880 
8881   if (ST->hasMVEIntegerOps() && EltSize <= 32) {
8882     if (SDValue V = LowerVECTOR_SHUFFLEUsingOneOff(Op, ShuffleMask, DAG))
8883       return V;
8884 
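    // Recognise masks that take every other element from the inputs, i.e. the
    // bottom (or top) half of each double-width lane; these lower to an
    // MVETRUNC, shifting the inputs right first when the top halves are
    // wanted.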
8885     for (bool Top : {false, true}) {
8886       for (bool SingleSource : {false, true}) {
8887         if (isTruncMask(ShuffleMask, VT, Top, SingleSource)) {
8888           MVT FromSVT = MVT::getIntegerVT(EltSize * 2);
8889           MVT FromVT = MVT::getVectorVT(FromSVT, ShuffleMask.size() / 2);
8890           SDValue Lo = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, FromVT, V1);
8891           SDValue Hi = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, FromVT,
8892                                    SingleSource ? V1 : V2);
8893           if (Top) {
8894             SDValue Amt = DAG.getConstant(EltSize, dl, FromVT);
8895             Lo = DAG.getNode(ISD::SRL, dl, FromVT, Lo, Amt);
8896             Hi = DAG.getNode(ISD::SRL, dl, FromVT, Hi, Amt);
8897           }
8898           return DAG.getNode(ARMISD::MVETRUNC, dl, VT, Lo, Hi);
8899         }
8900       }
8901     }
8902   }
8903 
8904   // If the shuffle is not directly supported and it has 4 elements, use
8905   // the PerfectShuffle-generated table to synthesize it from other shuffles.
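  // Each mask element is encoded base-9 (lanes 0-7, or 8 for undef), so the
  // four elements together index a 9*9*9*9 entry table; the top two bits of
  // each entry hold the cost of the expansion.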
8906   unsigned NumElts = VT.getVectorNumElements();
8907   if (NumElts == 4) {
8908     unsigned PFIndexes[4];
8909     for (unsigned i = 0; i != 4; ++i) {
8910       if (ShuffleMask[i] < 0)
8911         PFIndexes[i] = 8;
8912       else
8913         PFIndexes[i] = ShuffleMask[i];
8914     }
8915 
8916     // Compute the index in the perfect shuffle table.
8917     unsigned PFTableIndex =
8918       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
8919     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
8920     unsigned Cost = (PFEntry >> 30);
8921 
8922     if (Cost <= 4) {
8923       if (ST->hasNEON())
8924         return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
8925       else if (isLegalMVEShuffleOp(PFEntry)) {
8926         unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
8927         unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
8928         unsigned PFEntryLHS = PerfectShuffleTable[LHSID];
8929         unsigned PFEntryRHS = PerfectShuffleTable[RHSID];
8930         if (isLegalMVEShuffleOp(PFEntryLHS) && isLegalMVEShuffleOp(PFEntryRHS))
8931           return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
8932       }
8933     }
8934   }
8935 
8936   // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
8937   if (EltSize >= 32) {
8938     // Do the expansion with floating-point types, since that is what the VFP
8939     // registers are defined to use, and since i64 is not legal.
8940     EVT EltVT = EVT::getFloatingPointVT(EltSize);
8941     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
8942     V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
8943     V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
8944     SmallVector<SDValue, 8> Ops;
8945     for (unsigned i = 0; i < NumElts; ++i) {
8946       if (ShuffleMask[i] < 0)
8947         Ops.push_back(DAG.getUNDEF(EltVT));
8948       else
8949         Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
8950                                   ShuffleMask[i] < (int)NumElts ? V1 : V2,
8951                                   DAG.getConstant(ShuffleMask[i] & (NumElts-1),
8952                                                   dl, MVT::i32)));
8953     }
8954     SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
8955     return DAG.getNode(ISD::BITCAST, dl, VT, Val);
8956   }
8957 
8958   if ((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) &&
8959       isReverseMask(ShuffleMask, VT))
8960     return LowerReverse_VECTOR_SHUFFLE(Op, DAG);
8961 
8962   if (ST->hasNEON() && VT == MVT::v8i8)
8963     if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG))
8964       return NewOp;
8965 
8966   if (ST->hasMVEIntegerOps())
8967     if (SDValue NewOp = LowerVECTOR_SHUFFLEUsingMovs(Op, ShuffleMask, DAG))
8968       return NewOp;
8969 
8970   return SDValue();
8971 }
8972 
8973 static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
8974                                          const ARMSubtarget *ST) {
8975   EVT VecVT = Op.getOperand(0).getValueType();
8976   SDLoc dl(Op);
8977 
8978   assert(ST->hasMVEIntegerOps() &&
8979          "LowerINSERT_VECTOR_ELT_i1 called without MVE!");
8980 
8981   SDValue Conv =
8982       DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0));
8983   unsigned Lane = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
8984   unsigned LaneWidth =
8985       getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8;
8986   unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth;
8987   SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32,
8988                             Op.getOperand(1), DAG.getValueType(MVT::i1));
8989   SDValue BFI = DAG.getNode(ARMISD::BFI, dl, MVT::i32, Conv, Ext,
8990                             DAG.getConstant(~Mask, dl, MVT::i32));
8991   return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), BFI);
8992 }
8993 
8994 SDValue ARMTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
8995                                                   SelectionDAG &DAG) const {
8996   // INSERT_VECTOR_ELT is legal only for immediate indexes.
8997   SDValue Lane = Op.getOperand(2);
8998   if (!isa<ConstantSDNode>(Lane))
8999     return SDValue();
9000 
9001   SDValue Elt = Op.getOperand(1);
9002   EVT EltVT = Elt.getValueType();
9003 
9004   if (Subtarget->hasMVEIntegerOps() &&
9005       Op.getValueType().getScalarSizeInBits() == 1)
9006     return LowerINSERT_VECTOR_ELT_i1(Op, DAG, Subtarget);
9007 
9008   if (getTypeAction(*DAG.getContext(), EltVT) ==
9009       TargetLowering::TypePromoteFloat) {
9010     // INSERT_VECTOR_ELT doesn't want f16 operands promoting to f32,
9011     // but the type system will try to do that if we don't intervene.
9012     // Reinterpret any such vector-element insertion as one with the
9013     // corresponding integer types.
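    // For example (illustrative), inserting an f16 element into a v8f16
    // vector becomes an i16 insertion into a v8i16 vector, wrapped in
    // bitcasts.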
9014 
9015     SDLoc dl(Op);
9016 
9017     EVT IEltVT = MVT::getIntegerVT(EltVT.getScalarSizeInBits());
9018     assert(getTypeAction(*DAG.getContext(), IEltVT) !=
9019            TargetLowering::TypePromoteFloat);
9020 
9021     SDValue VecIn = Op.getOperand(0);
9022     EVT VecVT = VecIn.getValueType();
9023     EVT IVecVT = EVT::getVectorVT(*DAG.getContext(), IEltVT,
9024                                   VecVT.getVectorNumElements());
9025 
9026     SDValue IElt = DAG.getNode(ISD::BITCAST, dl, IEltVT, Elt);
9027     SDValue IVecIn = DAG.getNode(ISD::BITCAST, dl, IVecVT, VecIn);
9028     SDValue IVecOut = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, IVecVT,
9029                                   IVecIn, IElt, Lane);
9030     return DAG.getNode(ISD::BITCAST, dl, VecVT, IVecOut);
9031   }
9032 
9033   return Op;
9034 }
9035 
9036 static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
9037                                           const ARMSubtarget *ST) {
9038   EVT VecVT = Op.getOperand(0).getValueType();
9039   SDLoc dl(Op);
9040 
9041   assert(ST->hasMVEIntegerOps() &&
9042          "LowerINSERT_VECTOR_ELT_i1 called without MVE!");
9043 
9044   SDValue Conv =
9045       DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0));
9046   unsigned Lane = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
9047   unsigned LaneWidth =
9048       getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8;
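  // Each predicate lane occupies LaneWidth bits of the 16-bit predicate, so
  // shifting right by Lane * LaneWidth brings the wanted lane down to bit 0.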
9049   SDValue Shift = DAG.getNode(ISD::SRL, dl, MVT::i32, Conv,
9050                               DAG.getConstant(Lane * LaneWidth, dl, MVT::i32));
9051   return Shift;
9052 }
9053 
9054 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG,
9055                                        const ARMSubtarget *ST) {
9056   // EXTRACT_VECTOR_ELT is legal only for immediate indexes.
9057   SDValue Lane = Op.getOperand(1);
9058   if (!isa<ConstantSDNode>(Lane))
9059     return SDValue();
9060 
9061   SDValue Vec = Op.getOperand(0);
9062   EVT VT = Vec.getValueType();
9063 
9064   if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
9065     return LowerEXTRACT_VECTOR_ELT_i1(Op, DAG, ST);
9066 
9067   if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) {
9068     SDLoc dl(Op);
9069     return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
9070   }
9071 
9072   return Op;
9073 }
9074 
9075 static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG,
9076                                       const ARMSubtarget *ST) {
9077   SDLoc dl(Op);
9078   assert(Op.getValueType().getScalarSizeInBits() == 1 &&
9079          "Unexpected custom CONCAT_VECTORS lowering");
9080   assert(isPowerOf2_32(Op.getNumOperands()) &&
9081          "Unexpected custom CONCAT_VECTORS lowering");
9082   assert(ST->hasMVEIntegerOps() &&
9083          "CONCAT_VECTORS lowering only supported for MVE");
9084 
9085   auto ConcatPair = [&](SDValue V1, SDValue V2) {
9086     EVT Op1VT = V1.getValueType();
9087     EVT Op2VT = V2.getValueType();
9088     assert(Op1VT == Op2VT && "Operand types don't match!");
9089     assert((Op1VT == MVT::v2i1 || Op1VT == MVT::v4i1 || Op1VT == MVT::v8i1) &&
9090            "Unexpected i1 concat operations!");
9091     EVT VT = Op1VT.getDoubleNumVectorElementsVT(*DAG.getContext());
9092 
9093     SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG);
9094     SDValue NewV2 = PromoteMVEPredVector(dl, V2, Op2VT, DAG);
9095 
9096     // We now have Op1 + Op2 promoted to vectors of integers, where v8i1 gets
9097     // promoted to v8i16, etc.
9098     MVT ElType =
9099         getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT();
9100     unsigned NumElts = 2 * Op1VT.getVectorNumElements();
9101 
9102     EVT ConcatVT = MVT::getVectorVT(ElType, NumElts);
9103     if (Op1VT == MVT::v4i1 || Op1VT == MVT::v8i1) {
9104       // Use MVETRUNC to truncate the combined NewV1::NewV2 into the smaller
9105       // ConcatVT.
9106       SDValue ConVec =
9107           DAG.getNode(ARMISD::MVETRUNC, dl, ConcatVT, NewV1, NewV2);
9108       return DAG.getNode(ARMISD::VCMPZ, dl, VT, ConVec,
9109                          DAG.getConstant(ARMCC::NE, dl, MVT::i32));
9110     }
9111 
9112     // Extract the vector elements from Op1 and Op2 one by one and truncate them
9113     // to be the right size for the destination. For example, if Op1 is v4i1
9114     // then the promoted vector is v4i32. The result of concatenation gives a
9115     // v8i1, which when promoted is v8i16. That means each i32 element from Op1
9116     // needs truncating to i16 and inserting in the result.
9117     auto ExtractInto = [&DAG, &dl](SDValue NewV, SDValue ConVec, unsigned &j) {
9118       EVT NewVT = NewV.getValueType();
9119       EVT ConcatVT = ConVec.getValueType();
9120       unsigned ExtScale = 1;
9121       if (NewVT == MVT::v2f64) {
9122         NewV = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, NewV);
9123         ExtScale = 2;
9124       }
9125       for (unsigned i = 0, e = NewVT.getVectorNumElements(); i < e; i++, j++) {
9126         SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV,
9127                                   DAG.getIntPtrConstant(i * ExtScale, dl));
9128         ConVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ConcatVT, ConVec, Elt,
9129                              DAG.getConstant(j, dl, MVT::i32));
9130       }
9131       return ConVec;
9132     };
9133     unsigned j = 0;
9134     SDValue ConVec = DAG.getNode(ISD::UNDEF, dl, ConcatVT);
9135     ConVec = ExtractInto(NewV1, ConVec, j);
9136     ConVec = ExtractInto(NewV2, ConVec, j);
9137 
9138     // Now return the result of comparing the subvector with zero, which will
9139     // generate a real predicate, i.e. v4i1, v8i1 or v16i1.
9140     return DAG.getNode(ARMISD::VCMPZ, dl, VT, ConVec,
9141                        DAG.getConstant(ARMCC::NE, dl, MVT::i32));
9142   };
9143 
9144   // Concat each pair of subvectors and pack into the lower half of the array.
9145   SmallVector<SDValue> ConcatOps(Op->op_begin(), Op->op_end());
9146   while (ConcatOps.size() > 1) {
9147     for (unsigned I = 0, E = ConcatOps.size(); I != E; I += 2) {
9148       SDValue V1 = ConcatOps[I];
9149       SDValue V2 = ConcatOps[I + 1];
9150       ConcatOps[I / 2] = ConcatPair(V1, V2);
9151     }
9152     ConcatOps.resize(ConcatOps.size() / 2);
9153   }
9154   return ConcatOps[0];
9155 }
9156 
9157 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
9158                                    const ARMSubtarget *ST) {
9159   EVT VT = Op->getValueType(0);
9160   if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
9161     return LowerCONCAT_VECTORS_i1(Op, DAG, ST);
9162 
9163   // The only time a CONCAT_VECTORS operation can have legal types is when
9164   // two 64-bit vectors are concatenated to a 128-bit vector.
9165   assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
9166          "unexpected CONCAT_VECTORS");
9167   SDLoc dl(Op);
9168   SDValue Val = DAG.getUNDEF(MVT::v2f64);
9169   SDValue Op0 = Op.getOperand(0);
9170   SDValue Op1 = Op.getOperand(1);
9171   if (!Op0.isUndef())
9172     Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
9173                       DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
9174                       DAG.getIntPtrConstant(0, dl));
9175   if (!Op1.isUndef())
9176     Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
9177                       DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
9178                       DAG.getIntPtrConstant(1, dl));
9179   return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
9180 }
9181 
9182 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG,
9183                                       const ARMSubtarget *ST) {
9184   SDValue V1 = Op.getOperand(0);
9185   SDValue V2 = Op.getOperand(1);
9186   SDLoc dl(Op);
9187   EVT VT = Op.getValueType();
9188   EVT Op1VT = V1.getValueType();
9189   unsigned NumElts = VT.getVectorNumElements();
9190   unsigned Index = cast<ConstantSDNode>(V2)->getZExtValue();
9191 
9192   assert(VT.getScalarSizeInBits() == 1 &&
9193          "Unexpected custom EXTRACT_SUBVECTOR lowering");
9194   assert(ST->hasMVEIntegerOps() &&
9195          "EXTRACT_SUBVECTOR lowering only supported for MVE");
9196 
9197   SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG);
9198 
9199   // We now have Op1 promoted to a vector of integers, where v8i1 gets
9200   // promoted to v8i16, etc.
9201 
9202   MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT();
9203 
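  // With only two elements, build a v4i32 in which each extracted element
  // appears twice, compare that against zero as a v4i1, and cast the
  // resulting predicate back down to v2i1.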
9204   if (NumElts == 2) {
9205     EVT SubVT = MVT::v4i32;
9206     SDValue SubVec = DAG.getNode(ISD::UNDEF, dl, SubVT);
9207     for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j += 2) {
9208       SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV1,
9209                                 DAG.getIntPtrConstant(i, dl));
9210       SubVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubVT, SubVec, Elt,
9211                            DAG.getConstant(j, dl, MVT::i32));
9212       SubVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubVT, SubVec, Elt,
9213                            DAG.getConstant(j + 1, dl, MVT::i32));
9214     }
9215     SDValue Cmp = DAG.getNode(ARMISD::VCMPZ, dl, MVT::v4i1, SubVec,
9216                               DAG.getConstant(ARMCC::NE, dl, MVT::i32));
9217     return DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v2i1, Cmp);
9218   }
9219 
9220   EVT SubVT = MVT::getVectorVT(ElType, NumElts);
9221   SDValue SubVec = DAG.getNode(ISD::UNDEF, dl, SubVT);
9222   for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j++) {
9223     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV1,
9224                               DAG.getIntPtrConstant(i, dl));
9225     SubVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubVT, SubVec, Elt,
9226                          DAG.getConstant(j, dl, MVT::i32));
9227   }
9228 
9229   // Now return the result of comparing the subvector with zero,
9230   // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
9231   return DAG.getNode(ARMISD::VCMPZ, dl, VT, SubVec,
9232                      DAG.getConstant(ARMCC::NE, dl, MVT::i32));
9233 }
9234 
// Turn a truncate to a predicate (an i1 vector) into icmp(and(x, 1), 0).
9236 static SDValue LowerTruncatei1(SDNode *N, SelectionDAG &DAG,
9237                                const ARMSubtarget *ST) {
9238   assert(ST->hasMVEIntegerOps() && "Expected MVE!");
9239   EVT VT = N->getValueType(0);
9240   assert((VT == MVT::v16i1 || VT == MVT::v8i1 || VT == MVT::v4i1) &&
9241          "Expected a vector i1 type!");
9242   SDValue Op = N->getOperand(0);
9243   EVT FromVT = Op.getValueType();
9244   SDLoc DL(N);
9245 
9246   SDValue And =
9247       DAG.getNode(ISD::AND, DL, FromVT, Op, DAG.getConstant(1, DL, FromVT));
9248   return DAG.getNode(ISD::SETCC, DL, VT, And, DAG.getConstant(0, DL, FromVT),
9249                      DAG.getCondCode(ISD::SETNE));
9250 }
9251 
9252 static SDValue LowerTruncate(SDNode *N, SelectionDAG &DAG,
9253                              const ARMSubtarget *Subtarget) {
9254   if (!Subtarget->hasMVEIntegerOps())
9255     return SDValue();
9256 
9257   EVT ToVT = N->getValueType(0);
9258   if (ToVT.getScalarType() == MVT::i1)
9259     return LowerTruncatei1(N, DAG, Subtarget);
9260 
9261   // MVE does not have a single instruction to perform the truncation of a v4i32
9262   // into the lower half of a v8i16, in the same way that a NEON vmovn would.
9263   // Most of the instructions in MVE follow the 'Beats' system, where moving
9264   // values from different lanes is usually something that the instructions
9265   // avoid.
9266   //
  // Instead it has top/bottom instructions such as VMOVLT/B and VMOVNT/B,
  // which take the top/bottom half of a larger lane and extend it (or do the
  // opposite, truncating into the top/bottom lane from a larger lane). Note
  // that because of the way we widen lanes, a v4i16 is really a v4i32 using
  // the bottom 16 bits of each vector lane. This works really well with T/B
  // instructions, but it doesn't extend to v8i32->v8i16, where the lanes need
  // to be reordered.
9274   //
9275   // But truncates and sext/zext are always going to be fairly common from llvm.
9276   // We have several options for how to deal with them:
9277   // - Wherever possible combine them into an instruction that makes them
9278   //   "free". This includes loads/stores, which can perform the trunc as part
9279   //   of the memory operation. Or certain shuffles that can be turned into
9280   //   VMOVN/VMOVL.
9281   // - Lane Interleaving to transform blocks surrounded by ext/trunc. So
9282   //   trunc(mul(sext(a), sext(b))) may become
9283   //   VMOVNT(VMUL(VMOVLB(a), VMOVLB(b)), VMUL(VMOVLT(a), VMOVLT(b))). (Which in
9284   //   this case can use VMULL). This is performed in the
9285   //   MVELaneInterleavingPass.
  // - Otherwise we have an option. By default we would expand the
  //   zext/sext/trunc into a series of lane extract/inserts going via GPR
  //   registers, one for each vector lane. This can obviously be very
  //   expensive.
  // - The other option is to use the fact that loads/stores can
  //   extend/truncate to turn a trunc into two truncating stack stores and a
  //   stack reload. This becomes 3 back-to-back memory operations, but at
  //   least that is less than all the insert/extracts.
9294   //
  // In order to do the last, we convert certain truncs into MVETRUNC, which
  // are either optimized where they can be, or eventually lowered into stack
  // stores/loads. This prevents us from splitting a v8i16 trunc into two
  // stores too early, where other instructions would be better, and stops us
  // from having to reconstruct multiple buildvector shuffles into
  // loads/stores.
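  // For example (illustrative), a trunc from v8i32 to v8i16 is split into two
  // v4i32 halves and emitted as MVETRUNC(Lo, Hi), which is later either
  // combined into something better or expanded via the stack as above.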
9300   if (ToVT != MVT::v8i16 && ToVT != MVT::v16i8)
9301     return SDValue();
9302   EVT FromVT = N->getOperand(0).getValueType();
9303   if (FromVT != MVT::v8i32 && FromVT != MVT::v16i16)
9304     return SDValue();
9305 
9306   SDValue Lo, Hi;
9307   std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
9308   SDLoc DL(N);
9309   return DAG.getNode(ARMISD::MVETRUNC, DL, ToVT, Lo, Hi);
9310 }
9311 
9312 static SDValue LowerVectorExtend(SDNode *N, SelectionDAG &DAG,
9313                                  const ARMSubtarget *Subtarget) {
9314   if (!Subtarget->hasMVEIntegerOps())
9315     return SDValue();
9316 
9317   // See LowerTruncate above for an explanation of MVEEXT/MVETRUNC.
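  // For example (illustrative), extending v16i8 to v16i32 first produces a
  // pair of v8i16 values via MVESEXT/MVEZEXT, each of which is then extended
  // again to v8i32 before being concatenated into the final v16i32 result.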
9318 
9319   EVT ToVT = N->getValueType(0);
9320   if (ToVT != MVT::v16i32 && ToVT != MVT::v8i32 && ToVT != MVT::v16i16)
9321     return SDValue();
9322   SDValue Op = N->getOperand(0);
9323   EVT FromVT = Op.getValueType();
9324   if (FromVT != MVT::v8i16 && FromVT != MVT::v16i8)
9325     return SDValue();
9326 
9327   SDLoc DL(N);
9328   EVT ExtVT = ToVT.getHalfNumVectorElementsVT(*DAG.getContext());
9329   if (ToVT.getScalarType() == MVT::i32 && FromVT.getScalarType() == MVT::i8)
9330     ExtVT = MVT::v8i16;
9331 
9332   unsigned Opcode =
9333       N->getOpcode() == ISD::SIGN_EXTEND ? ARMISD::MVESEXT : ARMISD::MVEZEXT;
9334   SDValue Ext = DAG.getNode(Opcode, DL, DAG.getVTList(ExtVT, ExtVT), Op);
9335   SDValue Ext1 = Ext.getValue(1);
9336 
9337   if (ToVT.getScalarType() == MVT::i32 && FromVT.getScalarType() == MVT::i8) {
9338     Ext = DAG.getNode(N->getOpcode(), DL, MVT::v8i32, Ext);
9339     Ext1 = DAG.getNode(N->getOpcode(), DL, MVT::v8i32, Ext1);
9340   }
9341 
9342   return DAG.getNode(ISD::CONCAT_VECTORS, DL, ToVT, Ext, Ext1);
9343 }
9344 
9345 /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
9346 /// element has been zero/sign-extended, depending on the isSigned parameter,
9347 /// from an integer type half its size.
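/// For example (illustrative), a v4i32 BUILD_VECTOR of <100, 200, 300, 400>
/// counts as both sign- and zero-extended from i16 elements.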
9348 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
9349                                    bool isSigned) {
9350   // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
9351   EVT VT = N->getValueType(0);
9352   if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
9353     SDNode *BVN = N->getOperand(0).getNode();
9354     if (BVN->getValueType(0) != MVT::v4i32 ||
9355         BVN->getOpcode() != ISD::BUILD_VECTOR)
9356       return false;
9357     unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
9358     unsigned HiElt = 1 - LoElt;
9359     ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt));
9360     ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt));
9361     ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2));
9362     ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2));
9363     if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
9364       return false;
9365     if (isSigned) {
9366       if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
9367           Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
9368         return true;
9369     } else {
9370       if (Hi0->isZero() && Hi1->isZero())
9371         return true;
9372     }
9373     return false;
9374   }
9375 
9376   if (N->getOpcode() != ISD::BUILD_VECTOR)
9377     return false;
9378 
9379   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
9380     SDNode *Elt = N->getOperand(i).getNode();
9381     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
9382       unsigned EltSize = VT.getScalarSizeInBits();
9383       unsigned HalfSize = EltSize / 2;
9384       if (isSigned) {
9385         if (!isIntN(HalfSize, C->getSExtValue()))
9386           return false;
9387       } else {
9388         if (!isUIntN(HalfSize, C->getZExtValue()))
9389           return false;
9390       }
9391       continue;
9392     }
9393     return false;
9394   }
9395 
9396   return true;
9397 }
9398 
9399 /// isSignExtended - Check if a node is a vector value that is sign-extended
9400 /// or a constant BUILD_VECTOR with sign-extended elements.
9401 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
9402   if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
9403     return true;
9404   if (isExtendedBUILD_VECTOR(N, DAG, true))
9405     return true;
9406   return false;
9407 }
9408 
9409 /// isZeroExtended - Check if a node is a vector value that is zero-extended (or
9410 /// any-extended) or a constant BUILD_VECTOR with zero-extended elements.
9411 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
9412   if (N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND ||
9413       ISD::isZEXTLoad(N))
9414     return true;
9415   if (isExtendedBUILD_VECTOR(N, DAG, false))
9416     return true;
9417   return false;
9418 }
9419 
9420 static EVT getExtensionTo64Bits(const EVT &OrigVT) {
9421   if (OrigVT.getSizeInBits() >= 64)
9422     return OrigVT;
9423 
9424   assert(OrigVT.isSimple() && "Expecting a simple value type");
9425 
9426   MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
9427   switch (OrigSimpleTy) {
9428   default: llvm_unreachable("Unexpected Vector Type");
9429   case MVT::v2i8:
9430   case MVT::v2i16:
9431      return MVT::v2i32;
9432   case MVT::v4i8:
9433     return  MVT::v4i16;
9434   }
9435 }
9436 
9437 /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total
9438 /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL.
9439 /// We insert the required extension here to get the vector to fill a D register.
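/// For example (illustrative), if a v4i8 value was sign-extended to v4i32 for
/// the multiply, the original v4i8 is re-extended to v4i16 here so that it
/// fills a 64-bit D register.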
9440 static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
9441                                             const EVT &OrigTy,
9442                                             const EVT &ExtTy,
9443                                             unsigned ExtOpcode) {
9444   // The vector originally had a size of OrigTy. It was then extended to ExtTy.
9445   // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
9446   // 64-bits we need to insert a new extension so that it will be 64-bits.
9447   assert(ExtTy.is128BitVector() && "Unexpected extension size");
9448   if (OrigTy.getSizeInBits() >= 64)
9449     return N;
9450 
9451   // Must extend size to at least 64 bits to be used as an operand for VMULL.
9452   EVT NewVT = getExtensionTo64Bits(OrigTy);
9453 
9454   return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
9455 }
9456 
9457 /// SkipLoadExtensionForVMULL - return a load of the original vector size that
9458 /// does not do any sign/zero extension. If the original vector is less
9459 /// than 64 bits, an appropriate extension will be added after the load to
9460 /// reach a total size of 64 bits. We have to add the extension separately
9461 /// because ARM does not have a sign/zero extending load for vectors.
9462 static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {
9463   EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT());
9464 
9465   // The load already has the right type.
9466   if (ExtendedTy == LD->getMemoryVT())
9467     return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
9468                        LD->getBasePtr(), LD->getPointerInfo(), LD->getAlign(),
9469                        LD->getMemOperand()->getFlags());
9470 
9471   // We need to create a zextload/sextload. We cannot just create a load
  // followed by a sext/zext node because LowerMUL is also run during normal
9473   // operation legalization where we can't create illegal types.
9474   return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
9475                         LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
9476                         LD->getMemoryVT(), LD->getAlign(),
9477                         LD->getMemOperand()->getFlags());
9478 }
9479 
9480 /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
9481 /// ANY_EXTEND, extending load, or BUILD_VECTOR with extended elements, return
9482 /// the unextended value. The unextended vector should be 64 bits so that it can
9483 /// be used as an operand to a VMULL instruction. If the original vector size
/// before extension is less than 64 bits we add an extension to resize
9485 /// the vector to 64 bits.
9486 static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
9487   if (N->getOpcode() == ISD::SIGN_EXTEND ||
9488       N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND)
9489     return AddRequiredExtensionForVMULL(N->getOperand(0), DAG,
9490                                         N->getOperand(0)->getValueType(0),
9491                                         N->getValueType(0),
9492                                         N->getOpcode());
9493 
9494   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
9495     assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) &&
9496            "Expected extending load");
9497 
9498     SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG);
9499     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), newLoad.getValue(1));
9500     unsigned Opcode = ISD::isSEXTLoad(LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
9501     SDValue extLoad =
9502         DAG.getNode(Opcode, SDLoc(newLoad), LD->getValueType(0), newLoad);
9503     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), extLoad);
9504 
9505     return newLoad;
9506   }
9507 
9508   // Otherwise, the value must be a BUILD_VECTOR.  For v2i64, it will
9509   // have been legalized as a BITCAST from v4i32.
9510   if (N->getOpcode() == ISD::BITCAST) {
9511     SDNode *BVN = N->getOperand(0).getNode();
9512     assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
9513            BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
9514     unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
9515     return DAG.getBuildVector(
9516         MVT::v2i32, SDLoc(N),
9517         {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)});
9518   }
9519   // Construct a new BUILD_VECTOR with elements truncated to half the size.
9520   assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
9521   EVT VT = N->getValueType(0);
9522   unsigned EltSize = VT.getScalarSizeInBits() / 2;
9523   unsigned NumElts = VT.getVectorNumElements();
9524   MVT TruncVT = MVT::getIntegerVT(EltSize);
9525   SmallVector<SDValue, 8> Ops;
9526   SDLoc dl(N);
9527   for (unsigned i = 0; i != NumElts; ++i) {
9528     ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
9529     const APInt &CInt = C->getAPIntValue();
9530     // Element types smaller than 32 bits are not legal, so use i32 elements.
9531     // The values are implicitly truncated so sext vs. zext doesn't matter.
9532     Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
9533   }
9534   return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
9535 }
9536 
9537 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
9538   unsigned Opcode = N->getOpcode();
9539   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
9540     SDNode *N0 = N->getOperand(0).getNode();
9541     SDNode *N1 = N->getOperand(1).getNode();
9542     return N0->hasOneUse() && N1->hasOneUse() &&
9543       isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
9544   }
9545   return false;
9546 }
9547 
9548 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
9549   unsigned Opcode = N->getOpcode();
9550   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
9551     SDNode *N0 = N->getOperand(0).getNode();
9552     SDNode *N1 = N->getOperand(1).getNode();
9553     return N0->hasOneUse() && N1->hasOneUse() &&
9554       isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
9555   }
9556   return false;
9557 }
9558 
9559 static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
9560   // Multiplications are only custom-lowered for 128-bit vectors so that
9561   // VMULL can be detected.  Otherwise v2i64 multiplications are not legal.
9562   EVT VT = Op.getValueType();
9563   assert(VT.is128BitVector() && VT.isInteger() &&
9564          "unexpected type for custom-lowering ISD::MUL");
9565   SDNode *N0 = Op.getOperand(0).getNode();
9566   SDNode *N1 = Op.getOperand(1).getNode();
9567   unsigned NewOpc = 0;
9568   bool isMLA = false;
9569   bool isN0SExt = isSignExtended(N0, DAG);
9570   bool isN1SExt = isSignExtended(N1, DAG);
9571   if (isN0SExt && isN1SExt)
9572     NewOpc = ARMISD::VMULLs;
9573   else {
9574     bool isN0ZExt = isZeroExtended(N0, DAG);
9575     bool isN1ZExt = isZeroExtended(N1, DAG);
9576     if (isN0ZExt && isN1ZExt)
9577       NewOpc = ARMISD::VMULLu;
9578     else if (isN1SExt || isN1ZExt) {
9579       // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
9580       // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
9581       if (isN1SExt && isAddSubSExt(N0, DAG)) {
9582         NewOpc = ARMISD::VMULLs;
9583         isMLA = true;
9584       } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
9585         NewOpc = ARMISD::VMULLu;
9586         isMLA = true;
9587       } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
9588         std::swap(N0, N1);
9589         NewOpc = ARMISD::VMULLu;
9590         isMLA = true;
9591       }
9592     }
9593 
9594     if (!NewOpc) {
9595       if (VT == MVT::v2i64)
9596         // Fall through to expand this.  It is not legal.
9597         return SDValue();
9598       else
9599         // Other vector multiplications are legal.
9600         return Op;
9601     }
9602   }
9603 
9604   // Legalize to a VMULL instruction.
9605   SDLoc DL(Op);
9606   SDValue Op0;
9607   SDValue Op1 = SkipExtensionForVMULL(N1, DAG);
9608   if (!isMLA) {
9609     Op0 = SkipExtensionForVMULL(N0, DAG);
9610     assert(Op0.getValueType().is64BitVector() &&
9611            Op1.getValueType().is64BitVector() &&
9612            "unexpected types for extended operands to VMULL");
9613     return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
9614   }
9615 
9616   // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during
9617   // isel lowering to take advantage of no-stall back to back vmul + vmla.
9618   //   vmull q0, d4, d6
9619   //   vmlal q0, d5, d6
9620   // is faster than
9621   //   vaddl q0, d4, d5
9622   //   vmovl q1, d6
9623   //   vmul  q0, q0, q1
9624   SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG);
9625   SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG);
9626   EVT Op1VT = Op1.getValueType();
9627   return DAG.getNode(N0->getOpcode(), DL, VT,
9628                      DAG.getNode(NewOpc, DL, VT,
9629                                DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
9630                      DAG.getNode(NewOpc, DL, VT,
9631                                DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
9632 }
9633 
9634 static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl,
9635                               SelectionDAG &DAG) {
9636   // TODO: Should this propagate fast-math-flags?
9637 
9638   // Convert to float
9639   // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
9640   // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
9641   X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
9642   Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
9643   X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
9644   Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
9645   // Get reciprocal estimate.
9646   // float4 recip = vrecpeq_f32(yf);
9647   Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
9648                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
9649                    Y);
9650   // Because char has a smaller range than uchar, we can actually get away
9651   // without any newton steps.  This requires that we use a weird bias
9652   // of 0xb000, however (again, this has been exhaustively tested).
9653   // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
9654   X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
9655   X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
9656   Y = DAG.getConstant(0xb000, dl, MVT::v4i32);
9657   X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
9658   X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
9659   // Convert back to short.
9660   X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
9661   X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
9662   return X;
9663 }
9664 
9665 static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl,
9666                                SelectionDAG &DAG) {
9667   // TODO: Should this propagate fast-math-flags?
9668 
9669   SDValue N2;
9670   // Convert to float.
9671   // float4 yf = vcvt_f32_s32(vmovl_s16(y));
9672   // float4 xf = vcvt_f32_s32(vmovl_s16(x));
9673   N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
9674   N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
9675   N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
9676   N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
9677 
9678   // Use reciprocal estimate and one refinement step.
9679   // float4 recip = vrecpeq_f32(yf);
9680   // recip *= vrecpsq_f32(yf, recip);
9681   N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
9682                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
9683                    N1);
9684   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
9685                    DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
9686                    N1, N2);
9687   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
9688   // Because short has a smaller range than ushort, we can actually get away
9689   // with only a single newton step.  This requires that we use a weird bias
  // of 0x89, however (again, this has been exhaustively tested).
9691   // float4 result = as_float4(as_int4(xf*recip) + 0x89);
9692   N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
9693   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
9694   N1 = DAG.getConstant(0x89, dl, MVT::v4i32);
9695   N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
9696   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
9697   // Convert back to integer and return.
9698   // return vmovn_s32(vcvt_s32_f32(result));
9699   N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
9700   N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
9701   return N0;
9702 }
9703 
9704 static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG,
9705                          const ARMSubtarget *ST) {
9706   EVT VT = Op.getValueType();
9707   assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
9708          "unexpected type for custom-lowering ISD::SDIV");
9709 
9710   SDLoc dl(Op);
9711   SDValue N0 = Op.getOperand(0);
9712   SDValue N1 = Op.getOperand(1);
9713   SDValue N2, N3;
9714 
9715   if (VT == MVT::v8i8) {
9716     N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
9717     N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);
9718 
9719     N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
9720                      DAG.getIntPtrConstant(4, dl));
9721     N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
9722                      DAG.getIntPtrConstant(4, dl));
9723     N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
9724                      DAG.getIntPtrConstant(0, dl));
9725     N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
9726                      DAG.getIntPtrConstant(0, dl));
9727 
9728     N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
9729     N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16
9730 
9731     N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
9732     N0 = LowerCONCAT_VECTORS(N0, DAG, ST);
9733 
9734     N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
9735     return N0;
9736   }
9737   return LowerSDIV_v4i16(N0, N1, dl, DAG);
9738 }
9739 
9740 static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG,
9741                          const ARMSubtarget *ST) {
9742   // TODO: Should this propagate fast-math-flags?
9743   EVT VT = Op.getValueType();
9744   assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
9745          "unexpected type for custom-lowering ISD::UDIV");
9746 
9747   SDLoc dl(Op);
9748   SDValue N0 = Op.getOperand(0);
9749   SDValue N1 = Op.getOperand(1);
9750   SDValue N2, N3;
9751 
9752   if (VT == MVT::v8i8) {
9753     N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
9754     N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);
9755 
9756     N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
9757                      DAG.getIntPtrConstant(4, dl));
9758     N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
9759                      DAG.getIntPtrConstant(4, dl));
9760     N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
9761                      DAG.getIntPtrConstant(0, dl));
9762     N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
9763                      DAG.getIntPtrConstant(0, dl));
9764 
9765     N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
9766     N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16
9767 
9768     N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
9769     N0 = LowerCONCAT_VECTORS(N0, DAG, ST);
9770 
9771     N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
9772                      DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl,
9773                                      MVT::i32),
9774                      N0);
9775     return N0;
9776   }
9777 
  // v4i16 udiv ... Convert to float.
9779   // float4 yf = vcvt_f32_s32(vmovl_u16(y));
9780   // float4 xf = vcvt_f32_s32(vmovl_u16(x));
9781   N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
9782   N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
9783   N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
9784   SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
9785 
9786   // Use reciprocal estimate and two refinement steps.
9787   // float4 recip = vrecpeq_f32(yf);
9788   // recip *= vrecpsq_f32(yf, recip);
9789   // recip *= vrecpsq_f32(yf, recip);
9790   N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
9791                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
9792                    BN1);
9793   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
9794                    DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
9795                    BN1, N2);
9796   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
9797   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
9798                    DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
9799                    BN1, N2);
9800   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
9801   // Simply multiplying by the reciprocal estimate can leave us a few ulps
9802   // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
9803   // and that it will never cause us to return an answer too large).
9804   // float4 result = as_float4(as_int4(xf*recip) + 2);
9805   N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
9806   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
9807   N1 = DAG.getConstant(2, dl, MVT::v4i32);
9808   N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
9809   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
9810   // Convert back to integer and return.
9811   // return vmovn_u32(vcvt_s32_f32(result));
9812   N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
9813   N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
9814   return N0;
9815 }
9816 
9817 static SDValue LowerUADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) {
9818   SDNode *N = Op.getNode();
9819   EVT VT = N->getValueType(0);
9820   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
9821 
9822   SDValue Carry = Op.getOperand(2);
9823 
9824   SDLoc DL(Op);
9825 
9826   SDValue Result;
9827   if (Op.getOpcode() == ISD::UADDO_CARRY) {
9828     // This converts the boolean value carry into the carry flag.
9829     Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);
9830 
9831     // Do the addition proper using the carry flag we wanted.
9832     Result = DAG.getNode(ARMISD::ADDE, DL, VTs, Op.getOperand(0),
9833                          Op.getOperand(1), Carry);
9834 
9835     // Now convert the carry flag into a boolean value.
9836     Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG);
9837   } else {
    // ARMISD::SUBE expects a carry, not a borrow like ISD::USUBO_CARRY, so we
    // have to invert the carry first.
9840     Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
9841                         DAG.getConstant(1, DL, MVT::i32), Carry);
9842     // This converts the boolean value carry into the carry flag.
9843     Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);
9844 
9845     // Do the subtraction proper using the carry flag we wanted.
9846     Result = DAG.getNode(ARMISD::SUBE, DL, VTs, Op.getOperand(0),
9847                          Op.getOperand(1), Carry);
9848 
9849     // Now convert the carry flag into a boolean value.
9850     Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG);
9851     // But the carry returned by ARMISD::SUBE is not a borrow as expected
9852     // by ISD::USUBO_CARRY, so compute 1 - C.
9853     Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
9854                         DAG.getConstant(1, DL, MVT::i32), Carry);
9855   }
9856 
9857   // Return both values.
9858   return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Carry);
9859 }
9860 
9861 SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
9862   assert(Subtarget->isTargetDarwin());
9863 
  // For iOS, we want to call an alternative entry point: __sincos_stret,
  // whose return values are passed via sret.
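  // Roughly (a sketch of the assumed interface, not a real declaration): with
  // sret the call behaves like
  //   void __sincos_stret(struct { T sin, cos; } *out, T x);
  // and otherwise the {sin, cos} pair is returned directly in registers.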
9866   SDLoc dl(Op);
9867   SDValue Arg = Op.getOperand(0);
9868   EVT ArgVT = Arg.getValueType();
9869   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
9870   auto PtrVT = getPointerTy(DAG.getDataLayout());
9871 
9872   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9873   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9874 
9875   // Pair of floats / doubles used to pass the result.
9876   Type *RetTy = StructType::get(ArgTy, ArgTy);
9877   auto &DL = DAG.getDataLayout();
9878 
9879   ArgListTy Args;
9880   bool ShouldUseSRet = Subtarget->isAPCS_ABI();
9881   SDValue SRet;
9882   if (ShouldUseSRet) {
9883     // Create stack object for sret.
9884     const uint64_t ByteSize = DL.getTypeAllocSize(RetTy);
9885     const Align StackAlign = DL.getPrefTypeAlign(RetTy);
9886     int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
9887     SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL));
9888 
9889     ArgListEntry Entry;
9890     Entry.Node = SRet;
9891     Entry.Ty = PointerType::getUnqual(RetTy->getContext());
9892     Entry.IsSExt = false;
9893     Entry.IsZExt = false;
9894     Entry.IsSRet = true;
9895     Args.push_back(Entry);
9896     RetTy = Type::getVoidTy(*DAG.getContext());
9897   }
9898 
9899   ArgListEntry Entry;
9900   Entry.Node = Arg;
9901   Entry.Ty = ArgTy;
9902   Entry.IsSExt = false;
9903   Entry.IsZExt = false;
9904   Args.push_back(Entry);
9905 
9906   RTLIB::Libcall LC =
9907       (ArgVT == MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
9908   const char *LibcallName = getLibcallName(LC);
9909   CallingConv::ID CC = getLibcallCallingConv(LC);
9910   SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL));
9911 
9912   TargetLowering::CallLoweringInfo CLI(DAG);
9913   CLI.setDebugLoc(dl)
9914       .setChain(DAG.getEntryNode())
9915       .setCallee(CC, RetTy, Callee, std::move(Args))
9916       .setDiscardResult(ShouldUseSRet);
9917   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
9918 
9919   if (!ShouldUseSRet)
9920     return CallResult.first;
9921 
9922   SDValue LoadSin =
9923       DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo());
9924 
9925   // Address of cos field.
9926   SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet,
9927                             DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl));
9928   SDValue LoadCos =
9929       DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo());
9930 
9931   SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
9932   return DAG.getNode(ISD::MERGE_VALUES, dl, Tys,
9933                      LoadSin.getValue(0), LoadCos.getValue(0));
9934 }
9935 
9936 SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG,
9937                                                   bool Signed,
9938                                                   SDValue &Chain) const {
9939   EVT VT = Op.getValueType();
9940   assert((VT == MVT::i32 || VT == MVT::i64) &&
9941          "unexpected type for custom lowering DIV");
9942   SDLoc dl(Op);
9943 
9944   const auto &DL = DAG.getDataLayout();
9945   const auto &TLI = DAG.getTargetLoweringInfo();
9946 
9947   const char *Name = nullptr;
9948   if (Signed)
9949     Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64";
9950   else
9951     Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64";
9952 
9953   SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL));
9954 
9955   ARMTargetLowering::ArgListTy Args;
9956 
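  // Push operand 1 (the divisor) before operand 0 (the dividend); the
  // __rt_*div helpers are understood to take the divisor as their first
  // argument.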
9957   for (auto AI : {1, 0}) {
9958     ArgListEntry Arg;
9959     Arg.Node = Op.getOperand(AI);
9960     Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext());
9961     Args.push_back(Arg);
9962   }
9963 
9964   CallLoweringInfo CLI(DAG);
9965   CLI.setDebugLoc(dl)
9966     .setChain(Chain)
9967     .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()),
9968                ES, std::move(Args));
9969 
9970   return LowerCallTo(CLI).first;
9971 }
9972 
// This is a code size optimisation: return the original SDIV node to
// DAGCombiner when we don't want to expand SDIV into a sequence of
// instructions, and an empty node otherwise, which will cause the SDIV to be
// expanded in DAGCombine.
9977 SDValue
9978 ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
9979                                  SelectionDAG &DAG,
9980                                  SmallVectorImpl<SDNode *> &Created) const {
9981   // TODO: Support SREM
9982   if (N->getOpcode() != ISD::SDIV)
9983     return SDValue();
9984 
9985   const auto &ST = DAG.getSubtarget<ARMSubtarget>();
9986   const bool MinSize = ST.hasMinSize();
9987   const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode()
9988                                       : ST.hasDivideInARMMode();
9989 
9990   // Don't touch vector types; rewriting this may lead to scalarizing
9991   // the int divs.
9992   if (N->getOperand(0).getValueType().isVector())
9993     return SDValue();
9994 
  // Bail if MinSize is not set; in both ARM and Thumb mode we also need hwdiv
  // support for this to be really profitable.
9997   if (!(MinSize && HasDivide))
9998     return SDValue();
9999 
10000   // ARM mode is a bit simpler than Thumb: we can handle large power
10001   // of 2 immediates with 1 mov instruction; no further checks required,
10002   // just return the sdiv node.
10003   if (!ST.isThumb())
10004     return SDValue(N, 0);
10005 
  // In Thumb mode, immediates larger than 128 need a wide 4-byte MOV and thus
  // lose the code-size benefit of a MOVS that requires only 2 bytes.
  // TargetTransformInfo's 'getIntImmCodeSizeCost' could be used here, but as
  // this check does exactly that, it's not worth the trouble to get TTI.
10010   if (Divisor.sgt(128))
10011     return SDValue();
10012 
10013   return SDValue(N, 0);
10014 }
10015 
10016 SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG,
10017                                             bool Signed) const {
10018   assert(Op.getValueType() == MVT::i32 &&
10019          "unexpected type for custom lowering DIV");
10020   SDLoc dl(Op);
10021 
10022   SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other,
10023                                DAG.getEntryNode(), Op.getOperand(1));
10024 
10025   return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
10026 }
10027 
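// Chain a WIN__DBZCHK onto InChain to check the divisor (operand 1 of N) for
// zero. For an i64 divisor the two 32-bit halves are ORed together first, so
// a single 32-bit check covers the full value.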
static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N,
                                      SDValue InChain) {
10029   SDLoc DL(N);
10030   SDValue Op = N->getOperand(1);
10031   if (N->getValueType(0) == MVT::i32)
10032     return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op);
10033   SDValue Lo, Hi;
10034   std::tie(Lo, Hi) = DAG.SplitScalar(Op, DL, MVT::i32, MVT::i32);
10035   return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain,
10036                      DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi));
10037 }
10038 
10039 void ARMTargetLowering::ExpandDIV_Windows(
10040     SDValue Op, SelectionDAG &DAG, bool Signed,
10041     SmallVectorImpl<SDValue> &Results) const {
10042   const auto &DL = DAG.getDataLayout();
10043   const auto &TLI = DAG.getTargetLoweringInfo();
10044 
10045   assert(Op.getValueType() == MVT::i64 &&
10046          "unexpected type for custom lowering DIV");
10047   SDLoc dl(Op);
10048 
  SDValue DBZCHK =
      WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode());
10050 
10051   SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
10052 
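  // Split the i64 libcall result back into two i32 halves and repack them as
  // a BUILD_PAIR, which is the form the type legalizer expects for the
  // illegal i64 result.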
10053   SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result);
10054   SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result,
10055                               DAG.getConstant(32, dl, TLI.getPointerTy(DL)));
10056   Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper);
10057 
10058   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lower, Upper));
10059 }
10060 
10061 static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) {
10062   LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
10063   EVT MemVT = LD->getMemoryVT();
10064   assert((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 ||
10065           MemVT == MVT::v16i1) &&
10066          "Expected a predicate type!");
10067   assert(MemVT == Op.getValueType());
10068   assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
10069          "Expected a non-extending load");
  assert(LD->isUnindexed() && "Expected an unindexed load");
10071 
  // The basic MVE VLDR on a v2i1/v4i1/v8i1 actually loads the entire 16-bit
  // predicate, with the "v4i1" bits spread out over the 16 bits loaded. We
  // need to make sure that the 8/4/2 bits are actually loaded into the
  // correct place, which means loading the value and then shuffling it into
  // the bottom bits of the predicate.
  // Equally, VLDR for a v16i1 will actually load 32 bits (and so will be
  // incorrect for BE).
  // For BE, the rest of LLVM also assumes the opposite bit order to a natural
  // VMSR(load), so the loaded value needs to be reversed.
10081 
10082   SDLoc dl(Op);
10083   SDValue Load = DAG.getExtLoad(
10084       ISD::EXTLOAD, dl, MVT::i32, LD->getChain(), LD->getBasePtr(),
10085       EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()),
10086       LD->getMemOperand());
10087   SDValue Val = Load;
10088   if (DAG.getDataLayout().isBigEndian())
10089     Val = DAG.getNode(ISD::SRL, dl, MVT::i32,
10090                       DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, Load),
10091                       DAG.getConstant(32 - MemVT.getSizeInBits(), dl, MVT::i32));
10092   SDValue Pred = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Val);
10093   if (MemVT != MVT::v16i1)
10094     Pred = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MemVT, Pred,
10095                        DAG.getConstant(0, dl, MVT::i32));
10096   return DAG.getMergeValues({Pred, Load.getValue(1)}, dl);
10097 }
10098 
10099 void ARMTargetLowering::LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
10100                                   SelectionDAG &DAG) const {
10101   LoadSDNode *LD = cast<LoadSDNode>(N);
10102   EVT MemVT = LD->getMemoryVT();
10103   assert(LD->isUnindexed() && "Loads should be unindexed at this point.");
10104 
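  // A volatile i64 load with sufficient alignment can be lowered to a single
  // LDRD on subtargets with dual load/store support, instead of being split
  // into two 32-bit loads.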
10105   if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() &&
10106       !Subtarget->isThumb1Only() && LD->isVolatile() &&
10107       LD->getAlign() >= Subtarget->getDualLoadStoreAlignment()) {
10108     SDLoc dl(N);
10109     SDValue Result = DAG.getMemIntrinsicNode(
10110         ARMISD::LDRD, dl, DAG.getVTList({MVT::i32, MVT::i32, MVT::Other}),
10111         {LD->getChain(), LD->getBasePtr()}, MemVT, LD->getMemOperand());
10112     SDValue Lo = Result.getValue(DAG.getDataLayout().isLittleEndian() ? 0 : 1);
10113     SDValue Hi = Result.getValue(DAG.getDataLayout().isLittleEndian() ? 1 : 0);
10114     SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
10115     Results.append({Pair, Result.getValue(2)});
10116   }
10117 }
10118 
10119 static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) {
10120   StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
10121   EVT MemVT = ST->getMemoryVT();
10122   assert((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 ||
10123           MemVT == MVT::v16i1) &&
10124          "Expected a predicate type!");
10125   assert(MemVT == ST->getValue().getValueType());
  assert(!ST->isTruncatingStore() && "Expected a non-truncating store");
  assert(ST->isUnindexed() && "Expected an unindexed store");
10128 
  // Only store the v2i1/v4i1/v8i1 worth of bits, via a BUILD_VECTOR with the
  // top lanes undef and a truncating scalar store.
10131   SDLoc dl(Op);
10132   SDValue Build = ST->getValue();
10133   if (MemVT != MVT::v16i1) {
10134     SmallVector<SDValue, 16> Ops;
10135     for (unsigned I = 0; I < MemVT.getVectorNumElements(); I++) {
10136       unsigned Elt = DAG.getDataLayout().isBigEndian()
10137                          ? MemVT.getVectorNumElements() - I - 1
10138                          : I;
10139       Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Build,
10140                                 DAG.getConstant(Elt, dl, MVT::i32)));
10141     }
10142     for (unsigned I = MemVT.getVectorNumElements(); I < 16; I++)
10143       Ops.push_back(DAG.getUNDEF(MVT::i32));
10144     Build = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i1, Ops);
10145   }
10146   SDValue GRP = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Build);
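  // For big-endian v16i1, reverse the 16 predicate bits in the GPR so they
  // are stored in the expected order (the smaller types are handled above by
  // reversing the BUILD_VECTOR elements).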
10147   if (MemVT == MVT::v16i1 && DAG.getDataLayout().isBigEndian())
10148     GRP = DAG.getNode(ISD::SRL, dl, MVT::i32,
10149                       DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, GRP),
10150                       DAG.getConstant(16, dl, MVT::i32));
10151   return DAG.getTruncStore(
10152       ST->getChain(), dl, GRP, ST->getBasePtr(),
10153       EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()),
10154       ST->getMemOperand());
10155 }
10156 
10157 static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG,
10158                           const ARMSubtarget *Subtarget) {
10159   StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
10160   EVT MemVT = ST->getMemoryVT();
10161   assert(ST->isUnindexed() && "Stores should be unindexed at this point.");
10162 
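  // Likewise, a suitably aligned volatile i64 store becomes a single STRD on
  // subtargets with dual load/store support.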
10163   if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() &&
10164       !Subtarget->isThumb1Only() && ST->isVolatile() &&
10165       ST->getAlign() >= Subtarget->getDualLoadStoreAlignment()) {
10166     SDNode *N = Op.getNode();
10167     SDLoc dl(N);
10168 
10169     SDValue Lo = DAG.getNode(
10170         ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(),
10171         DAG.getTargetConstant(DAG.getDataLayout().isLittleEndian() ? 0 : 1, dl,
10172                               MVT::i32));
10173     SDValue Hi = DAG.getNode(
10174         ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(),
10175         DAG.getTargetConstant(DAG.getDataLayout().isLittleEndian() ? 1 : 0, dl,
10176                               MVT::i32));
10177 
10178     return DAG.getMemIntrinsicNode(ARMISD::STRD, dl, DAG.getVTList(MVT::Other),
10179                                    {ST->getChain(), Lo, Hi, ST->getBasePtr()},
10180                                    MemVT, ST->getMemOperand());
10181   } else if (Subtarget->hasMVEIntegerOps() &&
10182              ((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 ||
10183                MemVT == MVT::v16i1))) {
10184     return LowerPredicateStore(Op, DAG);
10185   }
10186 
10187   return SDValue();
10188 }
10189 
10190 static bool isZeroVector(SDValue N) {
10191   return (ISD::isBuildVectorAllZeros(N.getNode()) ||
10192           (N->getOpcode() == ARMISD::VMOVIMM &&
10193            isNullConstant(N->getOperand(0))));
10194 }
10195 
10196 static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) {
10197   MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
10198   MVT VT = Op.getSimpleValueType();
10199   SDValue Mask = N->getMask();
10200   SDValue PassThru = N->getPassThru();
10201   SDLoc dl(Op);
10202 
10203   if (isZeroVector(PassThru))
10204     return Op;
10205 
10206   // MVE Masked loads use zero as the passthru value. Here we convert undef to
10207   // zero too, and other values are lowered to a select.
10208   SDValue ZeroVec = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
10209                                 DAG.getTargetConstant(0, dl, MVT::i32));
10210   SDValue NewLoad = DAG.getMaskedLoad(
10211       VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask, ZeroVec,
10212       N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
10213       N->getExtensionType(), N->isExpandingLoad());
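  // If the passthru is undef or a (possibly bitcast) zero vector, the zeroing
  // masked load already produces the right result; otherwise select between
  // the loaded value and the original passthru for the inactive lanes.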
10214   SDValue Combo = NewLoad;
10215   bool PassThruIsCastZero = (PassThru.getOpcode() == ISD::BITCAST ||
10216                              PassThru.getOpcode() == ARMISD::VECTOR_REG_CAST) &&
10217                             isZeroVector(PassThru->getOperand(0));
10218   if (!PassThru.isUndef() && !PassThruIsCastZero)
10219     Combo = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
10220   return DAG.getMergeValues({Combo, NewLoad.getValue(1)}, dl);
10221 }
10222 
10223 static SDValue LowerVecReduce(SDValue Op, SelectionDAG &DAG,
10224                               const ARMSubtarget *ST) {
10225   if (!ST->hasMVEIntegerOps())
10226     return SDValue();
10227 
10228   SDLoc dl(Op);
10229   unsigned BaseOpcode = 0;
10230   switch (Op->getOpcode()) {
10231   default: llvm_unreachable("Expected VECREDUCE opcode");
10232   case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break;
10233   case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break;
10234   case ISD::VECREDUCE_MUL:  BaseOpcode = ISD::MUL; break;
10235   case ISD::VECREDUCE_AND:  BaseOpcode = ISD::AND; break;
10236   case ISD::VECREDUCE_OR:   BaseOpcode = ISD::OR; break;
10237   case ISD::VECREDUCE_XOR:  BaseOpcode = ISD::XOR; break;
10238   case ISD::VECREDUCE_FMAX: BaseOpcode = ISD::FMAXNUM; break;
10239   case ISD::VECREDUCE_FMIN: BaseOpcode = ISD::FMINNUM; break;
10240   }
10241 
10242   SDValue Op0 = Op->getOperand(0);
10243   EVT VT = Op0.getValueType();
10244   EVT EltVT = VT.getVectorElementType();
10245   unsigned NumElts = VT.getVectorNumElements();
10246   unsigned NumActiveLanes = NumElts;
10247 
10248   assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 ||
10249           NumActiveLanes == 2) &&
         "Only expected a power-of-2 vector size");
10251 
  // Use BaseOpcode(X, Rev(X)) until 4 items remain. Going down to 4 vector
  // elements allows us to easily extract the results from the lanes.
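  // For example, for a v8i16 reduction VREV32 swaps each pair of adjacent
  // lanes, so one BaseOpcode step leaves 4 distinct values (each duplicated
  // across two lanes), which are then picked up by the extracts below.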
10254   while (NumActiveLanes > 4) {
10255     unsigned RevOpcode = NumActiveLanes == 16 ? ARMISD::VREV16 : ARMISD::VREV32;
10256     SDValue Rev = DAG.getNode(RevOpcode, dl, VT, Op0);
10257     Op0 = DAG.getNode(BaseOpcode, dl, VT, Op0, Rev);
10258     NumActiveLanes /= 2;
10259   }
10260 
10261   SDValue Res;
10262   if (NumActiveLanes == 4) {
    // The remaining 4 elements are reduced sequentially.
10264     SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
10265                               DAG.getConstant(0 * NumElts / 4, dl, MVT::i32));
10266     SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
10267                               DAG.getConstant(1 * NumElts / 4, dl, MVT::i32));
10268     SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
10269                               DAG.getConstant(2 * NumElts / 4, dl, MVT::i32));
10270     SDValue Ext3 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
10271                               DAG.getConstant(3 * NumElts / 4, dl, MVT::i32));
10272     SDValue Res0 = DAG.getNode(BaseOpcode, dl, EltVT, Ext0, Ext1, Op->getFlags());
10273     SDValue Res1 = DAG.getNode(BaseOpcode, dl, EltVT, Ext2, Ext3, Op->getFlags());
10274     Res = DAG.getNode(BaseOpcode, dl, EltVT, Res0, Res1, Op->getFlags());
10275   } else {
10276     SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
10277                               DAG.getConstant(0, dl, MVT::i32));
10278     SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
10279                               DAG.getConstant(1, dl, MVT::i32));
10280     Res = DAG.getNode(BaseOpcode, dl, EltVT, Ext0, Ext1, Op->getFlags());
10281   }
10282 
10283   // Result type may be wider than element type.
10284   if (EltVT != Op->getValueType(0))
10285     Res = DAG.getNode(ISD::ANY_EXTEND, dl, Op->getValueType(0), Res);
10286   return Res;
10287 }
10288 
10289 static SDValue LowerVecReduceF(SDValue Op, SelectionDAG &DAG,
10290                                const ARMSubtarget *ST) {
10291   if (!ST->hasMVEFloatOps())
10292     return SDValue();
10293   return LowerVecReduce(Op, DAG, ST);
10294 }
10295 
10296 static SDValue LowerVecReduceMinMax(SDValue Op, SelectionDAG &DAG,
10297                                     const ARMSubtarget *ST) {
10298   if (!ST->hasNEON())
10299     return SDValue();
10300 
10301   SDLoc dl(Op);
10302   SDValue Op0 = Op->getOperand(0);
10303   EVT VT = Op0.getValueType();
10304   EVT EltVT = VT.getVectorElementType();
10305 
10306   unsigned PairwiseIntrinsic = 0;
10307   switch (Op->getOpcode()) {
10308   default:
10309     llvm_unreachable("Expected VECREDUCE opcode");
10310   case ISD::VECREDUCE_UMIN:
10311     PairwiseIntrinsic = Intrinsic::arm_neon_vpminu;
10312     break;
10313   case ISD::VECREDUCE_UMAX:
10314     PairwiseIntrinsic = Intrinsic::arm_neon_vpmaxu;
10315     break;
10316   case ISD::VECREDUCE_SMIN:
10317     PairwiseIntrinsic = Intrinsic::arm_neon_vpmins;
10318     break;
10319   case ISD::VECREDUCE_SMAX:
10320     PairwiseIntrinsic = Intrinsic::arm_neon_vpmaxs;
10321     break;
10322   }
10323   SDValue PairwiseOp = DAG.getConstant(PairwiseIntrinsic, dl, MVT::i32);
10324 
10325   unsigned NumElts = VT.getVectorNumElements();
10326   unsigned NumActiveLanes = NumElts;
10327 
10328   assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 ||
10329           NumActiveLanes == 2) &&
         "Only expected a power-of-2 vector size");
10331 
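  // For example, a v4i32 VECREDUCE_UMIN becomes: split into two v2i32 halves,
  // vpminu the halves together, vpminu the result with itself, and extract
  // lane 0.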
10332   // Split 128-bit vectors, since vpmin/max takes 2 64-bit vectors.
10333   if (VT.is128BitVector()) {
10334     SDValue Lo, Hi;
10335     std::tie(Lo, Hi) = DAG.SplitVector(Op0, dl);
10336     VT = Lo.getValueType();
10337     Op0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, {PairwiseOp, Lo, Hi});
10338     NumActiveLanes /= 2;
10339   }
10340 
10341   // Use pairwise reductions until one lane remains
10342   while (NumActiveLanes > 1) {
10343     Op0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, {PairwiseOp, Op0, Op0});
10344     NumActiveLanes /= 2;
10345   }
10346 
10347   SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
10348                             DAG.getConstant(0, dl, MVT::i32));
10349 
10350   // Result type may be wider than element type.
10351   if (EltVT != Op.getValueType()) {
10352     unsigned Extend = 0;
10353     switch (Op->getOpcode()) {
10354     default:
10355       llvm_unreachable("Expected VECREDUCE opcode");
10356     case ISD::VECREDUCE_UMIN:
10357     case ISD::VECREDUCE_UMAX:
10358       Extend = ISD::ZERO_EXTEND;
10359       break;
10360     case ISD::VECREDUCE_SMIN:
10361     case ISD::VECREDUCE_SMAX:
10362       Extend = ISD::SIGN_EXTEND;
10363       break;
10364     }
10365     Res = DAG.getNode(Extend, dl, Op.getValueType(), Res);
10366   }
10367   return Res;
10368 }
10369 
10370 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
10371   if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering()))
10372     // Acquire/Release load/store is not legal for targets without a dmb or
10373     // equivalent available.
10374     return SDValue();
10375 
10376   // Monotonic load/store is legal for all targets.
10377   return Op;
10378 }
10379 
10380 static void ReplaceREADCYCLECOUNTER(SDNode *N,
10381                                     SmallVectorImpl<SDValue> &Results,
10382                                     SelectionDAG &DAG,
10383                                     const ARMSubtarget *Subtarget) {
10384   SDLoc DL(N);
10385   // Under Power Management extensions, the cycle-count is:
10386   //    mrc p15, #0, <Rt>, c9, c13, #0
10387   SDValue Ops[] = { N->getOperand(0), // Chain
10388                     DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32),
10389                     DAG.getTargetConstant(15, DL, MVT::i32),
10390                     DAG.getTargetConstant(0, DL, MVT::i32),
10391                     DAG.getTargetConstant(9, DL, MVT::i32),
10392                     DAG.getTargetConstant(13, DL, MVT::i32),
10393                     DAG.getTargetConstant(0, DL, MVT::i32)
10394   };
10395 
10396   SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
10397                                  DAG.getVTList(MVT::i32, MVT::Other), Ops);
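  // Widen the 32-bit cycle count to the i64 result the node is expected to
  // produce; the top half is simply zero.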
10398   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32,
10399                                 DAG.getConstant(0, DL, MVT::i32)));
10400   Results.push_back(Cycles32.getValue(1));
10401 }
10402 
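// Split a 64-bit value into two i32 halves and combine them into an untyped
// GPRPair via REG_SEQUENCE. On big-endian targets the halves are swapped so
// that the first subregister (gsub_0) holds the most significant word.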
10403 static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
10404   SDLoc dl(V.getNode());
10405   auto [VLo, VHi] = DAG.SplitScalar(V, dl, MVT::i32, MVT::i32);
10406   bool isBigEndian = DAG.getDataLayout().isBigEndian();
10407   if (isBigEndian)
    std::swap(VLo, VHi);
10409   SDValue RegClass =
10410       DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
10411   SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32);
10412   SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32);
10413   const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
10414   return SDValue(
10415       DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
10416 }
10417 
static void ReplaceCMP_SWAP_64Results(SDNode *N,
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) {
10421   assert(N->getValueType(0) == MVT::i64 &&
         "AtomicCmpSwap on types smaller than 64 bits should be legal");
10423   SDValue Ops[] = {N->getOperand(1),
10424                    createGPRPairNode(DAG, N->getOperand(2)),
10425                    createGPRPairNode(DAG, N->getOperand(3)),
10426                    N->getOperand(0)};
10427   SDNode *CmpSwap = DAG.getMachineNode(
10428       ARM::CMP_SWAP_64, SDLoc(N),
10429       DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops);
10430 
10431   MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
10432   DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});
10433 
10434   bool isBigEndian = DAG.getDataLayout().isBigEndian();
10435 
10436   SDValue Lo =
10437       DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_1 : ARM::gsub_0,
10438                                  SDLoc(N), MVT::i32, SDValue(CmpSwap, 0));
10439   SDValue Hi =
10440       DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_0 : ARM::gsub_1,
10441                                  SDLoc(N), MVT::i32, SDValue(CmpSwap, 0));
10442   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i64, Lo, Hi));
10443   Results.push_back(SDValue(CmpSwap, 2));
10444 }
10445 
10446 SDValue ARMTargetLowering::LowerFSETCC(SDValue Op, SelectionDAG &DAG) const {
10447   SDLoc dl(Op);
10448   EVT VT = Op.getValueType();
10449   SDValue Chain = Op.getOperand(0);
10450   SDValue LHS = Op.getOperand(1);
10451   SDValue RHS = Op.getOperand(2);
10452   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get();
10453   bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
10454 
10455   // If we don't have instructions of this float type then soften to a libcall
10456   // and use SETCC instead.
10457   if (isUnsupportedFloatingType(LHS.getValueType())) {
10458     DAG.getTargetLoweringInfo().softenSetCCOperands(
10459       DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS, Chain, IsSignaling);
10460     if (!RHS.getNode()) {
10461       RHS = DAG.getConstant(0, dl, LHS.getValueType());
10462       CC = ISD::SETNE;
10463     }
10464     SDValue Result = DAG.getNode(ISD::SETCC, dl, VT, LHS, RHS,
10465                                  DAG.getCondCode(CC));
10466     return DAG.getMergeValues({Result, Chain}, dl);
10467   }
10468 
10469   ARMCC::CondCodes CondCode, CondCode2;
10470   FPCCToARMCC(CC, CondCode, CondCode2);
10471 
10472   // FIXME: Chain is not handled correctly here. Currently the FPSCR is implicit
10473   // in CMPFP and CMPFPE, but instead it should be made explicit by these
10474   // instructions using a chain instead of glue. This would also fix the problem
10475   // here (and also in LowerSELECT_CC) where we generate two comparisons when
10476   // CondCode2 != AL.
10477   SDValue True = DAG.getConstant(1, dl, VT);
  SDValue False = DAG.getConstant(0, dl, VT);
10479   SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
10480   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
10481   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling);
10482   SDValue Result = getCMOV(dl, VT, False, True, ARMcc, CCR, Cmp, DAG);
10483   if (CondCode2 != ARMCC::AL) {
10484     ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
10485     Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling);
10486     Result = getCMOV(dl, VT, Result, True, ARMcc, CCR, Cmp, DAG);
10487   }
10488   return DAG.getMergeValues({Result, Chain}, dl);
10489 }
10490 
10491 SDValue ARMTargetLowering::LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const {
10492   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10493 
10494   EVT VT = getPointerTy(DAG.getDataLayout());
10495   SDLoc DL(Op);
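  // SPONENTRY produces the value of the stack pointer at function entry; a
  // fixed object at offset 0 gives exactly that address.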
10496   int FI = MFI.CreateFixedObject(4, 0, false);
10497   return DAG.getFrameIndex(FI, VT);
10498 }
10499 
10500 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10501   LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump());
10502   switch (Op.getOpcode()) {
10503   default: llvm_unreachable("Don't know how to custom lower this!");
10504   case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG);
10505   case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
10506   case ISD::BlockAddress:  return LowerBlockAddress(Op, DAG);
10507   case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
10508   case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
10509   case ISD::SELECT:        return LowerSELECT(Op, DAG);
10510   case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG);
10511   case ISD::BRCOND:        return LowerBRCOND(Op, DAG);
10512   case ISD::BR_CC:         return LowerBR_CC(Op, DAG);
10513   case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
10514   case ISD::VASTART:       return LowerVASTART(Op, DAG);
10515   case ISD::ATOMIC_FENCE:  return LowerATOMIC_FENCE(Op, DAG, Subtarget);
10516   case ISD::PREFETCH:      return LowerPREFETCH(Op, DAG, Subtarget);
10517   case ISD::SINT_TO_FP:
10518   case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
10519   case ISD::STRICT_FP_TO_SINT:
10520   case ISD::STRICT_FP_TO_UINT:
10521   case ISD::FP_TO_SINT:
10522   case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
10523   case ISD::FP_TO_SINT_SAT:
10524   case ISD::FP_TO_UINT_SAT: return LowerFP_TO_INT_SAT(Op, DAG, Subtarget);
10525   case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
10526   case ISD::RETURNADDR:    return LowerRETURNADDR(Op, DAG);
10527   case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
10528   case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
10529   case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
10530   case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
10531   case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG, Subtarget);
10532   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
10533                                                                Subtarget);
10534   case ISD::BITCAST:       return ExpandBITCAST(Op.getNode(), DAG, Subtarget);
10535   case ISD::SHL:
10536   case ISD::SRL:
10537   case ISD::SRA:           return LowerShift(Op.getNode(), DAG, Subtarget);
10538   case ISD::SREM:          return LowerREM(Op.getNode(), DAG);
10539   case ISD::UREM:          return LowerREM(Op.getNode(), DAG);
10540   case ISD::SHL_PARTS:     return LowerShiftLeftParts(Op, DAG);
10541   case ISD::SRL_PARTS:
10542   case ISD::SRA_PARTS:     return LowerShiftRightParts(Op, DAG);
10543   case ISD::CTTZ:
10544   case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
10545   case ISD::CTPOP:         return LowerCTPOP(Op.getNode(), DAG, Subtarget);
10546   case ISD::SETCC:         return LowerVSETCC(Op, DAG, Subtarget);
10547   case ISD::SETCCCARRY:    return LowerSETCCCARRY(Op, DAG);
10548   case ISD::ConstantFP:    return LowerConstantFP(Op, DAG, Subtarget);
10549   case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG, Subtarget);
10550   case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
10551   case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG, Subtarget);
10552   case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
10553   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG, Subtarget);
10554   case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG, Subtarget);
10555   case ISD::TRUNCATE:      return LowerTruncate(Op.getNode(), DAG, Subtarget);
10556   case ISD::SIGN_EXTEND:
10557   case ISD::ZERO_EXTEND:   return LowerVectorExtend(Op.getNode(), DAG, Subtarget);
10558   case ISD::GET_ROUNDING:  return LowerGET_ROUNDING(Op, DAG);
10559   case ISD::SET_ROUNDING:  return LowerSET_ROUNDING(Op, DAG);
10560   case ISD::MUL:           return LowerMUL(Op, DAG);
10561   case ISD::SDIV:
10562     if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
10563       return LowerDIV_Windows(Op, DAG, /* Signed */ true);
10564     return LowerSDIV(Op, DAG, Subtarget);
10565   case ISD::UDIV:
10566     if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
10567       return LowerDIV_Windows(Op, DAG, /* Signed */ false);
10568     return LowerUDIV(Op, DAG, Subtarget);
10569   case ISD::UADDO_CARRY:
10570   case ISD::USUBO_CARRY:
10571     return LowerUADDSUBO_CARRY(Op, DAG);
10572   case ISD::SADDO:
10573   case ISD::SSUBO:
10574     return LowerSignedALUO(Op, DAG);
10575   case ISD::UADDO:
10576   case ISD::USUBO:
10577     return LowerUnsignedALUO(Op, DAG);
10578   case ISD::SADDSAT:
10579   case ISD::SSUBSAT:
10580   case ISD::UADDSAT:
10581   case ISD::USUBSAT:
10582     return LowerADDSUBSAT(Op, DAG, Subtarget);
10583   case ISD::LOAD:
10584     return LowerPredicateLoad(Op, DAG);
10585   case ISD::STORE:
10586     return LowerSTORE(Op, DAG, Subtarget);
10587   case ISD::MLOAD:
10588     return LowerMLOAD(Op, DAG);
10589   case ISD::VECREDUCE_MUL:
10590   case ISD::VECREDUCE_AND:
10591   case ISD::VECREDUCE_OR:
10592   case ISD::VECREDUCE_XOR:
10593     return LowerVecReduce(Op, DAG, Subtarget);
10594   case ISD::VECREDUCE_FADD:
10595   case ISD::VECREDUCE_FMUL:
10596   case ISD::VECREDUCE_FMIN:
10597   case ISD::VECREDUCE_FMAX:
10598     return LowerVecReduceF(Op, DAG, Subtarget);
10599   case ISD::VECREDUCE_UMIN:
10600   case ISD::VECREDUCE_UMAX:
10601   case ISD::VECREDUCE_SMIN:
10602   case ISD::VECREDUCE_SMAX:
10603     return LowerVecReduceMinMax(Op, DAG, Subtarget);
10604   case ISD::ATOMIC_LOAD:
10605   case ISD::ATOMIC_STORE:  return LowerAtomicLoadStore(Op, DAG);
10606   case ISD::FSINCOS:       return LowerFSINCOS(Op, DAG);
10607   case ISD::SDIVREM:
10608   case ISD::UDIVREM:       return LowerDivRem(Op, DAG);
10609   case ISD::DYNAMIC_STACKALLOC:
10610     if (Subtarget->isTargetWindows())
10611       return LowerDYNAMIC_STACKALLOC(Op, DAG);
10612     llvm_unreachable("Don't know how to custom lower this!");
10613   case ISD::STRICT_FP_ROUND:
10614   case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
10615   case ISD::STRICT_FP_EXTEND:
10616   case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
10617   case ISD::STRICT_FSETCC:
10618   case ISD::STRICT_FSETCCS: return LowerFSETCC(Op, DAG);
10619   case ISD::SPONENTRY:
10620     return LowerSPONENTRY(Op, DAG);
10621   case ARMISD::WIN__DBZCHK: return SDValue();
10622   }
10623 }
10624 
10625 static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
10626                                  SelectionDAG &DAG) {
10627   unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
10628   unsigned Opc = 0;
10629   if (IntNo == Intrinsic::arm_smlald)
10630     Opc = ARMISD::SMLALD;
10631   else if (IntNo == Intrinsic::arm_smlaldx)
10632     Opc = ARMISD::SMLALDX;
10633   else if (IntNo == Intrinsic::arm_smlsld)
10634     Opc = ARMISD::SMLSLD;
10635   else if (IntNo == Intrinsic::arm_smlsldx)
10636     Opc = ARMISD::SMLSLDX;
10637   else
10638     return;
10639 
10640   SDLoc dl(N);
10641   SDValue Lo, Hi;
10642   std::tie(Lo, Hi) = DAG.SplitScalar(N->getOperand(3), dl, MVT::i32, MVT::i32);
10643 
10644   SDValue LongMul = DAG.getNode(Opc, dl,
10645                                 DAG.getVTList(MVT::i32, MVT::i32),
10646                                 N->getOperand(1), N->getOperand(2),
10647                                 Lo, Hi);
10648   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64,
10649                                 LongMul.getValue(0), LongMul.getValue(1)));
10650 }
10651 
10652 /// ReplaceNodeResults - Replace the results of node with an illegal result
10653 /// type with new values built out of custom code.
10654 void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
10655                                            SmallVectorImpl<SDValue> &Results,
10656                                            SelectionDAG &DAG) const {
10657   SDValue Res;
10658   switch (N->getOpcode()) {
10659   default:
10660     llvm_unreachable("Don't know how to custom expand this!");
10661   case ISD::READ_REGISTER:
10662     ExpandREAD_REGISTER(N, Results, DAG);
10663     break;
10664   case ISD::BITCAST:
10665     Res = ExpandBITCAST(N, DAG, Subtarget);
10666     break;
10667   case ISD::SRL:
10668   case ISD::SRA:
10669   case ISD::SHL:
10670     Res = Expand64BitShift(N, DAG, Subtarget);
10671     break;
10672   case ISD::SREM:
10673   case ISD::UREM:
10674     Res = LowerREM(N, DAG);
10675     break;
10676   case ISD::SDIVREM:
10677   case ISD::UDIVREM:
10678     Res = LowerDivRem(SDValue(N, 0), DAG);
10679     assert(Res.getNumOperands() == 2 && "DivRem needs two values");
10680     Results.push_back(Res.getValue(0));
10681     Results.push_back(Res.getValue(1));
10682     return;
10683   case ISD::SADDSAT:
10684   case ISD::SSUBSAT:
10685   case ISD::UADDSAT:
10686   case ISD::USUBSAT:
10687     Res = LowerADDSUBSAT(SDValue(N, 0), DAG, Subtarget);
10688     break;
10689   case ISD::READCYCLECOUNTER:
10690     ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget);
10691     return;
10692   case ISD::UDIV:
10693   case ISD::SDIV:
10694     assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows");
10695     return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV,
10696                              Results);
10697   case ISD::ATOMIC_CMP_SWAP:
10698     ReplaceCMP_SWAP_64Results(N, Results, DAG);
10699     return;
10700   case ISD::INTRINSIC_WO_CHAIN:
10701     return ReplaceLongIntrinsic(N, Results, DAG);
10702   case ISD::LOAD:
10703     LowerLOAD(N, Results, DAG);
10704     break;
10705   case ISD::TRUNCATE:
10706     Res = LowerTruncate(N, DAG, Subtarget);
10707     break;
10708   case ISD::SIGN_EXTEND:
10709   case ISD::ZERO_EXTEND:
10710     Res = LowerVectorExtend(N, DAG, Subtarget);
10711     break;
10712   case ISD::FP_TO_SINT_SAT:
10713   case ISD::FP_TO_UINT_SAT:
10714     Res = LowerFP_TO_INT_SAT(SDValue(N, 0), DAG, Subtarget);
10715     break;
10716   }
10717   if (Res.getNode())
10718     Results.push_back(Res);
10719 }
10720 
10721 //===----------------------------------------------------------------------===//
10722 //                           ARM Scheduler Hooks
10723 //===----------------------------------------------------------------------===//
10724 
10725 /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
10726 /// registers the function context.
10727 void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
10728                                                MachineBasicBlock *MBB,
10729                                                MachineBasicBlock *DispatchBB,
10730                                                int FI) const {
10731   assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
10732          "ROPI/RWPI not currently supported with SjLj");
10733   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
10734   DebugLoc dl = MI.getDebugLoc();
10735   MachineFunction *MF = MBB->getParent();
10736   MachineRegisterInfo *MRI = &MF->getRegInfo();
10737   MachineConstantPool *MCP = MF->getConstantPool();
10738   ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
10739   const Function &F = MF->getFunction();
10740 
10741   bool isThumb = Subtarget->isThumb();
10742   bool isThumb2 = Subtarget->isThumb2();
10743 
10744   unsigned PCLabelId = AFI->createPICLabelUId();
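  // The PC reads as the address of the current instruction plus 4 in Thumb
  // and plus 8 in ARM, so bias the constant-pool value accordingly.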
10745   unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
10746   ARMConstantPoolValue *CPV =
10747     ARMConstantPoolMBB::Create(F.getContext(), DispatchBB, PCLabelId, PCAdj);
10748   unsigned CPI = MCP->getConstantPoolIndex(CPV, Align(4));
10749 
10750   const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass
10751                                            : &ARM::GPRRegClass;
10752 
10753   // Grab constant pool and fixed stack memory operands.
10754   MachineMemOperand *CPMMO =
10755       MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
10756                                MachineMemOperand::MOLoad, 4, Align(4));
10757 
10758   MachineMemOperand *FIMMOSt =
10759       MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
10760                                MachineMemOperand::MOStore, 4, Align(4));
10761 
10762   // Load the address of the dispatch MBB into the jump buffer.
10763   if (isThumb2) {
10764     // Incoming value: jbuf
10765     //   ldr.n  r5, LCPI1_1
10766     //   orr    r5, r5, #1
10767     //   add    r5, pc
10768     //   str    r5, [$jbuf, #+4] ; &jbuf[1]
10769     Register NewVReg1 = MRI->createVirtualRegister(TRC);
10770     BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1)
10771         .addConstantPoolIndex(CPI)
10772         .addMemOperand(CPMMO)
10773         .add(predOps(ARMCC::AL));
10774     // Set the low bit because of thumb mode.
10775     Register NewVReg2 = MRI->createVirtualRegister(TRC);
10776     BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
10777         .addReg(NewVReg1, RegState::Kill)
10778         .addImm(0x01)
10779         .add(predOps(ARMCC::AL))
10780         .add(condCodeOp());
10781     Register NewVReg3 = MRI->createVirtualRegister(TRC);
10782     BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
10783       .addReg(NewVReg2, RegState::Kill)
10784       .addImm(PCLabelId);
10785     BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12))
10786         .addReg(NewVReg3, RegState::Kill)
10787         .addFrameIndex(FI)
10788         .addImm(36) // &jbuf[1] :: pc
10789         .addMemOperand(FIMMOSt)
10790         .add(predOps(ARMCC::AL));
10791   } else if (isThumb) {
10792     // Incoming value: jbuf
10793     //   ldr.n  r1, LCPI1_4
10794     //   add    r1, pc
10795     //   mov    r2, #1
10796     //   orrs   r1, r2
10797     //   add    r2, $jbuf, #+4 ; &jbuf[1]
10798     //   str    r1, [r2]
10799     Register NewVReg1 = MRI->createVirtualRegister(TRC);
10800     BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1)
10801         .addConstantPoolIndex(CPI)
10802         .addMemOperand(CPMMO)
10803         .add(predOps(ARMCC::AL));
10804     Register NewVReg2 = MRI->createVirtualRegister(TRC);
10805     BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2)
10806       .addReg(NewVReg1, RegState::Kill)
10807       .addImm(PCLabelId);
10808     // Set the low bit because of thumb mode.
10809     Register NewVReg3 = MRI->createVirtualRegister(TRC);
10810     BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3)
10811         .addReg(ARM::CPSR, RegState::Define)
10812         .addImm(1)
10813         .add(predOps(ARMCC::AL));
10814     Register NewVReg4 = MRI->createVirtualRegister(TRC);
10815     BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4)
10816         .addReg(ARM::CPSR, RegState::Define)
10817         .addReg(NewVReg2, RegState::Kill)
10818         .addReg(NewVReg3, RegState::Kill)
10819         .add(predOps(ARMCC::AL));
10820     Register NewVReg5 = MRI->createVirtualRegister(TRC);
10821     BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5)
10822             .addFrameIndex(FI)
10823             .addImm(36); // &jbuf[1] :: pc
10824     BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi))
10825         .addReg(NewVReg4, RegState::Kill)
10826         .addReg(NewVReg5, RegState::Kill)
10827         .addImm(0)
10828         .addMemOperand(FIMMOSt)
10829         .add(predOps(ARMCC::AL));
10830   } else {
10831     // Incoming value: jbuf
10832     //   ldr  r1, LCPI1_1
10833     //   add  r1, pc, r1
10834     //   str  r1, [$jbuf, #+4] ; &jbuf[1]
10835     Register NewVReg1 = MRI->createVirtualRegister(TRC);
10836     BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1)
10837         .addConstantPoolIndex(CPI)
10838         .addImm(0)
10839         .addMemOperand(CPMMO)
10840         .add(predOps(ARMCC::AL));
10841     Register NewVReg2 = MRI->createVirtualRegister(TRC);
10842     BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2)
10843         .addReg(NewVReg1, RegState::Kill)
10844         .addImm(PCLabelId)
10845         .add(predOps(ARMCC::AL));
10846     BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12))
10847         .addReg(NewVReg2, RegState::Kill)
10848         .addFrameIndex(FI)
10849         .addImm(36) // &jbuf[1] :: pc
10850         .addMemOperand(FIMMOSt)
10851         .add(predOps(ARMCC::AL));
10852   }
10853 }
10854 
10855 void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
10856                                               MachineBasicBlock *MBB) const {
10857   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
10858   DebugLoc dl = MI.getDebugLoc();
10859   MachineFunction *MF = MBB->getParent();
10860   MachineRegisterInfo *MRI = &MF->getRegInfo();
10861   MachineFrameInfo &MFI = MF->getFrameInfo();
10862   int FI = MFI.getFunctionContextIndex();
10863 
10864   const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass
10865                                                         : &ARM::GPRnopcRegClass;
10866 
10867   // Get a mapping of the call site numbers to all of the landing pads they're
10868   // associated with.
10869   DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad;
10870   unsigned MaxCSNum = 0;
10871   for (MachineBasicBlock &BB : *MF) {
10872     if (!BB.isEHPad())
10873       continue;
10874 
10875     // FIXME: We should assert that the EH_LABEL is the first MI in the landing
10876     // pad.
10877     for (MachineInstr &II : BB) {
10878       if (!II.isEHLabel())
10879         continue;
10880 
10881       MCSymbol *Sym = II.getOperand(0).getMCSymbol();
10882       if (!MF->hasCallSiteLandingPad(Sym)) continue;
10883 
10884       SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym);
10885       for (unsigned Idx : CallSiteIdxs) {
10886         CallSiteNumToLPad[Idx].push_back(&BB);
10887         MaxCSNum = std::max(MaxCSNum, Idx);
10888       }
10889       break;
10890     }
10891   }
10892 
10893   // Get an ordered list of the machine basic blocks for the jump table.
10894   std::vector<MachineBasicBlock*> LPadList;
10895   SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs;
10896   LPadList.reserve(CallSiteNumToLPad.size());
10897   for (unsigned I = 1; I <= MaxCSNum; ++I) {
10898     SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
10899     for (MachineBasicBlock *MBB : MBBList) {
10900       LPadList.push_back(MBB);
10901       InvokeBBs.insert(MBB->pred_begin(), MBB->pred_end());
10902     }
10903   }
10904 
10905   assert(!LPadList.empty() &&
10906          "No landing pad destinations for the dispatch jump table!");
10907 
10908   // Create the jump table and associated information.
10909   MachineJumpTableInfo *JTI =
10910     MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
10911   unsigned MJTI = JTI->createJumpTableIndex(LPadList);
10912 
10913   // Create the MBBs for the dispatch code.
10914 
10915   // Shove the dispatch's address into the return slot in the function context.
10916   MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
10917   DispatchBB->setIsEHPad();
10918 
10919   MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
10920   unsigned trap_opcode;
10921   if (Subtarget->isThumb())
10922     trap_opcode = ARM::tTRAP;
10923   else
10924     trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;
10925 
10926   BuildMI(TrapBB, dl, TII->get(trap_opcode));
10927   DispatchBB->addSuccessor(TrapBB);
10928 
10929   MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
10930   DispatchBB->addSuccessor(DispContBB);
10931 
  // Insert the MBBs.
10933   MF->insert(MF->end(), DispatchBB);
10934   MF->insert(MF->end(), DispContBB);
10935   MF->insert(MF->end(), TrapBB);
10936 
10937   // Insert code into the entry block that creates and registers the function
10938   // context.
10939   SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);
10940 
10941   MachineMemOperand *FIMMOLd = MF->getMachineMemOperand(
10942       MachinePointerInfo::getFixedStack(*MF, FI),
10943       MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, Align(4));
10944 
10945   MachineInstrBuilder MIB;
10946   MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));
10947 
10948   const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
10949   const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
10950 
10951   // Add a register mask with no preserved registers.  This results in all
10952   // registers being marked as clobbered. This can't work if the dispatch block
10953   // is in a Thumb1 function and is linked with ARM code which uses the FP
10954   // registers, as there is no way to preserve the FP registers in Thumb1 mode.
10955   MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF));
10956 
10957   bool IsPositionIndependent = isPositionIndependent();
10958   unsigned NumLPads = LPadList.size();
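  // The dispatch block loads the call site index that was stored into the
  // function context, traps if it is out of range, and otherwise uses it to
  // index the jump table and branch to the corresponding landing pad.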
10959   if (Subtarget->isThumb2()) {
10960     Register NewVReg1 = MRI->createVirtualRegister(TRC);
10961     BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1)
10962         .addFrameIndex(FI)
10963         .addImm(4)
10964         .addMemOperand(FIMMOLd)
10965         .add(predOps(ARMCC::AL));
10966 
10967     if (NumLPads < 256) {
10968       BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri))
10969           .addReg(NewVReg1)
10970           .addImm(LPadList.size())
10971           .add(predOps(ARMCC::AL));
10972     } else {
10973       Register VReg1 = MRI->createVirtualRegister(TRC);
10974       BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1)
10975           .addImm(NumLPads & 0xFFFF)
10976           .add(predOps(ARMCC::AL));
10977 
10978       unsigned VReg2 = VReg1;
10979       if ((NumLPads & 0xFFFF0000) != 0) {
10980         VReg2 = MRI->createVirtualRegister(TRC);
10981         BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2)
10982             .addReg(VReg1)
10983             .addImm(NumLPads >> 16)
10984             .add(predOps(ARMCC::AL));
10985       }
10986 
10987       BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr))
10988           .addReg(NewVReg1)
10989           .addReg(VReg2)
10990           .add(predOps(ARMCC::AL));
10991     }
10992 
10993     BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc))
10994       .addMBB(TrapBB)
10995       .addImm(ARMCC::HI)
10996       .addReg(ARM::CPSR);
10997 
10998     Register NewVReg3 = MRI->createVirtualRegister(TRC);
10999     BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT), NewVReg3)
11000         .addJumpTableIndex(MJTI)
11001         .add(predOps(ARMCC::AL));
11002 
11003     Register NewVReg4 = MRI->createVirtualRegister(TRC);
11004     BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
11005         .addReg(NewVReg3, RegState::Kill)
11006         .addReg(NewVReg1)
11007         .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
11008         .add(predOps(ARMCC::AL))
11009         .add(condCodeOp());
11010 
11011     BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
11012       .addReg(NewVReg4, RegState::Kill)
11013       .addReg(NewVReg1)
11014       .addJumpTableIndex(MJTI);
11015   } else if (Subtarget->isThumb()) {
11016     Register NewVReg1 = MRI->createVirtualRegister(TRC);
11017     BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1)
11018         .addFrameIndex(FI)
11019         .addImm(1)
11020         .addMemOperand(FIMMOLd)
11021         .add(predOps(ARMCC::AL));
11022 
11023     if (NumLPads < 256) {
11024       BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8))
11025           .addReg(NewVReg1)
11026           .addImm(NumLPads)
11027           .add(predOps(ARMCC::AL));
11028     } else {
11029       MachineConstantPool *ConstantPool = MF->getConstantPool();
11030       Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
11031       const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
11032 
11033       // MachineConstantPool wants an explicit alignment.
11034       Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
11035       unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment);
11036 
11037       Register VReg1 = MRI->createVirtualRegister(TRC);
11038       BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci))
11039           .addReg(VReg1, RegState::Define)
11040           .addConstantPoolIndex(Idx)
11041           .add(predOps(ARMCC::AL));
11042       BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr))
11043           .addReg(NewVReg1)
11044           .addReg(VReg1)
11045           .add(predOps(ARMCC::AL));
11046     }
11047 
11048     BuildMI(DispatchBB, dl, TII->get(ARM::tBcc))
11049       .addMBB(TrapBB)
11050       .addImm(ARMCC::HI)
11051       .addReg(ARM::CPSR);
11052 
11053     Register NewVReg2 = MRI->createVirtualRegister(TRC);
11054     BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2)
11055         .addReg(ARM::CPSR, RegState::Define)
11056         .addReg(NewVReg1)
11057         .addImm(2)
11058         .add(predOps(ARMCC::AL));
11059 
11060     Register NewVReg3 = MRI->createVirtualRegister(TRC);
11061     BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3)
11062         .addJumpTableIndex(MJTI)
11063         .add(predOps(ARMCC::AL));
11064 
11065     Register NewVReg4 = MRI->createVirtualRegister(TRC);
11066     BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4)
11067         .addReg(ARM::CPSR, RegState::Define)
11068         .addReg(NewVReg2, RegState::Kill)
11069         .addReg(NewVReg3)
11070         .add(predOps(ARMCC::AL));
11071 
11072     MachineMemOperand *JTMMOLd =
11073         MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF),
11074                                  MachineMemOperand::MOLoad, 4, Align(4));
11075 
11076     Register NewVReg5 = MRI->createVirtualRegister(TRC);
11077     BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5)
11078         .addReg(NewVReg4, RegState::Kill)
11079         .addImm(0)
11080         .addMemOperand(JTMMOLd)
11081         .add(predOps(ARMCC::AL));
11082 
11083     unsigned NewVReg6 = NewVReg5;
11084     if (IsPositionIndependent) {
11085       NewVReg6 = MRI->createVirtualRegister(TRC);
11086       BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6)
11087           .addReg(ARM::CPSR, RegState::Define)
11088           .addReg(NewVReg5, RegState::Kill)
11089           .addReg(NewVReg3)
11090           .add(predOps(ARMCC::AL));
11091     }
11092 
11093     BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr))
11094       .addReg(NewVReg6, RegState::Kill)
11095       .addJumpTableIndex(MJTI);
11096   } else {
11097     Register NewVReg1 = MRI->createVirtualRegister(TRC);
11098     BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1)
11099         .addFrameIndex(FI)
11100         .addImm(4)
11101         .addMemOperand(FIMMOLd)
11102         .add(predOps(ARMCC::AL));
11103 
11104     if (NumLPads < 256) {
11105       BuildMI(DispatchBB, dl, TII->get(ARM::CMPri))
11106           .addReg(NewVReg1)
11107           .addImm(NumLPads)
11108           .add(predOps(ARMCC::AL));
11109     } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) {
11110       Register VReg1 = MRI->createVirtualRegister(TRC);
11111       BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1)
11112           .addImm(NumLPads & 0xFFFF)
11113           .add(predOps(ARMCC::AL));
11114 
11115       unsigned VReg2 = VReg1;
11116       if ((NumLPads & 0xFFFF0000) != 0) {
11117         VReg2 = MRI->createVirtualRegister(TRC);
11118         BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2)
11119             .addReg(VReg1)
11120             .addImm(NumLPads >> 16)
11121             .add(predOps(ARMCC::AL));
11122       }
11123 
11124       BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
11125           .addReg(NewVReg1)
11126           .addReg(VReg2)
11127           .add(predOps(ARMCC::AL));
11128     } else {
11129       MachineConstantPool *ConstantPool = MF->getConstantPool();
11130       Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
11131       const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
11132 
11133       // MachineConstantPool wants an explicit alignment.
11134       Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
11135       unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment);
11136 
11137       Register VReg1 = MRI->createVirtualRegister(TRC);
11138       BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp))
11139           .addReg(VReg1, RegState::Define)
11140           .addConstantPoolIndex(Idx)
11141           .addImm(0)
11142           .add(predOps(ARMCC::AL));
11143       BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
11144           .addReg(NewVReg1)
11145           .addReg(VReg1, RegState::Kill)
11146           .add(predOps(ARMCC::AL));
11147     }
11148 
11149     BuildMI(DispatchBB, dl, TII->get(ARM::Bcc))
11150       .addMBB(TrapBB)
11151       .addImm(ARMCC::HI)
11152       .addReg(ARM::CPSR);
11153 
11154     Register NewVReg3 = MRI->createVirtualRegister(TRC);
11155     BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
11156         .addReg(NewVReg1)
11157         .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
11158         .add(predOps(ARMCC::AL))
11159         .add(condCodeOp());
11160     Register NewVReg4 = MRI->createVirtualRegister(TRC);
11161     BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
11162         .addJumpTableIndex(MJTI)
11163         .add(predOps(ARMCC::AL));
11164 
11165     MachineMemOperand *JTMMOLd =
11166         MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF),
11167                                  MachineMemOperand::MOLoad, 4, Align(4));
11168     Register NewVReg5 = MRI->createVirtualRegister(TRC);
11169     BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
11170         .addReg(NewVReg3, RegState::Kill)
11171         .addReg(NewVReg4)
11172         .addImm(0)
11173         .addMemOperand(JTMMOLd)
11174         .add(predOps(ARMCC::AL));
11175 
11176     if (IsPositionIndependent) {
11177       BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
11178         .addReg(NewVReg5, RegState::Kill)
11179         .addReg(NewVReg4)
11180         .addJumpTableIndex(MJTI);
11181     } else {
11182       BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr))
11183         .addReg(NewVReg5, RegState::Kill)
11184         .addJumpTableIndex(MJTI);
11185     }
11186   }
11187 
11188   // Add the jump table entries as successors to the MBB.
11189   SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs;
11190   for (MachineBasicBlock *CurMBB : LPadList) {
11191     if (SeenMBBs.insert(CurMBB).second)
11192       DispContBB->addSuccessor(CurMBB);
11193   }
11194 
11195   // N.B. the order the invoke BBs are processed in doesn't matter here.
11196   const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF);
11197   SmallVector<MachineBasicBlock*, 64> MBBLPads;
11198   for (MachineBasicBlock *BB : InvokeBBs) {
11199 
11200     // Remove the landing pad successor from the invoke block and replace it
11201     // with the new dispatch block.
11202     SmallVector<MachineBasicBlock*, 4> Successors(BB->successors());
11203     while (!Successors.empty()) {
11204       MachineBasicBlock *SMBB = Successors.pop_back_val();
11205       if (SMBB->isEHPad()) {
11206         BB->removeSuccessor(SMBB);
11207         MBBLPads.push_back(SMBB);
11208       }
11209     }
11210 
11211     BB->addSuccessor(DispatchBB, BranchProbability::getZero());
11212     BB->normalizeSuccProbs();
11213 
11214     // Find the invoke call and mark all of the callee-saved registers as
11215     // 'implicit defined' so that they're spilled. This prevents code from
11216     // moving instructions to before the EH block, where they will never be
11217     // executed.
11218     for (MachineBasicBlock::reverse_iterator
11219            II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
11220       if (!II->isCall()) continue;
11221 
11222       DenseMap<unsigned, bool> DefRegs;
11223       for (MachineInstr::mop_iterator
11224              OI = II->operands_begin(), OE = II->operands_end();
11225            OI != OE; ++OI) {
11226         if (!OI->isReg()) continue;
11227         DefRegs[OI->getReg()] = true;
11228       }
11229 
11230       MachineInstrBuilder MIB(*MF, &*II);
11231 
11232       for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
11233         unsigned Reg = SavedRegs[i];
11234         if (Subtarget->isThumb2() &&
11235             !ARM::tGPRRegClass.contains(Reg) &&
11236             !ARM::hGPRRegClass.contains(Reg))
11237           continue;
11238         if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
11239           continue;
11240         if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg))
11241           continue;
11242         if (!DefRegs[Reg])
11243           MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
11244       }
11245 
11246       break;
11247     }
11248   }
11249 
11250   // Mark all former landing pads as non-landing pads. The dispatch is the only
11251   // landing pad now.
11252   for (MachineBasicBlock *MBBLPad : MBBLPads)
11253     MBBLPad->setIsEHPad(false);
11254 
11255   // The instruction is gone now.
11256   MI.eraseFromParent();
11257 }
11258 
11259 static
11260 MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
11261   for (MachineBasicBlock *S : MBB->successors())
11262     if (S != Succ)
11263       return S;
11264   llvm_unreachable("Expecting a BB with two successors!");
11265 }
11266 
/// Return the load opcode for a given load size. If the load size is >= 8, a
/// NEON opcode will be returned.
11269 static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {
11270   if (LdSize >= 8)
11271     return LdSize == 16 ? ARM::VLD1q32wb_fixed
11272                         : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
11273   if (IsThumb1)
11274     return LdSize == 4 ? ARM::tLDRi
11275                        : LdSize == 2 ? ARM::tLDRHi
11276                                      : LdSize == 1 ? ARM::tLDRBi : 0;
11277   if (IsThumb2)
11278     return LdSize == 4 ? ARM::t2LDR_POST
11279                        : LdSize == 2 ? ARM::t2LDRH_POST
11280                                      : LdSize == 1 ? ARM::t2LDRB_POST : 0;
11281   return LdSize == 4 ? ARM::LDR_POST_IMM
11282                      : LdSize == 2 ? ARM::LDRH_POST
11283                                    : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
11284 }
11285 
/// Return the store opcode for a given store size. If the store size is >= 8,
/// a NEON opcode will be returned.
11288 static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
11289   if (StSize >= 8)
11290     return StSize == 16 ? ARM::VST1q32wb_fixed
11291                         : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
11292   if (IsThumb1)
11293     return StSize == 4 ? ARM::tSTRi
11294                        : StSize == 2 ? ARM::tSTRHi
11295                                      : StSize == 1 ? ARM::tSTRBi : 0;
11296   if (IsThumb2)
11297     return StSize == 4 ? ARM::t2STR_POST
11298                        : StSize == 2 ? ARM::t2STRH_POST
11299                                      : StSize == 1 ? ARM::t2STRB_POST : 0;
11300   return StSize == 4 ? ARM::STR_POST_IMM
11301                      : StSize == 2 ? ARM::STRH_POST
11302                                    : StSize == 1 ? ARM::STRB_POST_IMM : 0;
11303 }
11304 
/// Emit a post-increment load operation of the given size. The instructions
/// will be added to BB at Pos.
11307 static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
11308                        const TargetInstrInfo *TII, const DebugLoc &dl,
11309                        unsigned LdSize, unsigned Data, unsigned AddrIn,
11310                        unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
11311   unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
11312   assert(LdOpc != 0 && "Should have a load opcode");
11313   if (LdSize >= 8) {
11314     BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
11315         .addReg(AddrOut, RegState::Define)
11316         .addReg(AddrIn)
11317         .addImm(0)
11318         .add(predOps(ARMCC::AL));
11319   } else if (IsThumb1) {
11320     // load + update AddrIn
11321     BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
11322         .addReg(AddrIn)
11323         .addImm(0)
11324         .add(predOps(ARMCC::AL));
11325     BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
11326         .add(t1CondCodeOp())
11327         .addReg(AddrIn)
11328         .addImm(LdSize)
11329         .add(predOps(ARMCC::AL));
11330   } else if (IsThumb2) {
11331     BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
11332         .addReg(AddrOut, RegState::Define)
11333         .addReg(AddrIn)
11334         .addImm(LdSize)
11335         .add(predOps(ARMCC::AL));
11336   } else { // arm
11337     BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
11338         .addReg(AddrOut, RegState::Define)
11339         .addReg(AddrIn)
11340         .addReg(0)
11341         .addImm(LdSize)
11342         .add(predOps(ARMCC::AL));
11343   }
11344 }
11345 
/// Emit a post-increment store operation of the given size. The instructions
/// will be added to BB at Pos.
11348 static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
11349                        const TargetInstrInfo *TII, const DebugLoc &dl,
11350                        unsigned StSize, unsigned Data, unsigned AddrIn,
11351                        unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
11352   unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
11353   assert(StOpc != 0 && "Should have a store opcode");
11354   if (StSize >= 8) {
11355     BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
11356         .addReg(AddrIn)
11357         .addImm(0)
11358         .addReg(Data)
11359         .add(predOps(ARMCC::AL));
11360   } else if (IsThumb1) {
11361     // store + update AddrIn
11362     BuildMI(*BB, Pos, dl, TII->get(StOpc))
11363         .addReg(Data)
11364         .addReg(AddrIn)
11365         .addImm(0)
11366         .add(predOps(ARMCC::AL));
11367     BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
11368         .add(t1CondCodeOp())
11369         .addReg(AddrIn)
11370         .addImm(StSize)
11371         .add(predOps(ARMCC::AL));
11372   } else if (IsThumb2) {
11373     BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
11374         .addReg(Data)
11375         .addReg(AddrIn)
11376         .addImm(StSize)
11377         .add(predOps(ARMCC::AL));
11378   } else { // arm
11379     BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
11380         .addReg(Data)
11381         .addReg(AddrIn)
11382         .addReg(0)
11383         .addImm(StSize)
11384         .add(predOps(ARMCC::AL));
11385   }
11386 }
11387 
11388 MachineBasicBlock *
11389 ARMTargetLowering::EmitStructByval(MachineInstr &MI,
11390                                    MachineBasicBlock *BB) const {
  // This pseudo instruction has 4 operands: dst, src, size, alignment.
11392   // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
11393   // Otherwise, we will generate unrolled scalar copies.
11394   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
11395   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11396   MachineFunction::iterator It = ++BB->getIterator();
11397 
11398   Register dest = MI.getOperand(0).getReg();
11399   Register src = MI.getOperand(1).getReg();
11400   unsigned SizeVal = MI.getOperand(2).getImm();
11401   unsigned Alignment = MI.getOperand(3).getImm();
11402   DebugLoc dl = MI.getDebugLoc();
11403 
11404   MachineFunction *MF = BB->getParent();
11405   MachineRegisterInfo &MRI = MF->getRegInfo();
11406   unsigned UnitSize = 0;
11407   const TargetRegisterClass *TRC = nullptr;
11408   const TargetRegisterClass *VecTRC = nullptr;
11409 
11410   bool IsThumb1 = Subtarget->isThumb1Only();
11411   bool IsThumb2 = Subtarget->isThumb2();
11412   bool IsThumb = Subtarget->isThumb();
11413 
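  // Pick the widest copy unit the alignment allows: 1 or 2 bytes for
  // under-aligned copies, 4 bytes otherwise, or 8/16-byte NEON units when the
  // alignment and size permit.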
11414   if (Alignment & 1) {
11415     UnitSize = 1;
11416   } else if (Alignment & 2) {
11417     UnitSize = 2;
11418   } else {
11419     // Check whether we can use NEON instructions.
11420     if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) &&
11421         Subtarget->hasNEON()) {
11422       if ((Alignment % 16 == 0) && SizeVal >= 16)
11423         UnitSize = 16;
11424       else if ((Alignment % 8 == 0) && SizeVal >= 8)
11425         UnitSize = 8;
11426     }
11427     // Can't use NEON instructions.
11428     if (UnitSize == 0)
11429       UnitSize = 4;
11430   }
11431 
11432   // Select the correct opcode and register class for unit size load/store
11433   bool IsNeon = UnitSize >= 8;
11434   TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
11435   if (IsNeon)
11436     VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
11437                             : UnitSize == 8 ? &ARM::DPRRegClass
11438                                             : nullptr;
11439 
11440   unsigned BytesLeft = SizeVal % UnitSize;
11441   unsigned LoopSize = SizeVal - BytesLeft;
11442 
11443   if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
11444     // Use LDR and STR to copy.
11445     // [scratch, srcOut] = LDR_POST(srcIn, UnitSize)
11446     // [destOut] = STR_POST(scratch, destIn, UnitSize)
11447     unsigned srcIn = src;
11448     unsigned destIn = dest;
11449     for (unsigned i = 0; i < LoopSize; i+=UnitSize) {
11450       Register srcOut = MRI.createVirtualRegister(TRC);
11451       Register destOut = MRI.createVirtualRegister(TRC);
11452       Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
11453       emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
11454                  IsThumb1, IsThumb2);
11455       emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
11456                  IsThumb1, IsThumb2);
11457       srcIn = srcOut;
11458       destIn = destOut;
11459     }
11460 
11461     // Handle the leftover bytes with LDRB and STRB.
11462     // [scratch, srcOut] = LDRB_POST(srcIn, 1)
11463     // [destOut] = STRB_POST(scratch, destIn, 1)
11464     for (unsigned i = 0; i < BytesLeft; i++) {
11465       Register srcOut = MRI.createVirtualRegister(TRC);
11466       Register destOut = MRI.createVirtualRegister(TRC);
11467       Register scratch = MRI.createVirtualRegister(TRC);
11468       emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
11469                  IsThumb1, IsThumb2);
11470       emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
11471                  IsThumb1, IsThumb2);
11472       srcIn = srcOut;
11473       destIn = destOut;
11474     }
11475     MI.eraseFromParent(); // The instruction is gone now.
11476     return BB;
11477   }
11478 
11479   // Expand the pseudo op to a loop.
11480   // thisMBB:
11481   //   ...
11482   //   movw varEnd, # --> with thumb2
11483   //   movt varEnd, #
11484   //   ldrcp varEnd, idx --> without thumb2
11485   //   fallthrough --> loopMBB
11486   // loopMBB:
11487   //   PHI varPhi, varEnd, varLoop
11488   //   PHI srcPhi, src, srcLoop
11489   //   PHI destPhi, dst, destLoop
11490   //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
11491   //   [destLoop] = STR_POST(scratch, destPhi, UnitSize)
11492   //   subs varLoop, varPhi, #UnitSize
11493   //   bne loopMBB
11494   //   fallthrough --> exitMBB
11495   // exitMBB:
11496   //   epilogue to handle left-over bytes
11497   //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
11498   //   [destOut] = STRB_POST(scratch, destLoop, 1)
11499   MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
11500   MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
11501   MF->insert(It, loopMBB);
11502   MF->insert(It, exitMBB);
11503 
11504   // Set the call frame size on entry to the new basic blocks.
11505   unsigned CallFrameSize = TII->getCallFrameSizeAt(MI);
11506   loopMBB->setCallFrameSize(CallFrameSize);
11507   exitMBB->setCallFrameSize(CallFrameSize);
11508 
11509   // Transfer the remainder of BB and its successor edges to exitMBB.
11510   exitMBB->splice(exitMBB->begin(), BB,
11511                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
11512   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11513 
11514   // Load an immediate to varEnd.
11515   Register varEnd = MRI.createVirtualRegister(TRC);
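  // Materialize LoopSize: use a movw/movt pseudo when available, an
  // immediate-materialization pseudo for execute-only code (which must not
  // touch the constant pool), and a constant-pool load otherwise.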
11516   if (Subtarget->useMovt()) {
11517     BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVi32imm : ARM::MOVi32imm),
11518             varEnd)
11519         .addImm(LoopSize);
11520   } else if (Subtarget->genExecuteOnly()) {
11521     assert(IsThumb && "Non-thumb expected to have used movt");
11522     BuildMI(BB, dl, TII->get(ARM::tMOVi32imm), varEnd).addImm(LoopSize);
11523   } else {
11524     MachineConstantPool *ConstantPool = MF->getConstantPool();
11525     Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
11526     const Constant *C = ConstantInt::get(Int32Ty, LoopSize);
11527 
11528     // MachineConstantPool wants an explicit alignment.
11529     Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
11530     unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment);
11531     MachineMemOperand *CPMMO =
11532         MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
11533                                  MachineMemOperand::MOLoad, 4, Align(4));
11534 
11535     if (IsThumb)
11536       BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci))
11537           .addReg(varEnd, RegState::Define)
11538           .addConstantPoolIndex(Idx)
11539           .add(predOps(ARMCC::AL))
11540           .addMemOperand(CPMMO);
11541     else
11542       BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp))
11543           .addReg(varEnd, RegState::Define)
11544           .addConstantPoolIndex(Idx)
11545           .addImm(0)
11546           .add(predOps(ARMCC::AL))
11547           .addMemOperand(CPMMO);
11548   }
11549   BB->addSuccessor(loopMBB);
11550 
11551   // Generate the loop body:
11552   //   varPhi = PHI(varLoop, varEnd)
11553   //   srcPhi = PHI(srcLoop, src)
11554   //   destPhi = PHI(destLoop, dst)
11555   MachineBasicBlock *entryBB = BB;
11556   BB = loopMBB;
11557   Register varLoop = MRI.createVirtualRegister(TRC);
11558   Register varPhi = MRI.createVirtualRegister(TRC);
11559   Register srcLoop = MRI.createVirtualRegister(TRC);
11560   Register srcPhi = MRI.createVirtualRegister(TRC);
11561   Register destLoop = MRI.createVirtualRegister(TRC);
11562   Register destPhi = MRI.createVirtualRegister(TRC);
11563 
11564   BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
11565     .addReg(varLoop).addMBB(loopMBB)
11566     .addReg(varEnd).addMBB(entryBB);
11567   BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
11568     .addReg(srcLoop).addMBB(loopMBB)
11569     .addReg(src).addMBB(entryBB);
11570   BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
11571     .addReg(destLoop).addMBB(loopMBB)
11572     .addReg(dest).addMBB(entryBB);
11573 
11574   //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
  //   [destLoop] = STR_POST(scratch, destPhi, UnitSize)
11576   Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
11577   emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
11578              IsThumb1, IsThumb2);
11579   emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
11580              IsThumb1, IsThumb2);
11581 
11582   // Decrement loop variable by UnitSize.
11583   if (IsThumb1) {
11584     BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop)
11585         .add(t1CondCodeOp())
11586         .addReg(varPhi)
11587         .addImm(UnitSize)
11588         .add(predOps(ARMCC::AL));
11589   } else {
11590     MachineInstrBuilder MIB =
11591         BuildMI(*BB, BB->end(), dl,
11592                 TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
11593     MIB.addReg(varPhi)
11594         .addImm(UnitSize)
11595         .add(predOps(ARMCC::AL))
11596         .add(condCodeOp());
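    // Turn the optional cc_out operand (operand 5) into a CPSR def so the
    // subtraction sets the flags consumed by the conditional branch below.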
11597     MIB->getOperand(5).setReg(ARM::CPSR);
11598     MIB->getOperand(5).setIsDef(true);
11599   }
11600   BuildMI(*BB, BB->end(), dl,
11601           TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
11602       .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
11603 
11604   // loopMBB can loop back to loopMBB or fall through to exitMBB.
11605   BB->addSuccessor(loopMBB);
11606   BB->addSuccessor(exitMBB);
11607 
11608   // Add epilogue to handle BytesLeft.
11609   BB = exitMBB;
11610   auto StartOfExit = exitMBB->begin();
11611 
11612   //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
11613   //   [destOut] = STRB_POST(scratch, destLoop, 1)
11614   unsigned srcIn = srcLoop;
11615   unsigned destIn = destLoop;
11616   for (unsigned i = 0; i < BytesLeft; i++) {
11617     Register srcOut = MRI.createVirtualRegister(TRC);
11618     Register destOut = MRI.createVirtualRegister(TRC);
11619     Register scratch = MRI.createVirtualRegister(TRC);
11620     emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
11621                IsThumb1, IsThumb2);
11622     emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
11623                IsThumb1, IsThumb2);
11624     srcIn = srcOut;
11625     destIn = destOut;
11626   }
11627 
11628   MI.eraseFromParent(); // The instruction is gone now.
11629   return BB;
11630 }
11631 
11632 MachineBasicBlock *
11633 ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
11634                                        MachineBasicBlock *MBB) const {
11635   const TargetMachine &TM = getTargetMachine();
11636   const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
11637   DebugLoc DL = MI.getDebugLoc();
11638 
11639   assert(Subtarget->isTargetWindows() &&
11640          "__chkstk is only supported on Windows");
11641   assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");
11642 
11643   // __chkstk takes the number of words to allocate on the stack in R4, and
11644   // returns the stack adjustment in number of bytes in R4.  This will not
  // clobber any other registers (other than the obvious lr).
11646   //
11647   // Although, technically, IP should be considered a register which may be
11648   // clobbered, the call itself will not touch it.  Windows on ARM is a pure
11649   // thumb-2 environment, so there is no interworking required.  As a result, we
11650   // do not expect a veneer to be emitted by the linker, clobbering IP.
11651   //
11652   // Each module receives its own copy of __chkstk, so no import thunk is
11653   // required, again, ensuring that IP is not clobbered.
11654   //
11655   // Finally, although some linkers may theoretically provide a trampoline for
11656   // out of range calls (which is quite common due to a 32M range limitation of
11657   // branches for Thumb), we can generate the long-call version via
11658   // -mcmodel=large, alleviating the need for the trampoline which may clobber
11659   // IP.
11660 
11661   switch (TM.getCodeModel()) {
11662   case CodeModel::Tiny:
11663     llvm_unreachable("Tiny code model not available on ARM.");
11664   case CodeModel::Small:
11665   case CodeModel::Medium:
11666   case CodeModel::Kernel:
11667     BuildMI(*MBB, MI, DL, TII.get(ARM::tBL))
11668         .add(predOps(ARMCC::AL))
11669         .addExternalSymbol("__chkstk")
11670         .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
11671         .addReg(ARM::R4, RegState::Implicit | RegState::Define)
11672         .addReg(ARM::R12,
11673                 RegState::Implicit | RegState::Define | RegState::Dead)
11674         .addReg(ARM::CPSR,
11675                 RegState::Implicit | RegState::Define | RegState::Dead);
11676     break;
11677   case CodeModel::Large: {
11678     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
11679     Register Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass);
11680 
11681     BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg)
11682       .addExternalSymbol("__chkstk");
11683     BuildMI(*MBB, MI, DL, TII.get(gettBLXrOpcode(*MBB->getParent())))
11684         .add(predOps(ARMCC::AL))
11685         .addReg(Reg, RegState::Kill)
11686         .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
11687         .addReg(ARM::R4, RegState::Implicit | RegState::Define)
11688         .addReg(ARM::R12,
11689                 RegState::Implicit | RegState::Define | RegState::Dead)
11690         .addReg(ARM::CPSR,
11691                 RegState::Implicit | RegState::Define | RegState::Dead);
11692     break;
11693   }
11694   }
11695 
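  // __chkstk returns the stack adjustment in bytes in R4; subtract it from SP
  // to perform the actual allocation.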
11696   BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP)
11697       .addReg(ARM::SP, RegState::Kill)
11698       .addReg(ARM::R4, RegState::Kill)
11699       .setMIFlags(MachineInstr::FrameSetup)
11700       .add(predOps(ARMCC::AL))
11701       .add(condCodeOp());
11702 
11703   MI.eraseFromParent();
11704   return MBB;
11705 }
11706 
11707 MachineBasicBlock *
11708 ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI,
11709                                        MachineBasicBlock *MBB) const {
11710   DebugLoc DL = MI.getDebugLoc();
11711   MachineFunction *MF = MBB->getParent();
11712   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
11713 
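  // Split everything after the pseudo into a continuation block, then compare
  // the checked register against zero and branch to a trap block that executes
  // __brkdiv0 when it is zero.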
11714   MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock();
11715   MF->insert(++MBB->getIterator(), ContBB);
11716   ContBB->splice(ContBB->begin(), MBB,
11717                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11718   ContBB->transferSuccessorsAndUpdatePHIs(MBB);
11719   MBB->addSuccessor(ContBB);
11720 
11721   MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
11722   BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0));
11723   MF->push_back(TrapBB);
11724   MBB->addSuccessor(TrapBB);
11725 
11726   BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8))
11727       .addReg(MI.getOperand(0).getReg())
11728       .addImm(0)
11729       .add(predOps(ARMCC::AL));
11730   BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc))
11731       .addMBB(TrapBB)
11732       .addImm(ARMCC::EQ)
11733       .addReg(ARM::CPSR);
11734 
11735   MI.eraseFromParent();
11736   return ContBB;
11737 }
11738 
11739 // The CPSR operand of SelectItr might be missing a kill marker
11740 // because there were multiple uses of CPSR, and ISel didn't know
11741 // which to mark. Figure out whether SelectItr should have had a
11742 // kill marker, and set it if it should. Returns the correct kill
11743 // marker value.
11744 static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr,
11745                                    MachineBasicBlock* BB,
11746                                    const TargetRegisterInfo* TRI) {
11747   // Scan forward through BB for a use/def of CPSR.
11748   MachineBasicBlock::iterator miI(std::next(SelectItr));
11749   for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
11750     const MachineInstr& mi = *miI;
11751     if (mi.readsRegister(ARM::CPSR))
11752       return false;
11753     if (mi.definesRegister(ARM::CPSR))
11754       break; // Should have kill-flag - update below.
11755   }
11756 
11757   // If we hit the end of the block, check whether CPSR is live into a
11758   // successor.
11759   if (miI == BB->end()) {
11760     for (MachineBasicBlock *Succ : BB->successors())
11761       if (Succ->isLiveIn(ARM::CPSR))
11762         return false;
11763   }
11764 
11765   // We found a def, or hit the end of the basic block and CPSR wasn't live
11766   // out. SelectMI should have a kill flag on CPSR.
11767   SelectItr->addRegisterKilled(ARM::CPSR, TRI);
11768   return true;
11769 }
11770 
/// Adds logic in the loop entry MBB to calculate the loop iteration count and
/// adds t2WhileLoopSetup and t2WhileLoopStart to generate a WLS loop.
11773 static Register genTPEntry(MachineBasicBlock *TpEntry,
11774                            MachineBasicBlock *TpLoopBody,
11775                            MachineBasicBlock *TpExit, Register OpSizeReg,
11776                            const TargetInstrInfo *TII, DebugLoc Dl,
11777                            MachineRegisterInfo &MRI) {
11778   // Calculates loop iteration count = ceil(n/16) = (n + 15) >> 4.
11779   Register AddDestReg = MRI.createVirtualRegister(&ARM::rGPRRegClass);
11780   BuildMI(TpEntry, Dl, TII->get(ARM::t2ADDri), AddDestReg)
11781       .addUse(OpSizeReg)
11782       .addImm(15)
11783       .add(predOps(ARMCC::AL))
11784       .addReg(0);
11785 
11786   Register LsrDestReg = MRI.createVirtualRegister(&ARM::rGPRRegClass);
11787   BuildMI(TpEntry, Dl, TII->get(ARM::t2LSRri), LsrDestReg)
11788       .addUse(AddDestReg, RegState::Kill)
11789       .addImm(4)
11790       .add(predOps(ARMCC::AL))
11791       .addReg(0);
11792 
11793   Register TotalIterationsReg = MRI.createVirtualRegister(&ARM::GPRlrRegClass);
11794   BuildMI(TpEntry, Dl, TII->get(ARM::t2WhileLoopSetup), TotalIterationsReg)
11795       .addUse(LsrDestReg, RegState::Kill);
11796 
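  // t2WhileLoopStart branches to TpExit when the iteration count is zero;
  // otherwise control falls into the loop body via the branch below.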
11797   BuildMI(TpEntry, Dl, TII->get(ARM::t2WhileLoopStart))
11798       .addUse(TotalIterationsReg)
11799       .addMBB(TpExit);
11800 
11801   BuildMI(TpEntry, Dl, TII->get(ARM::t2B))
11802       .addMBB(TpLoopBody)
11803       .add(predOps(ARMCC::AL));
11804 
11805   return TotalIterationsReg;
11806 }
11807 
/// Adds logic in the loopBody MBB to generate MVE_VCTP, t2LoopDec and
/// t2LoopEnd. These are used by later passes to generate tail-predicated
/// loops.
11811 static void genTPLoopBody(MachineBasicBlock *TpLoopBody,
11812                           MachineBasicBlock *TpEntry, MachineBasicBlock *TpExit,
11813                           const TargetInstrInfo *TII, DebugLoc Dl,
11814                           MachineRegisterInfo &MRI, Register OpSrcReg,
11815                           Register OpDestReg, Register ElementCountReg,
11816                           Register TotalIterationsReg, bool IsMemcpy) {
  // First insert the PHI nodes: the current position in the Src array (only
  // for memcpy), the current position in the Dest array, the loop iteration
  // counter, and the predication counter.
11819 
11820   Register SrcPhiReg, CurrSrcReg;
11821   if (IsMemcpy) {
11822     //  Current position in the src array
11823     SrcPhiReg = MRI.createVirtualRegister(&ARM::rGPRRegClass);
11824     CurrSrcReg = MRI.createVirtualRegister(&ARM::rGPRRegClass);
11825     BuildMI(TpLoopBody, Dl, TII->get(ARM::PHI), SrcPhiReg)
11826         .addUse(OpSrcReg)
11827         .addMBB(TpEntry)
11828         .addUse(CurrSrcReg)
11829         .addMBB(TpLoopBody);
11830   }
11831 
11832   // Current position in the dest array
11833   Register DestPhiReg = MRI.createVirtualRegister(&ARM::rGPRRegClass);
11834   Register CurrDestReg = MRI.createVirtualRegister(&ARM::rGPRRegClass);
11835   BuildMI(TpLoopBody, Dl, TII->get(ARM::PHI), DestPhiReg)
11836       .addUse(OpDestReg)
11837       .addMBB(TpEntry)
11838       .addUse(CurrDestReg)
11839       .addMBB(TpLoopBody);
11840 
11841   // Current loop counter
11842   Register LoopCounterPhiReg = MRI.createVirtualRegister(&ARM::GPRlrRegClass);
11843   Register RemainingLoopIterationsReg =
11844       MRI.createVirtualRegister(&ARM::GPRlrRegClass);
11845   BuildMI(TpLoopBody, Dl, TII->get(ARM::PHI), LoopCounterPhiReg)
11846       .addUse(TotalIterationsReg)
11847       .addMBB(TpEntry)
11848       .addUse(RemainingLoopIterationsReg)
11849       .addMBB(TpLoopBody);
11850 
11851   // Predication counter
11852   Register PredCounterPhiReg = MRI.createVirtualRegister(&ARM::rGPRRegClass);
11853   Register RemainingElementsReg = MRI.createVirtualRegister(&ARM::rGPRRegClass);
11854   BuildMI(TpLoopBody, Dl, TII->get(ARM::PHI), PredCounterPhiReg)
11855       .addUse(ElementCountReg)
11856       .addMBB(TpEntry)
11857       .addUse(RemainingElementsReg)
11858       .addMBB(TpLoopBody);
11859 
11860   // Pass predication counter to VCTP
11861   Register VccrReg = MRI.createVirtualRegister(&ARM::VCCRRegClass);
11862   BuildMI(TpLoopBody, Dl, TII->get(ARM::MVE_VCTP8), VccrReg)
11863       .addUse(PredCounterPhiReg)
11864       .addImm(ARMVCC::None)
11865       .addReg(0)
11866       .addReg(0);
11867 
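  // Decrement the element (predication) counter by the 16 byte lanes
  // processed each iteration.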
11868   BuildMI(TpLoopBody, Dl, TII->get(ARM::t2SUBri), RemainingElementsReg)
11869       .addUse(PredCounterPhiReg)
11870       .addImm(16)
11871       .add(predOps(ARMCC::AL))
11872       .addReg(0);
11873 
11874   // VLDRB (only if memcpy) and VSTRB instructions, predicated using VPR
11875   Register SrcValueReg;
11876   if (IsMemcpy) {
11877     SrcValueReg = MRI.createVirtualRegister(&ARM::MQPRRegClass);
11878     BuildMI(TpLoopBody, Dl, TII->get(ARM::MVE_VLDRBU8_post))
11879         .addDef(CurrSrcReg)
11880         .addDef(SrcValueReg)
11881         .addReg(SrcPhiReg)
11882         .addImm(16)
11883         .addImm(ARMVCC::Then)
11884         .addUse(VccrReg)
11885         .addReg(0);
11886   } else
11887     SrcValueReg = OpSrcReg;
11888 
11889   BuildMI(TpLoopBody, Dl, TII->get(ARM::MVE_VSTRBU8_post))
11890       .addDef(CurrDestReg)
11891       .addUse(SrcValueReg)
11892       .addReg(DestPhiReg)
11893       .addImm(16)
11894       .addImm(ARMVCC::Then)
11895       .addUse(VccrReg)
11896       .addReg(0);
11897 
  // Add the pseudo instructions for decrementing the loop counter and marking
  // the end: t2LoopDec and t2LoopEnd.
11900   BuildMI(TpLoopBody, Dl, TII->get(ARM::t2LoopDec), RemainingLoopIterationsReg)
11901       .addUse(LoopCounterPhiReg)
11902       .addImm(1);
11903 
11904   BuildMI(TpLoopBody, Dl, TII->get(ARM::t2LoopEnd))
11905       .addUse(RemainingLoopIterationsReg)
11906       .addMBB(TpLoopBody);
11907 
11908   BuildMI(TpLoopBody, Dl, TII->get(ARM::t2B))
11909       .addMBB(TpExit)
11910       .add(predOps(ARMCC::AL));
11911 }
11912 
11913 MachineBasicBlock *
11914 ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
11915                                                MachineBasicBlock *BB) const {
11916   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
11917   DebugLoc dl = MI.getDebugLoc();
11918   bool isThumb2 = Subtarget->isThumb2();
11919   switch (MI.getOpcode()) {
11920   default: {
11921     MI.print(errs());
11922     llvm_unreachable("Unexpected instr type to insert");
11923   }
11924 
11925   // Thumb1 post-indexed loads are really just single-register LDMs.
11926   case ARM::tLDR_postidx: {
11927     MachineOperand Def(MI.getOperand(1));
11928     BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD))
11929         .add(Def)  // Rn_wb
11930         .add(MI.getOperand(2))  // Rn
11931         .add(MI.getOperand(3))  // PredImm
11932         .add(MI.getOperand(4))  // PredReg
11933         .add(MI.getOperand(0))  // Rt
11934         .cloneMemRefs(MI);
11935     MI.eraseFromParent();
11936     return BB;
11937   }
11938 
11939   case ARM::MVE_MEMCPYLOOPINST:
11940   case ARM::MVE_MEMSETLOOPINST: {
11941 
    // The transformation below expands the MVE_MEMCPYLOOPINST and
    // MVE_MEMSETLOOPINST pseudos into a Tail Predicated (TP) loop. It adds the
    // instructions to calculate the iteration count (= ceil(size_in_bytes / 16))
    // in the TP entry block and adds the relevant instructions in the TP loop
    // body for the generation of a WLSTP loop.
11947 
    // Below is the relevant portion of the CFG after the transformation.
11949     // The Machine Basic Blocks are shown along with branch conditions (in
11950     // brackets). Note that TP entry/exit MBBs depict the entry/exit of this
11951     // portion of the CFG and may not necessarily be the entry/exit of the
11952     // function.
11953 
11954     //             (Relevant) CFG after transformation:
11955     //               TP entry MBB
11956     //                   |
11957     //          |-----------------|
11958     //       (n <= 0)          (n > 0)
11959     //          |                 |
11960     //          |         TP loop Body MBB<--|
11961     //          |                |           |
11962     //           \               |___________|
11963     //            \             /
11964     //              TP exit MBB
11965 
11966     MachineFunction *MF = BB->getParent();
11967     MachineFunctionProperties &Properties = MF->getProperties();
11968     MachineRegisterInfo &MRI = MF->getRegInfo();
11969 
11970     Register OpDestReg = MI.getOperand(0).getReg();
11971     Register OpSrcReg = MI.getOperand(1).getReg();
11972     Register OpSizeReg = MI.getOperand(2).getReg();
11973 
11974     // Allocate the required MBBs and add to parent function.
11975     MachineBasicBlock *TpEntry = BB;
11976     MachineBasicBlock *TpLoopBody = MF->CreateMachineBasicBlock();
11977     MachineBasicBlock *TpExit;
11978 
11979     MF->push_back(TpLoopBody);
11980 
11981     // If any instructions are present in the current block after
11982     // MVE_MEMCPYLOOPINST or MVE_MEMSETLOOPINST, split the current block and
    // move the instructions into the newly created exit block. If there are no
    // instructions, add an explicit branch to the fall-through block and then
    // split.
11986     //
11987     // The split is required for two reasons:
    // 1) A terminator (t2WhileLoopStart) will be placed at that site.
11989     // 2) Since a TPLoopBody will be added later, any phis in successive blocks
11990     //    need to be updated. splitAt() already handles this.
11991     TpExit = BB->splitAt(MI, false);
11992     if (TpExit == BB) {
11993       assert(BB->canFallThrough() && "Exit Block must be Fallthrough of the "
11994                                      "block containing memcpy/memset Pseudo");
11995       TpExit = BB->getFallThrough();
11996       BuildMI(BB, dl, TII->get(ARM::t2B))
11997           .addMBB(TpExit)
11998           .add(predOps(ARMCC::AL));
11999       TpExit = BB->splitAt(MI, false);
12000     }
12001 
12002     // Add logic for iteration count
12003     Register TotalIterationsReg =
12004         genTPEntry(TpEntry, TpLoopBody, TpExit, OpSizeReg, TII, dl, MRI);
12005 
12006     // Add the vectorized (and predicated) loads/store instructions
12007     bool IsMemcpy = MI.getOpcode() == ARM::MVE_MEMCPYLOOPINST;
12008     genTPLoopBody(TpLoopBody, TpEntry, TpExit, TII, dl, MRI, OpSrcReg,
12009                   OpDestReg, OpSizeReg, TotalIterationsReg, IsMemcpy);
12010 
12011     // Required to avoid conflict with the MachineVerifier during testing.
12012     Properties.reset(MachineFunctionProperties::Property::NoPHIs);
12013 
12014     // Connect the blocks
12015     TpEntry->addSuccessor(TpLoopBody);
12016     TpLoopBody->addSuccessor(TpLoopBody);
12017     TpLoopBody->addSuccessor(TpExit);
12018 
12019     // Reorder for a more natural layout
12020     TpLoopBody->moveAfter(TpEntry);
12021     TpExit->moveAfter(TpLoopBody);
12022 
12023     // Finally, remove the memcpy Pseudo Instruction
12024     MI.eraseFromParent();
12025 
12026     // Return the exit block as it may contain other instructions requiring a
12027     // custom inserter
12028     return TpExit;
12029   }
12030 
  // The Thumb2 pre-indexed stores have the same MI operands; the .td files
  // just define them differently from the isel patterns, so they need
  // pseudos.
12034   case ARM::t2STR_preidx:
12035     MI.setDesc(TII->get(ARM::t2STR_PRE));
12036     return BB;
12037   case ARM::t2STRB_preidx:
12038     MI.setDesc(TII->get(ARM::t2STRB_PRE));
12039     return BB;
12040   case ARM::t2STRH_preidx:
12041     MI.setDesc(TII->get(ARM::t2STRH_PRE));
12042     return BB;
12043 
12044   case ARM::STRi_preidx:
12045   case ARM::STRBi_preidx: {
12046     unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM
12047                                                          : ARM::STRB_PRE_IMM;
12048     // Decode the offset.
12049     unsigned Offset = MI.getOperand(4).getImm();
12050     bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub;
12051     Offset = ARM_AM::getAM2Offset(Offset);
12052     if (isSub)
12053       Offset = -Offset;
12054 
12055     MachineMemOperand *MMO = *MI.memoperands_begin();
12056     BuildMI(*BB, MI, dl, TII->get(NewOpc))
12057         .add(MI.getOperand(0)) // Rn_wb
12058         .add(MI.getOperand(1)) // Rt
12059         .add(MI.getOperand(2)) // Rn
12060         .addImm(Offset)        // offset (skip GPR==zero_reg)
12061         .add(MI.getOperand(5)) // pred
12062         .add(MI.getOperand(6))
12063         .addMemOperand(MMO);
12064     MI.eraseFromParent();
12065     return BB;
12066   }
12067   case ARM::STRr_preidx:
12068   case ARM::STRBr_preidx:
12069   case ARM::STRH_preidx: {
12070     unsigned NewOpc;
12071     switch (MI.getOpcode()) {
12072     default: llvm_unreachable("unexpected opcode!");
12073     case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break;
12074     case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break;
12075     case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break;
12076     }
12077     MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc));
12078     for (const MachineOperand &MO : MI.operands())
12079       MIB.add(MO);
12080     MI.eraseFromParent();
12081     return BB;
12082   }
12083 
12084   case ARM::tMOVCCr_pseudo: {
12085     // To "insert" a SELECT_CC instruction, we actually have to insert the
12086     // diamond control-flow pattern.  The incoming instruction knows the
12087     // destination vreg to set, the condition code register to branch on, the
12088     // true/false values to select between, and a branch opcode to use.
12089     const BasicBlock *LLVM_BB = BB->getBasicBlock();
12090     MachineFunction::iterator It = ++BB->getIterator();
12091 
12092     //  thisMBB:
12093     //  ...
12094     //   TrueVal = ...
12095     //   cmpTY ccX, r1, r2
12096     //   bCC copy1MBB
12097     //   fallthrough --> copy0MBB
12098     MachineBasicBlock *thisMBB  = BB;
12099     MachineFunction *F = BB->getParent();
12100     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
12101     MachineBasicBlock *sinkMBB  = F->CreateMachineBasicBlock(LLVM_BB);
12102     F->insert(It, copy0MBB);
12103     F->insert(It, sinkMBB);
12104 
12105     // Set the call frame size on entry to the new basic blocks.
12106     unsigned CallFrameSize = TII->getCallFrameSizeAt(MI);
12107     copy0MBB->setCallFrameSize(CallFrameSize);
12108     sinkMBB->setCallFrameSize(CallFrameSize);
12109 
12110     // Check whether CPSR is live past the tMOVCCr_pseudo.
12111     const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
12112     if (!MI.killsRegister(ARM::CPSR) &&
12113         !checkAndUpdateCPSRKill(MI, thisMBB, TRI)) {
12114       copy0MBB->addLiveIn(ARM::CPSR);
12115       sinkMBB->addLiveIn(ARM::CPSR);
12116     }
12117 
12118     // Transfer the remainder of BB and its successor edges to sinkMBB.
12119     sinkMBB->splice(sinkMBB->begin(), BB,
12120                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12121     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
12122 
12123     BB->addSuccessor(copy0MBB);
12124     BB->addSuccessor(sinkMBB);
12125 
12126     BuildMI(BB, dl, TII->get(ARM::tBcc))
12127         .addMBB(sinkMBB)
12128         .addImm(MI.getOperand(3).getImm())
12129         .addReg(MI.getOperand(4).getReg());
12130 
12131     //  copy0MBB:
12132     //   %FalseValue = ...
12133     //   # fallthrough to sinkMBB
12134     BB = copy0MBB;
12135 
12136     // Update machine-CFG edges
12137     BB->addSuccessor(sinkMBB);
12138 
12139     //  sinkMBB:
12140     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
12141     //  ...
12142     BB = sinkMBB;
12143     BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg())
12144         .addReg(MI.getOperand(1).getReg())
12145         .addMBB(copy0MBB)
12146         .addReg(MI.getOperand(2).getReg())
12147         .addMBB(thisMBB);
12148 
12149     MI.eraseFromParent(); // The pseudo instruction is gone now.
12150     return BB;
12151   }
12152 
12153   case ARM::BCCi64:
12154   case ARM::BCCZi64: {
12155     // If there is an unconditional branch to the other successor, remove it.
12156     BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end());
12157 
12158     // Compare both parts that make up the double comparison separately for
12159     // equality.
12160     bool RHSisZero = MI.getOpcode() == ARM::BCCZi64;
12161 
12162     Register LHS1 = MI.getOperand(1).getReg();
12163     Register LHS2 = MI.getOperand(2).getReg();
12164     if (RHSisZero) {
12165       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
12166           .addReg(LHS1)
12167           .addImm(0)
12168           .add(predOps(ARMCC::AL));
12169       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
12170         .addReg(LHS2).addImm(0)
12171         .addImm(ARMCC::EQ).addReg(ARM::CPSR);
12172     } else {
12173       Register RHS1 = MI.getOperand(3).getReg();
12174       Register RHS2 = MI.getOperand(4).getReg();
12175       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
12176           .addReg(LHS1)
12177           .addReg(RHS1)
12178           .add(predOps(ARMCC::AL));
12179       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
12180         .addReg(LHS2).addReg(RHS2)
12181         .addImm(ARMCC::EQ).addReg(ARM::CPSR);
12182     }
12183 
12184     MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB();
12185     MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
12186     if (MI.getOperand(0).getImm() == ARMCC::NE)
12187       std::swap(destMBB, exitMBB);
12188 
12189     BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
12190       .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
12191     if (isThumb2)
12192       BuildMI(BB, dl, TII->get(ARM::t2B))
12193           .addMBB(exitMBB)
12194           .add(predOps(ARMCC::AL));
12195     else
      BuildMI(BB, dl, TII->get(ARM::B)).addMBB(exitMBB);
12197 
12198     MI.eraseFromParent(); // The pseudo instruction is gone now.
12199     return BB;
12200   }
12201 
12202   case ARM::Int_eh_sjlj_setjmp:
12203   case ARM::Int_eh_sjlj_setjmp_nofp:
12204   case ARM::tInt_eh_sjlj_setjmp:
12205   case ARM::t2Int_eh_sjlj_setjmp:
12206   case ARM::t2Int_eh_sjlj_setjmp_nofp:
12207     return BB;
12208 
12209   case ARM::Int_eh_sjlj_setup_dispatch:
12210     EmitSjLjDispatchBlock(MI, BB);
12211     return BB;
12212 
12213   case ARM::ABS:
12214   case ARM::t2ABS: {
12215     // To insert an ABS instruction, we have to insert the
12216     // diamond control-flow pattern.  The incoming instruction knows the
12217     // source vreg to test against 0, the destination vreg to set,
12218     // the condition code register to branch on, the
12219     // true/false values to select between, and a branch opcode to use.
12220     // It transforms
12221     //     V1 = ABS V0
12222     // into
12223     //     V2 = MOVS V0
12224     //     BCC                      (branch to SinkBB if V0 >= 0)
12225     //     RSBBB: V3 = RSBri V2, 0  (compute ABS if V2 < 0)
12226     //     SinkBB: V1 = PHI(V2, V3)
12227     const BasicBlock *LLVM_BB = BB->getBasicBlock();
12228     MachineFunction::iterator BBI = ++BB->getIterator();
12229     MachineFunction *Fn = BB->getParent();
12230     MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB);
12231     MachineBasicBlock *SinkBB  = Fn->CreateMachineBasicBlock(LLVM_BB);
12232     Fn->insert(BBI, RSBBB);
12233     Fn->insert(BBI, SinkBB);
12234 
12235     Register ABSSrcReg = MI.getOperand(1).getReg();
12236     Register ABSDstReg = MI.getOperand(0).getReg();
    bool ABSSrcKill = MI.getOperand(1).isKill();
12238     bool isThumb2 = Subtarget->isThumb2();
12239     MachineRegisterInfo &MRI = Fn->getRegInfo();
    // In Thumb mode, S must not be specified if the source register is the SP
    // or PC and if the destination register is the SP, so restrict the
    // register class.
12242     Register NewRsbDstReg = MRI.createVirtualRegister(
12243         isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass);
12244 
12245     // Transfer the remainder of BB and its successor edges to sinkMBB.
12246     SinkBB->splice(SinkBB->begin(), BB,
12247                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
12248     SinkBB->transferSuccessorsAndUpdatePHIs(BB);
12249 
12250     BB->addSuccessor(RSBBB);
12251     BB->addSuccessor(SinkBB);
12252 
12253     // fall through to SinkMBB
12254     RSBBB->addSuccessor(SinkBB);
12255 
12256     // insert a cmp at the end of BB
12257     BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
12258         .addReg(ABSSrcReg)
12259         .addImm(0)
12260         .add(predOps(ARMCC::AL));
12261 
12262     // insert a bcc with opposite CC to ARMCC::MI at the end of BB
12263     BuildMI(BB, dl,
12264       TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
12265       .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);
12266 
12267     // insert rsbri in RSBBB
12268     // Note: BCC and rsbri will be converted into predicated rsbmi
    // by the if-conversion pass.
12270     BuildMI(*RSBBB, RSBBB->begin(), dl,
12271             TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
        .addReg(ABSSrcReg, ABSSrcKill ? RegState::Kill : 0)
12273         .addImm(0)
12274         .add(predOps(ARMCC::AL))
12275         .add(condCodeOp());
12276 
12277     // insert PHI in SinkBB,
12278     // reuse ABSDstReg to not change uses of ABS instruction
12279     BuildMI(*SinkBB, SinkBB->begin(), dl,
12280       TII->get(ARM::PHI), ABSDstReg)
12281       .addReg(NewRsbDstReg).addMBB(RSBBB)
12282       .addReg(ABSSrcReg).addMBB(BB);
12283 
12284     // remove ABS instruction
12285     MI.eraseFromParent();
12286 
12287     // return last added BB
12288     return SinkBB;
12289   }
12290   case ARM::COPY_STRUCT_BYVAL_I32:
12291     ++NumLoopByVals;
12292     return EmitStructByval(MI, BB);
12293   case ARM::WIN__CHKSTK:
12294     return EmitLowered__chkstk(MI, BB);
12295   case ARM::WIN__DBZCHK:
12296     return EmitLowered__dbzchk(MI, BB);
12297   }
12298 }
12299 
12300 /// Attaches vregs to MEMCPY that it will use as scratch registers
12301 /// when it is expanded into LDM/STM. This is done as a post-isel lowering
12302 /// instead of as a custom inserter because we need the use list from the SDNode.
12303 static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
12304                                     MachineInstr &MI, const SDNode *Node) {
12305   bool isThumb1 = Subtarget->isThumb1Only();
12306 
12307   DebugLoc DL = MI.getDebugLoc();
12308   MachineFunction *MF = MI.getParent()->getParent();
12309   MachineRegisterInfo &MRI = MF->getRegInfo();
12310   MachineInstrBuilder MIB(*MF, MI);
12311 
12312   // If the new dst/src is unused mark it as dead.
12313   if (!Node->hasAnyUseOfValue(0)) {
12314     MI.getOperand(0).setIsDead(true);
12315   }
12316   if (!Node->hasAnyUseOfValue(1)) {
12317     MI.getOperand(1).setIsDead(true);
12318   }
12319 
12320   // The MEMCPY both defines and kills the scratch registers.
12321   for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) {
12322     Register TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
12323                                                          : &ARM::GPRRegClass);
12324     MIB.addReg(TmpReg, RegState::Define|RegState::Dead);
12325   }
12326 }
12327 
12328 void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
12329                                                       SDNode *Node) const {
12330   if (MI.getOpcode() == ARM::MEMCPY) {
12331     attachMEMCPYScratchRegs(Subtarget, MI, Node);
12332     return;
12333   }
12334 
12335   const MCInstrDesc *MCID = &MI.getDesc();
12336   // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB,
12337   // RSC. Coming out of isel, they have an implicit CPSR def, but the optional
12338   // operand is still set to noreg. If needed, set the optional operand's
12339   // register to CPSR, and remove the redundant implicit def.
12340   //
12341   // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR).
12342 
12343   // Rename pseudo opcodes.
12344   unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
12345   unsigned ccOutIdx;
12346   if (NewOpc) {
12347     const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
12348     MCID = &TII->get(NewOpc);
12349 
12350     assert(MCID->getNumOperands() ==
12351            MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize()
12352         && "converted opcode should be the same except for cc_out"
12353            " (and, on Thumb1, pred)");
12354 
12355     MI.setDesc(*MCID);
12356 
12357     // Add the optional cc_out operand
12358     MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));
12359 
12360     // On Thumb1, move all input operands to the end, then add the predicate
12361     if (Subtarget->isThumb1Only()) {
12362       for (unsigned c = MCID->getNumOperands() - 4; c--;) {
12363         MI.addOperand(MI.getOperand(1));
12364         MI.removeOperand(1);
12365       }
12366 
12367       // Restore the ties
12368       for (unsigned i = MI.getNumOperands(); i--;) {
12369         const MachineOperand& op = MI.getOperand(i);
12370         if (op.isReg() && op.isUse()) {
12371           int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO);
12372           if (DefIdx != -1)
12373             MI.tieOperands(DefIdx, i);
12374         }
12375       }
12376 
12377       MI.addOperand(MachineOperand::CreateImm(ARMCC::AL));
12378       MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false));
12379       ccOutIdx = 1;
12380     } else
12381       ccOutIdx = MCID->getNumOperands() - 1;
12382   } else
12383     ccOutIdx = MCID->getNumOperands() - 1;
12384 
12385   // Any ARM instruction that sets the 's' bit should specify an optional
12386   // "cc_out" operand in the last operand position.
12387   if (!MI.hasOptionalDef() || !MCID->operands()[ccOutIdx].isOptionalDef()) {
12388     assert(!NewOpc && "Optional cc_out operand required");
12389     return;
12390   }
12391   // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it
12392   // since we already have an optional CPSR def.
12393   bool definesCPSR = false;
12394   bool deadCPSR = false;
12395   for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e;
12396        ++i) {
12397     const MachineOperand &MO = MI.getOperand(i);
12398     if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
12399       definesCPSR = true;
12400       if (MO.isDead())
12401         deadCPSR = true;
12402       MI.removeOperand(i);
12403       break;
12404     }
12405   }
12406   if (!definesCPSR) {
12407     assert(!NewOpc && "Optional cc_out operand required");
12408     return;
12409   }
12410   assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
12411   if (deadCPSR) {
12412     assert(!MI.getOperand(ccOutIdx).getReg() &&
12413            "expect uninitialized optional cc_out operand");
12414     // Thumb1 instructions must have the S bit even if the CPSR is dead.
12415     if (!Subtarget->isThumb1Only())
12416       return;
12417   }
12418 
12419   // If this instruction was defined with an optional CPSR def and its dag node
12420   // had a live implicit CPSR def, then activate the optional CPSR def.
12421   MachineOperand &MO = MI.getOperand(ccOutIdx);
12422   MO.setReg(ARM::CPSR);
12423   MO.setIsDef(true);
12424 }
12425 
12426 //===----------------------------------------------------------------------===//
12427 //                           ARM Optimization Hooks
12428 //===----------------------------------------------------------------------===//
12429 
12430 // Helper function that checks if N is a null or all ones constant.
12431 static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
12432   return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
12433 }
12434 
12435 // Return true if N is conditionally 0 or all ones.
12436 // Detects these expressions where cc is an i1 value:
12437 //
12438 //   (select cc 0, y)   [AllOnes=0]
12439 //   (select cc y, 0)   [AllOnes=0]
12440 //   (zext cc)          [AllOnes=0]
12441 //   (sext cc)          [AllOnes=0/1]
12442 //   (select cc -1, y)  [AllOnes=1]
12443 //   (select cc y, -1)  [AllOnes=1]
12444 //
12445 // Invert is set when N is the null/all ones constant when CC is false.
12446 // OtherOp is set to the alternative value of N.
12447 static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
12448                                        SDValue &CC, bool &Invert,
12449                                        SDValue &OtherOp,
12450                                        SelectionDAG &DAG) {
12451   switch (N->getOpcode()) {
12452   default: return false;
12453   case ISD::SELECT: {
12454     CC = N->getOperand(0);
12455     SDValue N1 = N->getOperand(1);
12456     SDValue N2 = N->getOperand(2);
12457     if (isZeroOrAllOnes(N1, AllOnes)) {
12458       Invert = false;
12459       OtherOp = N2;
12460       return true;
12461     }
12462     if (isZeroOrAllOnes(N2, AllOnes)) {
12463       Invert = true;
12464       OtherOp = N1;
12465       return true;
12466     }
12467     return false;
12468   }
12469   case ISD::ZERO_EXTEND:
12470     // (zext cc) can never be the all ones value.
12471     if (AllOnes)
12472       return false;
12473     [[fallthrough]];
12474   case ISD::SIGN_EXTEND: {
12475     SDLoc dl(N);
12476     EVT VT = N->getValueType(0);
12477     CC = N->getOperand(0);
12478     if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC)
12479       return false;
12480     Invert = !AllOnes;
12481     if (AllOnes)
12482       // When looking for an AllOnes constant, N is an sext, and the 'other'
12483       // value is 0.
12484       OtherOp = DAG.getConstant(0, dl, VT);
12485     else if (N->getOpcode() == ISD::ZERO_EXTEND)
12486       // When looking for a 0 constant, N can be zext or sext.
12487       OtherOp = DAG.getConstant(1, dl, VT);
12488     else
12489       OtherOp = DAG.getAllOnesConstant(dl, VT);
12490     return true;
12491   }
12492   }
12493 }
12494 
12495 // Combine a constant select operand into its use:
12496 //
//   (add (select cc, 0, c), x)  -> (select cc, x, (add x, c))
//   (sub x, (select cc, 0, c))  -> (select cc, x, (sub x, c))
//   (and (select cc, -1, c), x) -> (select cc, x, (and x, c))  [AllOnes=1]
//   (or  (select cc, 0, c), x)  -> (select cc, x, (or x, c))
//   (xor (select cc, 0, c), x)  -> (select cc, x, (xor x, c))
12502 //
12503 // The transform is rejected if the select doesn't have a constant operand that
12504 // is null, or all ones when AllOnes is set.
12505 //
12506 // Also recognize sext/zext from i1:
12507 //
12508 //   (add (zext cc), x) -> (select cc (add x, 1), x)
12509 //   (add (sext cc), x) -> (select cc (add x, -1), x)
12510 //
12511 // These transformations eventually create predicated instructions.
12512 //
12513 // @param N       The node to transform.
12514 // @param Slct    The N operand that is a select.
12515 // @param OtherOp The other N operand (x above).
12516 // @param DCI     Context.
12517 // @param AllOnes Require the select constant to be all ones instead of null.
12518 // @returns The new node, or SDValue() on failure.
12519 static
12520 SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
12521                             TargetLowering::DAGCombinerInfo &DCI,
12522                             bool AllOnes = false) {
12523   SelectionDAG &DAG = DCI.DAG;
12524   EVT VT = N->getValueType(0);
12525   SDValue NonConstantVal;
12526   SDValue CCOp;
12527   bool SwapSelectOps;
12528   if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
12529                                   NonConstantVal, DAG))
12530     return SDValue();
12531 
  // Slct is now known to be the desired identity constant when CC is true.
12533   SDValue TrueVal = OtherOp;
12534   SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
12535                                  OtherOp, NonConstantVal);
12536   // Unless SwapSelectOps says CC should be false.
12537   if (SwapSelectOps)
12538     std::swap(TrueVal, FalseVal);
12539 
12540   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
12541                      CCOp, TrueVal, FalseVal);
12542 }
12543 
12544 // Attempt combineSelectAndUse on each operand of a commutative operator N.
12545 static
12546 SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
12547                                        TargetLowering::DAGCombinerInfo &DCI) {
12548   SDValue N0 = N->getOperand(0);
12549   SDValue N1 = N->getOperand(1);
12550   if (N0.getNode()->hasOneUse())
12551     if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
12552       return Result;
12553   if (N1.getNode()->hasOneUse())
12554     if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
12555       return Result;
12556   return SDValue();
12557 }
12558 
12559 static bool IsVUZPShuffleNode(SDNode *N) {
12560   // VUZP shuffle node.
12561   if (N->getOpcode() == ARMISD::VUZP)
12562     return true;
12563 
12564   // "VUZP" on i32 is an alias for VTRN.
12565   if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32)
12566     return true;
12567 
12568   return false;
12569 }
12570 
12571 static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1,
12572                                  TargetLowering::DAGCombinerInfo &DCI,
12573                                  const ARMSubtarget *Subtarget) {
12574   // Look for ADD(VUZP.0, VUZP.1).
12575   if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() ||
12576       N0 == N1)
    return SDValue();
12578 
12579   // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD.
12580   if (!N->getValueType(0).is64BitVector())
12581     return SDValue();
12582 
12583   // Generate vpadd.
12584   SelectionDAG &DAG = DCI.DAG;
12585   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12586   SDLoc dl(N);
12587   SDNode *Unzip = N0.getNode();
12588   EVT VT = N->getValueType(0);
12589 
12590   SmallVector<SDValue, 8> Ops;
12591   Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl,
12592                                 TLI.getPointerTy(DAG.getDataLayout())));
12593   Ops.push_back(Unzip->getOperand(0));
12594   Ops.push_back(Unzip->getOperand(1));
12595 
12596   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
12597 }
12598 
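// Illustrative shape: with t = VUZP x, y (de-interleave), the DAG
//   (add (sext t:0), (sext t:1))   [or zext]
// is a widening pairwise add of concat(x, y), so it is rewritten below to
// vpaddl.sN / vpaddl.uN on the concatenation of the VUZP inputs.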
12599 static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1,
12600                                       TargetLowering::DAGCombinerInfo &DCI,
12601                                       const ARMSubtarget *Subtarget) {
12602   // Check for two extended operands.
12603   if (!(N0.getOpcode() == ISD::SIGN_EXTEND &&
12604         N1.getOpcode() == ISD::SIGN_EXTEND) &&
12605       !(N0.getOpcode() == ISD::ZERO_EXTEND &&
12606         N1.getOpcode() == ISD::ZERO_EXTEND))
12607     return SDValue();
12608 
12609   SDValue N00 = N0.getOperand(0);
12610   SDValue N10 = N1.getOperand(0);
12611 
12612   // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1))
12613   if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() ||
12614       N00 == N10)
12615     return SDValue();
12616 
12617   // We only recognize Q register paddl here; this can't be reached until
12618   // after type legalization.
12619   if (!N00.getValueType().is64BitVector() ||
12620       !N0.getValueType().is128BitVector())
12621     return SDValue();
12622 
12623   // Generate vpaddl.
12624   SelectionDAG &DAG = DCI.DAG;
12625   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12626   SDLoc dl(N);
12627   EVT VT = N->getValueType(0);
12628 
12629   SmallVector<SDValue, 8> Ops;
12630   // Form vpaddl.sN or vpaddl.uN depending on the kind of extension.
12631   unsigned Opcode;
12632   if (N0.getOpcode() == ISD::SIGN_EXTEND)
12633     Opcode = Intrinsic::arm_neon_vpaddls;
12634   else
12635     Opcode = Intrinsic::arm_neon_vpaddlu;
12636   Ops.push_back(DAG.getConstant(Opcode, dl,
12637                                 TLI.getPointerTy(DAG.getDataLayout())));
12638   EVT ElemTy = N00.getValueType().getVectorElementType();
12639   unsigned NumElts = VT.getVectorNumElements();
12640   EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2);
12641   SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT,
12642                                N00.getOperand(0), N00.getOperand(1));
12643   Ops.push_back(Concat);
12644 
12645   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
12646 }
12647 
12648 // FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in
12649 // an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is
12650 // much easier to match.
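// Illustrative shape (an assumption about the input): with
//   v = <a0,a1,a2,a3,a4,a5,a6,a7>,
// an add of build_vector(a0,a2,a4,a6) and build_vector(a1,a3,a5,a7), where
// every element is an extract_vector_elt of v at the matching even/odd index,
// is a pairwise add of v and is rewritten here to a single vpaddl of v.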
12651 static SDValue
12652 AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1,
12653                                TargetLowering::DAGCombinerInfo &DCI,
12654                                const ARMSubtarget *Subtarget) {
  // Only perform the optimization after legalization and if NEON is
  // available. We also expect both operands to be BUILD_VECTORs.
12657   if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
12658       || N0.getOpcode() != ISD::BUILD_VECTOR
12659       || N1.getOpcode() != ISD::BUILD_VECTOR)
12660     return SDValue();
12661 
12662   // Check output type since VPADDL operand elements can only be 8, 16, or 32.
12663   EVT VT = N->getValueType(0);
12664   if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
12665     return SDValue();
12666 
12667   // Check that the vector operands are of the right form.
  // N0 and N1 are BUILD_VECTOR nodes with N EXTRACT_VECTOR_ELT operands,
  // where N is the number of elements in the formed vector.
  // Each EXTRACT_VECTOR_ELT should use the same input vector and an odd or
  // even index such that we have a pairwise add pattern.
12672 
12673   // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
12674   if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
12675     return SDValue();
12676   SDValue Vec = N0->getOperand(0)->getOperand(0);
12677   SDNode *V = Vec.getNode();
12678   unsigned nextIndex = 0;
12679 
  // For each operand of the ADD (both are BUILD_VECTORs), check that each of
  // their operands is an EXTRACT_VECTOR_ELT from the same vector with the
  // appropriate index.
12683   for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
12684     if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
12685         && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
12686 
12687       SDValue ExtVec0 = N0->getOperand(i);
12688       SDValue ExtVec1 = N1->getOperand(i);
12689 
      // The first operand is the vector; verify it's the same one.
12691       if (V != ExtVec0->getOperand(0).getNode() ||
12692           V != ExtVec1->getOperand(0).getNode())
12693         return SDValue();
12694 
      // The second operand is the index constant; verify it's correct.
12696       ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
12697       ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));
12698 
      // For the constants, we want to see all the even indices in N0 and all
      // the odd indices in N1.
12700       if (!C0 || !C1 || C0->getZExtValue() != nextIndex
12701           || C1->getZExtValue() != nextIndex+1)
12702         return SDValue();
12703 
12704       // Increment index.
12705       nextIndex+=2;
12706     } else
12707       return SDValue();
12708   }
12709 
12710   // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure
12711   // we're using the entire input vector, otherwise there's a size/legality
12712   // mismatch somewhere.
12713   if (nextIndex != Vec.getValueType().getVectorNumElements() ||
12714       Vec.getValueType().getVectorElementType() == VT.getVectorElementType())
12715     return SDValue();
12716 
12717   // Create VPADDL node.
12718   SelectionDAG &DAG = DCI.DAG;
12719   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12720 
12721   SDLoc dl(N);
12722 
12723   // Build operand list.
12724   SmallVector<SDValue, 8> Ops;
12725   Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
12726                                 TLI.getPointerTy(DAG.getDataLayout())));
12727 
12728   // Input is the vector.
12729   Ops.push_back(Vec);
12730 
12731   // Get widened type and narrowed type.
12732   MVT widenType;
12733   unsigned numElem = VT.getVectorNumElements();
12734 
12735   EVT inputLaneType = Vec.getValueType().getVectorElementType();
12736   switch (inputLaneType.getSimpleVT().SimpleTy) {
12737     case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
12738     case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
12739     case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
12740     default:
12741       llvm_unreachable("Invalid vector element type for padd optimization.");
12742   }
12743 
12744   SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops);
12745   unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
12746   return DAG.getNode(ExtOp, dl, VT, tmp);
12747 }
12748 
12749 static SDValue findMUL_LOHI(SDValue V) {
12750   if (V->getOpcode() == ISD::UMUL_LOHI ||
12751       V->getOpcode() == ISD::SMUL_LOHI)
12752     return V;
12753   return SDValue();
12754 }
12755 
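// Illustrative sketch (an assumption, not taken from the sources) of source
// that produces the shape handled below:
//   int64_t acc = ...;
//   acc += (int16_t)a * (int16_t)b;   // 32-bit product, sign extended to 64
// The product's high half is formed by an SRA by 31, giving
//   (addc (mul a, b), lo) and (adde (sra (mul a, b), 31), hi),
// which is folded into SMLALBB/SMLALBT/SMLALTB/SMLALTT.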
12756 static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode,
12757                                         TargetLowering::DAGCombinerInfo &DCI,
12758                                         const ARMSubtarget *Subtarget) {
12759   if (!Subtarget->hasBaseDSP())
12760     return SDValue();
12761 
  // SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and
  // accumulate the product into a 64-bit value. The 16-bit values will
  // be sign extended somehow or SRA'd into 32-bit values
12765   // (addc (adde (mul 16bit, 16bit), lo), hi)
12766   SDValue Mul = AddcNode->getOperand(0);
12767   SDValue Lo = AddcNode->getOperand(1);
12768   if (Mul.getOpcode() != ISD::MUL) {
12769     Lo = AddcNode->getOperand(0);
12770     Mul = AddcNode->getOperand(1);
12771     if (Mul.getOpcode() != ISD::MUL)
12772       return SDValue();
12773   }
12774 
12775   SDValue SRA = AddeNode->getOperand(0);
12776   SDValue Hi = AddeNode->getOperand(1);
12777   if (SRA.getOpcode() != ISD::SRA) {
12778     SRA = AddeNode->getOperand(1);
12779     Hi = AddeNode->getOperand(0);
12780     if (SRA.getOpcode() != ISD::SRA)
12781       return SDValue();
12782   }
12783   if (auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) {
12784     if (Const->getZExtValue() != 31)
12785       return SDValue();
12786   } else
12787     return SDValue();
12788 
12789   if (SRA.getOperand(0) != Mul)
12790     return SDValue();
12791 
12792   SelectionDAG &DAG = DCI.DAG;
12793   SDLoc dl(AddcNode);
12794   unsigned Opcode = 0;
12795   SDValue Op0;
12796   SDValue Op1;
12797 
12798   if (isS16(Mul.getOperand(0), DAG) && isS16(Mul.getOperand(1), DAG)) {
12799     Opcode = ARMISD::SMLALBB;
12800     Op0 = Mul.getOperand(0);
12801     Op1 = Mul.getOperand(1);
12802   } else if (isS16(Mul.getOperand(0), DAG) && isSRA16(Mul.getOperand(1))) {
12803     Opcode = ARMISD::SMLALBT;
12804     Op0 = Mul.getOperand(0);
12805     Op1 = Mul.getOperand(1).getOperand(0);
12806   } else if (isSRA16(Mul.getOperand(0)) && isS16(Mul.getOperand(1), DAG)) {
12807     Opcode = ARMISD::SMLALTB;
12808     Op0 = Mul.getOperand(0).getOperand(0);
12809     Op1 = Mul.getOperand(1);
12810   } else if (isSRA16(Mul.getOperand(0)) && isSRA16(Mul.getOperand(1))) {
12811     Opcode = ARMISD::SMLALTT;
12812     Op0 = Mul->getOperand(0).getOperand(0);
12813     Op1 = Mul->getOperand(1).getOperand(0);
12814   }
12815 
12816   if (!Op0 || !Op1)
12817     return SDValue();
12818 
12819   SDValue SMLAL = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
12820                               Op0, Op1, Lo, Hi);
  // Replace the ADD nodes' uses with the SMLAL node's values.
12822   SDValue HiMLALResult(SMLAL.getNode(), 1);
12823   SDValue LoMLALResult(SMLAL.getNode(), 0);
12824 
12825   DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);
12826   DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);
12827 
12828   // Return original node to notify the driver to stop replacing.
12829   SDValue resNode(AddcNode, 0);
12830   return resNode;
12831 }
12832 
12833 static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode,
12834                                      TargetLowering::DAGCombinerInfo &DCI,
12835                                      const ARMSubtarget *Subtarget) {
  // Look for multiply add opportunities.
  // The pattern is an ISD::UMUL_LOHI followed by two add nodes, where
  // each add node consumes a value from ISD::UMUL_LOHI and there is
  // a glue link from the first add to the second add.
  // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
  // an S/UMLAL instruction.
12842   //                  UMUL_LOHI
12843   //                 / :lo    \ :hi
12844   //                V          \          [no multiline comment]
12845   //    loAdd ->  ADDC         |
12846   //                 \ :carry /
12847   //                  V      V
12848   //                    ADDE   <- hiAdd
12849   //
  // In the special case where only the higher part of a signed result is used
  // and the add to the low part of the result of ISD::SMUL_LOHI adds or
  // subtracts a constant with the exact value 0x80000000, we recognize we are
  // dealing with a "rounded multiply and add" (or subtract) and transform it
  // into either ARMISD::SMMLAR or ARMISD::SMMLSR respectively.
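  //
  // For example (an illustrative sketch, not taken from the sources), source
  // like
  //   acc + (int32_t)(((int64_t)a * b + 0x80000000) >> 32)
  // only uses the high half of the 64-bit result and adds 0x80000000 to the
  // low half, so it can become SMMLAR (rounded most-significant-word multiply
  // accumulate).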
12855 
12856   assert((AddeSubeNode->getOpcode() == ARMISD::ADDE ||
12857           AddeSubeNode->getOpcode() == ARMISD::SUBE) &&
12858          "Expect an ADDE or SUBE");
12859 
12860   assert(AddeSubeNode->getNumOperands() == 3 &&
12861          AddeSubeNode->getOperand(2).getValueType() == MVT::i32 &&
12862          "ADDE node has the wrong inputs");
12863 
12864   // Check that we are chained to the right ADDC or SUBC node.
12865   SDNode *AddcSubcNode = AddeSubeNode->getOperand(2).getNode();
12866   if ((AddeSubeNode->getOpcode() == ARMISD::ADDE &&
12867        AddcSubcNode->getOpcode() != ARMISD::ADDC) ||
12868       (AddeSubeNode->getOpcode() == ARMISD::SUBE &&
12869        AddcSubcNode->getOpcode() != ARMISD::SUBC))
12870     return SDValue();
12871 
12872   SDValue AddcSubcOp0 = AddcSubcNode->getOperand(0);
12873   SDValue AddcSubcOp1 = AddcSubcNode->getOperand(1);
12874 
12875   // Check if the two operands are from the same mul_lohi node.
12876   if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode())
12877     return SDValue();
12878 
12879   assert(AddcSubcNode->getNumValues() == 2 &&
12880          AddcSubcNode->getValueType(0) == MVT::i32 &&
12881          "Expect ADDC with two result values. First: i32");
12882 
  // Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it
  // may be an SMLAL which multiplies two 16-bit values.
12885   if (AddeSubeNode->getOpcode() == ARMISD::ADDE &&
12886       AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI &&
12887       AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI &&
12888       AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI &&
12889       AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI)
12890     return AddCombineTo64BitSMLAL16(AddcSubcNode, AddeSubeNode, DCI, Subtarget);
12891 
12892   // Check for the triangle shape.
12893   SDValue AddeSubeOp0 = AddeSubeNode->getOperand(0);
12894   SDValue AddeSubeOp1 = AddeSubeNode->getOperand(1);
12895 
12896   // Make sure that the ADDE/SUBE operands are not coming from the same node.
12897   if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode())
12898     return SDValue();
12899 
12900   // Find the MUL_LOHI node walking up ADDE/SUBE's operands.
12901   bool IsLeftOperandMUL = false;
12902   SDValue MULOp = findMUL_LOHI(AddeSubeOp0);
12903   if (MULOp == SDValue())
12904     MULOp = findMUL_LOHI(AddeSubeOp1);
12905   else
12906     IsLeftOperandMUL = true;
12907   if (MULOp == SDValue())
12908     return SDValue();
12909 
12910   // Figure out the right opcode.
12911   unsigned Opc = MULOp->getOpcode();
12912   unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;
12913 
12914   // Figure out the high and low input values to the MLAL node.
12915   SDValue *HiAddSub = nullptr;
12916   SDValue *LoMul = nullptr;
12917   SDValue *LowAddSub = nullptr;
12918 
12919   // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI.
12920   if ((AddeSubeOp0 != MULOp.getValue(1)) && (AddeSubeOp1 != MULOp.getValue(1)))
12921     return SDValue();
12922 
12923   if (IsLeftOperandMUL)
12924     HiAddSub = &AddeSubeOp1;
12925   else
12926     HiAddSub = &AddeSubeOp0;
12927 
  // Ensure that LoMul and LowAddSub are taken from the correct ISD::SMUL_LOHI
  // node whose low result is fed to the ADDC/SUBC we are checking.
12930 
12931   if (AddcSubcOp0 == MULOp.getValue(0)) {
12932     LoMul = &AddcSubcOp0;
12933     LowAddSub = &AddcSubcOp1;
12934   }
12935   if (AddcSubcOp1 == MULOp.getValue(0)) {
12936     LoMul = &AddcSubcOp1;
12937     LowAddSub = &AddcSubcOp0;
12938   }
12939 
12940   if (!LoMul)
12941     return SDValue();
12942 
12943   // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC
12944   // the replacement below will create a cycle.
12945   if (AddcSubcNode == HiAddSub->getNode() ||
12946       AddcSubcNode->isPredecessorOf(HiAddSub->getNode()))
12947     return SDValue();
12948 
12949   // Create the merged node.
12950   SelectionDAG &DAG = DCI.DAG;
12951 
12952   // Start building operand list.
12953   SmallVector<SDValue, 8> Ops;
12954   Ops.push_back(LoMul->getOperand(0));
12955   Ops.push_back(LoMul->getOperand(1));
12956 
  // Check whether we can use SMMLAR, SMMLSR or SMMULR instead.  For this to be
  // the case, we must be doing signed multiplication and only use the higher
  // part of the result of the MLAL; furthermore, the LowAddSub must be a
  // constant addition or subtraction with the value 0x80000000.
12961   if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() &&
12962       FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(1) &&
12963       LowAddSub->getNode()->getOpcode() == ISD::Constant &&
12964       static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() ==
12965           0x80000000) {
12966     Ops.push_back(*HiAddSub);
12967     if (AddcSubcNode->getOpcode() == ARMISD::SUBC) {
12968       FinalOpc = ARMISD::SMMLSR;
12969     } else {
12970       FinalOpc = ARMISD::SMMLAR;
12971     }
12972     SDValue NewNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), MVT::i32, Ops);
12973     DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), NewNode);
12974 
12975     return SDValue(AddeSubeNode, 0);
12976   } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC)
12977     // SMMLS is generated during instruction selection and the rest of this
12978     // function can not handle the case where AddcSubcNode is a SUBC.
12979     return SDValue();
12980 
12981   // Finish building the operand list for {U/S}MLAL
12982   Ops.push_back(*LowAddSub);
12983   Ops.push_back(*HiAddSub);
12984 
12985   SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode),
12986                                  DAG.getVTList(MVT::i32, MVT::i32), Ops);
12987 
  // Replace the ADD nodes' uses with the MLAL node's values.
12989   SDValue HiMLALResult(MLALNode.getNode(), 1);
12990   DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), HiMLALResult);
12991 
12992   SDValue LoMLALResult(MLALNode.getNode(), 0);
12993   DAG.ReplaceAllUsesOfValueWith(SDValue(AddcSubcNode, 0), LoMLALResult);
12994 
12995   // Return original node to notify the driver to stop replacing.
12996   return SDValue(AddeSubeNode, 0);
12997 }
12998 
12999 static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode,
13000                                       TargetLowering::DAGCombinerInfo &DCI,
13001                                       const ARMSubtarget *Subtarget) {
13002   // UMAAL is similar to UMLAL except that it adds two unsigned values.
13003   // While trying to combine for the other MLAL nodes, first search for the
13004   // chance to use UMAAL. Check if Addc uses a node which has already
13005   // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde
13006   // as the addend, and it's handled in PerformUMLALCombine.
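  //
  // Illustrative semantics (a sketch of the ISA behaviour): UMAAL RdLo, RdHi,
  // Rn, Rm computes {RdHi,RdLo} = Rn * Rm + RdLo + RdHi, i.e. a 32x32->64
  // multiply plus two independent 32-bit addends, which is what the
  // UMLAL-plus-extra-add pattern below reduces to.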
13007 
13008   if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
13009     return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
13010 
13011   // Check that we have a glued ADDC node.
13012   SDNode* AddcNode = AddeNode->getOperand(2).getNode();
13013   if (AddcNode->getOpcode() != ARMISD::ADDC)
13014     return SDValue();
13015 
13016   // Find the converted UMAAL or quit if it doesn't exist.
13017   SDNode *UmlalNode = nullptr;
13018   SDValue AddHi;
13019   if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) {
13020     UmlalNode = AddcNode->getOperand(0).getNode();
13021     AddHi = AddcNode->getOperand(1);
13022   } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) {
13023     UmlalNode = AddcNode->getOperand(1).getNode();
13024     AddHi = AddcNode->getOperand(0);
13025   } else {
13026     return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
13027   }
13028 
13029   // The ADDC should be glued to an ADDE node, which uses the same UMLAL as
13030   // the ADDC as well as Zero.
13031   if (!isNullConstant(UmlalNode->getOperand(3)))
13032     return SDValue();
13033 
13034   if ((isNullConstant(AddeNode->getOperand(0)) &&
13035        AddeNode->getOperand(1).getNode() == UmlalNode) ||
13036       (AddeNode->getOperand(0).getNode() == UmlalNode &&
13037        isNullConstant(AddeNode->getOperand(1)))) {
13038     SelectionDAG &DAG = DCI.DAG;
13039     SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1),
13040                       UmlalNode->getOperand(2), AddHi };
13041     SDValue UMAAL =  DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode),
13042                                  DAG.getVTList(MVT::i32, MVT::i32), Ops);
13043 
    // Replace the ADD nodes' uses with the UMAAL node's values.
13045     DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1));
13046     DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0));
13047 
13048     // Return original node to notify the driver to stop replacing.
13049     return SDValue(AddeNode, 0);
13050   }
13051   return SDValue();
13052 }
13053 
13054 static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG,
13055                                    const ARMSubtarget *Subtarget) {
13056   if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
13057     return SDValue();
13058 
13059   // Check that we have a pair of ADDC and ADDE as operands.
13060   // Both addends of the ADDE must be zero.
13061   SDNode* AddcNode = N->getOperand(2).getNode();
13062   SDNode* AddeNode = N->getOperand(3).getNode();
13063   if ((AddcNode->getOpcode() == ARMISD::ADDC) &&
13064       (AddeNode->getOpcode() == ARMISD::ADDE) &&
13065       isNullConstant(AddeNode->getOperand(0)) &&
13066       isNullConstant(AddeNode->getOperand(1)) &&
13067       (AddeNode->getOperand(2).getNode() == AddcNode))
13068     return DAG.getNode(ARMISD::UMAAL, SDLoc(N),
13069                        DAG.getVTList(MVT::i32, MVT::i32),
13070                        {N->getOperand(0), N->getOperand(1),
13071                         AddcNode->getOperand(0), AddcNode->getOperand(1)});
13072   else
13073     return SDValue();
13074 }
13075 
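// Recognize (SUBC (ADDE 0, 0, C), 1) and feed the original carry C straight
// through to the users of the SUBC's flag result. On Thumb1, also flip
// ADDC/SUBC with a negative immediate into the opposite operation with the
// negated (cheaper) immediate, e.g. (illustrative)
// (ARMISD::ADDC x, -5) -> (ARMISD::SUBC x, 5).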
13076 static SDValue PerformAddcSubcCombine(SDNode *N,
13077                                       TargetLowering::DAGCombinerInfo &DCI,
13078                                       const ARMSubtarget *Subtarget) {
13079   SelectionDAG &DAG(DCI.DAG);
13080 
13081   if (N->getOpcode() == ARMISD::SUBC && N->hasAnyUseOfValue(1)) {
13082     // (SUBC (ADDE 0, 0, C), 1) -> C
13083     SDValue LHS = N->getOperand(0);
13084     SDValue RHS = N->getOperand(1);
13085     if (LHS->getOpcode() == ARMISD::ADDE &&
13086         isNullConstant(LHS->getOperand(0)) &&
13087         isNullConstant(LHS->getOperand(1)) && isOneConstant(RHS)) {
13088       return DCI.CombineTo(N, SDValue(N, 0), LHS->getOperand(2));
13089     }
13090   }
13091 
13092   if (Subtarget->isThumb1Only()) {
13093     SDValue RHS = N->getOperand(1);
13094     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
13095       int32_t imm = C->getSExtValue();
13096       if (imm < 0 && imm > std::numeric_limits<int>::min()) {
13097         SDLoc DL(N);
13098         RHS = DAG.getConstant(-imm, DL, MVT::i32);
13099         unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC
13100                                                            : ARMISD::ADDC;
13101         return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS);
13102       }
13103     }
13104   }
13105 
13106   return SDValue();
13107 }
13108 
13109 static SDValue PerformAddeSubeCombine(SDNode *N,
13110                                       TargetLowering::DAGCombinerInfo &DCI,
13111                                       const ARMSubtarget *Subtarget) {
13112   if (Subtarget->isThumb1Only()) {
13113     SelectionDAG &DAG = DCI.DAG;
13114     SDValue RHS = N->getOperand(1);
13115     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
13116       int64_t imm = C->getSExtValue();
13117       if (imm < 0) {
13118         SDLoc DL(N);
13119 
13120         // The with-carry-in form matches bitwise not instead of the negation.
13121         // Effectively, the inverse interpretation of the carry flag already
13122         // accounts for part of the negation.
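        // For example (illustrative): (ADDE x, -5, carry) becomes
        // (SUBE x, 4, carry), since ~(-5) == 4 and the borrow convention
        // supplies the remaining difference of one.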
13123         RHS = DAG.getConstant(~imm, DL, MVT::i32);
13124 
13125         unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE
13126                                                            : ARMISD::ADDE;
13127         return DAG.getNode(Opcode, DL, N->getVTList(),
13128                            N->getOperand(0), RHS, N->getOperand(2));
13129       }
13130     }
13131   } else if (N->getOperand(1)->getOpcode() == ISD::SMUL_LOHI) {
13132     return AddCombineTo64bitMLAL(N, DCI, Subtarget);
13133   }
13134   return SDValue();
13135 }
13136 
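// On MVE, fold a scalar min/max between a value and a vector reduction into
// the reduction itself. Illustrative example (an assumption about the source
// shape):
//   r = x < vecreduce_umin(v) ? x : vecreduce_umin(v)
// i.e. min(x, vecreduce_umin(v)), becomes ARMISD::VMINVu(x, v); MVE's VMINV
// folds the scalar operand into the reduction.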
13137 static SDValue PerformSELECTCombine(SDNode *N,
13138                                     TargetLowering::DAGCombinerInfo &DCI,
13139                                     const ARMSubtarget *Subtarget) {
13140   if (!Subtarget->hasMVEIntegerOps())
13141     return SDValue();
13142 
13143   SDLoc dl(N);
13144   SDValue SetCC;
13145   SDValue LHS;
13146   SDValue RHS;
13147   ISD::CondCode CC;
13148   SDValue TrueVal;
13149   SDValue FalseVal;
13150 
13151   if (N->getOpcode() == ISD::SELECT &&
13152       N->getOperand(0)->getOpcode() == ISD::SETCC) {
13153     SetCC = N->getOperand(0);
13154     LHS = SetCC->getOperand(0);
13155     RHS = SetCC->getOperand(1);
13156     CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
13157     TrueVal = N->getOperand(1);
13158     FalseVal = N->getOperand(2);
13159   } else if (N->getOpcode() == ISD::SELECT_CC) {
13160     LHS = N->getOperand(0);
13161     RHS = N->getOperand(1);
13162     CC = cast<CondCodeSDNode>(N->getOperand(4))->get();
13163     TrueVal = N->getOperand(2);
13164     FalseVal = N->getOperand(3);
13165   } else {
13166     return SDValue();
13167   }
13168 
13169   unsigned int Opcode = 0;
13170   if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMIN ||
13171        FalseVal->getOpcode() == ISD::VECREDUCE_UMIN) &&
13172       (CC == ISD::SETULT || CC == ISD::SETUGT)) {
13173     Opcode = ARMISD::VMINVu;
13174     if (CC == ISD::SETUGT)
13175       std::swap(TrueVal, FalseVal);
13176   } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMIN ||
13177               FalseVal->getOpcode() == ISD::VECREDUCE_SMIN) &&
13178              (CC == ISD::SETLT || CC == ISD::SETGT)) {
13179     Opcode = ARMISD::VMINVs;
13180     if (CC == ISD::SETGT)
13181       std::swap(TrueVal, FalseVal);
13182   } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMAX ||
13183               FalseVal->getOpcode() == ISD::VECREDUCE_UMAX) &&
13184              (CC == ISD::SETUGT || CC == ISD::SETULT)) {
13185     Opcode = ARMISD::VMAXVu;
13186     if (CC == ISD::SETULT)
13187       std::swap(TrueVal, FalseVal);
13188   } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMAX ||
13189               FalseVal->getOpcode() == ISD::VECREDUCE_SMAX) &&
13190              (CC == ISD::SETGT || CC == ISD::SETLT)) {
13191     Opcode = ARMISD::VMAXVs;
13192     if (CC == ISD::SETLT)
13193       std::swap(TrueVal, FalseVal);
13194   } else
13195     return SDValue();
13196 
13197   // Normalise to the right hand side being the vector reduction
13198   switch (TrueVal->getOpcode()) {
13199   case ISD::VECREDUCE_UMIN:
13200   case ISD::VECREDUCE_SMIN:
13201   case ISD::VECREDUCE_UMAX:
13202   case ISD::VECREDUCE_SMAX:
13203     std::swap(LHS, RHS);
13204     std::swap(TrueVal, FalseVal);
13205     break;
13206   }
13207 
13208   EVT VectorType = FalseVal->getOperand(0).getValueType();
13209 
13210   if (VectorType != MVT::v16i8 && VectorType != MVT::v8i16 &&
13211       VectorType != MVT::v4i32)
13212     return SDValue();
13213 
13214   EVT VectorScalarType = VectorType.getVectorElementType();
13215 
13216   // The values being selected must also be the ones being compared
13217   if (TrueVal != LHS || FalseVal != RHS)
13218     return SDValue();
13219 
13220   EVT LeftType = LHS->getValueType(0);
13221   EVT RightType = RHS->getValueType(0);
13222 
13223   // The types must match the reduced type too
13224   if (LeftType != VectorScalarType || RightType != VectorScalarType)
13225     return SDValue();
13226 
13227   // Legalise the scalar to an i32
13228   if (VectorScalarType != MVT::i32)
13229     LHS = DCI.DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
13230 
13231   // Generate the reduction as an i32 for legalisation purposes
13232   auto Reduction =
13233       DCI.DAG.getNode(Opcode, dl, MVT::i32, LHS, RHS->getOperand(0));
13234 
13235   // The result isn't actually an i32 so truncate it back to its original type
13236   if (VectorScalarType != MVT::i32)
13237     Reduction = DCI.DAG.getNode(ISD::TRUNCATE, dl, VectorScalarType, Reduction);
13238 
13239   return Reduction;
13240 }
13241 
// A special combine for the vqdmulh family of instructions. This is one of the
// potential set of patterns that could match this instruction. The base pattern
// you would expect to be min(max(ashr(mul(mul(sext(x), 2), sext(y)), 16))).
// This matches the different min(max(ashr(mul(mul(sext(x), sext(y)), 2), 16))),
// which llvm will have optimized to min(ashr(mul(sext(x), sext(y)), 15)) as
// the max is unnecessary.
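//
// As a rough source-level illustration (an assumption, not taken from the
// sources), for 16-bit lanes:
//   int32_t t = (int32_t)x[i] * y[i];
//   r[i] = std::min(t >> 15, 32767);
// is the min(ashr(mul(sext, sext), 15), 0x7fff) shape matched here.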
13248 static SDValue PerformVQDMULHCombine(SDNode *N, SelectionDAG &DAG) {
13249   EVT VT = N->getValueType(0);
13250   SDValue Shft;
13251   ConstantSDNode *Clamp;
13252 
13253   if (!VT.isVector() || VT.getScalarSizeInBits() > 64)
13254     return SDValue();
13255 
13256   if (N->getOpcode() == ISD::SMIN) {
13257     Shft = N->getOperand(0);
13258     Clamp = isConstOrConstSplat(N->getOperand(1));
13259   } else if (N->getOpcode() == ISD::VSELECT) {
13260     // Detect a SMIN, which for an i64 node will be a vselect/setcc, not a smin.
13261     SDValue Cmp = N->getOperand(0);
13262     if (Cmp.getOpcode() != ISD::SETCC ||
13263         cast<CondCodeSDNode>(Cmp.getOperand(2))->get() != ISD::SETLT ||
13264         Cmp.getOperand(0) != N->getOperand(1) ||
13265         Cmp.getOperand(1) != N->getOperand(2))
13266       return SDValue();
13267     Shft = N->getOperand(1);
13268     Clamp = isConstOrConstSplat(N->getOperand(2));
13269   } else
13270     return SDValue();
13271 
13272   if (!Clamp)
13273     return SDValue();
13274 
13275   MVT ScalarType;
13276   int ShftAmt = 0;
13277   switch (Clamp->getSExtValue()) {
13278   case (1 << 7) - 1:
13279     ScalarType = MVT::i8;
13280     ShftAmt = 7;
13281     break;
13282   case (1 << 15) - 1:
13283     ScalarType = MVT::i16;
13284     ShftAmt = 15;
13285     break;
13286   case (1ULL << 31) - 1:
13287     ScalarType = MVT::i32;
13288     ShftAmt = 31;
13289     break;
13290   default:
13291     return SDValue();
13292   }
13293 
13294   if (Shft.getOpcode() != ISD::SRA)
13295     return SDValue();
13296   ConstantSDNode *N1 = isConstOrConstSplat(Shft.getOperand(1));
13297   if (!N1 || N1->getSExtValue() != ShftAmt)
13298     return SDValue();
13299 
13300   SDValue Mul = Shft.getOperand(0);
13301   if (Mul.getOpcode() != ISD::MUL)
13302     return SDValue();
13303 
13304   SDValue Ext0 = Mul.getOperand(0);
13305   SDValue Ext1 = Mul.getOperand(1);
13306   if (Ext0.getOpcode() != ISD::SIGN_EXTEND ||
13307       Ext1.getOpcode() != ISD::SIGN_EXTEND)
13308     return SDValue();
13309   EVT VecVT = Ext0.getOperand(0).getValueType();
13310   if (!VecVT.isPow2VectorType() || VecVT.getVectorNumElements() == 1)
13311     return SDValue();
13312   if (Ext1.getOperand(0).getValueType() != VecVT ||
13313       VecVT.getScalarType() != ScalarType ||
13314       VT.getScalarSizeInBits() < ScalarType.getScalarSizeInBits() * 2)
13315     return SDValue();
13316 
13317   SDLoc DL(Mul);
13318   unsigned LegalLanes = 128 / (ShftAmt + 1);
13319   EVT LegalVecVT = MVT::getVectorVT(ScalarType, LegalLanes);
  // For types smaller than legal vectors, extend to be legal and only use the
  // needed lanes.
13322   if (VecVT.getSizeInBits() < 128) {
13323     EVT ExtVecVT =
13324         MVT::getVectorVT(MVT::getIntegerVT(128 / VecVT.getVectorNumElements()),
13325                          VecVT.getVectorNumElements());
13326     SDValue Inp0 =
13327         DAG.getNode(ISD::ANY_EXTEND, DL, ExtVecVT, Ext0.getOperand(0));
13328     SDValue Inp1 =
13329         DAG.getNode(ISD::ANY_EXTEND, DL, ExtVecVT, Ext1.getOperand(0));
13330     Inp0 = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, LegalVecVT, Inp0);
13331     Inp1 = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, LegalVecVT, Inp1);
13332     SDValue VQDMULH = DAG.getNode(ARMISD::VQDMULH, DL, LegalVecVT, Inp0, Inp1);
13333     SDValue Trunc = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, ExtVecVT, VQDMULH);
13334     Trunc = DAG.getNode(ISD::TRUNCATE, DL, VecVT, Trunc);
13335     return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Trunc);
13336   }
13337 
13338   // For larger types, split into legal sized chunks.
13339   assert(VecVT.getSizeInBits() % 128 == 0 && "Expected a power2 type");
13340   unsigned NumParts = VecVT.getSizeInBits() / 128;
13341   SmallVector<SDValue> Parts;
13342   for (unsigned I = 0; I < NumParts; ++I) {
13343     SDValue Inp0 =
13344         DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LegalVecVT, Ext0.getOperand(0),
13345                     DAG.getVectorIdxConstant(I * LegalLanes, DL));
13346     SDValue Inp1 =
13347         DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LegalVecVT, Ext1.getOperand(0),
13348                     DAG.getVectorIdxConstant(I * LegalLanes, DL));
13349     SDValue VQDMULH = DAG.getNode(ARMISD::VQDMULH, DL, LegalVecVT, Inp0, Inp1);
13350     Parts.push_back(VQDMULH);
13351   }
13352   return DAG.getNode(ISD::SIGN_EXTEND, DL, VT,
13353                      DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT, Parts));
13354 }
13355 
13356 static SDValue PerformVSELECTCombine(SDNode *N,
13357                                      TargetLowering::DAGCombinerInfo &DCI,
13358                                      const ARMSubtarget *Subtarget) {
13359   if (!Subtarget->hasMVEIntegerOps())
13360     return SDValue();
13361 
13362   if (SDValue V = PerformVQDMULHCombine(N, DCI.DAG))
13363     return V;
13364 
13365   // Transforms vselect(not(cond), lhs, rhs) into vselect(cond, rhs, lhs).
13366   //
13367   // We need to re-implement this optimization here as the implementation in the
13368   // Target-Independent DAGCombiner does not handle the kind of constant we make
13369   // (it calls isConstOrConstSplat with AllowTruncation set to false - and for
13370   // good reason, allowing truncation there would break other targets).
13371   //
13372   // Currently, this is only done for MVE, as it's the only target that benefits
13373   // from this transformation (e.g. VPNOT+VPSEL becomes a single VPSEL).
13374   if (N->getOperand(0).getOpcode() != ISD::XOR)
13375     return SDValue();
13376   SDValue XOR = N->getOperand(0);
13377 
13378   // Check if the XOR's RHS is either a 1, or a BUILD_VECTOR of 1s.
13379   // It is important to check with truncation allowed as the BUILD_VECTORs we
13380   // generate in those situations will truncate their operands.
13381   ConstantSDNode *Const =
13382       isConstOrConstSplat(XOR->getOperand(1), /*AllowUndefs*/ false,
13383                           /*AllowTruncation*/ true);
13384   if (!Const || !Const->isOne())
13385     return SDValue();
13386 
13387   // Rewrite into vselect(cond, rhs, lhs).
13388   SDValue Cond = XOR->getOperand(0);
13389   SDValue LHS = N->getOperand(1);
13390   SDValue RHS = N->getOperand(2);
13391   EVT Type = N->getValueType(0);
13392   return DCI.DAG.getNode(ISD::VSELECT, SDLoc(N), Type, Cond, RHS, LHS);
13393 }
13394 
13395 // Convert vsetcc([0,1,2,..], splat(n), ult) -> vctp n
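// MVE's VCTP (Create Tail Predicate) sets exactly the first n lanes of the
// predicate, so comparing the lane indices 0,1,2,... against a splat of n
// with an unsigned less-than produces the same mask.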
13396 static SDValue PerformVSetCCToVCTPCombine(SDNode *N,
13397                                           TargetLowering::DAGCombinerInfo &DCI,
13398                                           const ARMSubtarget *Subtarget) {
13399   SDValue Op0 = N->getOperand(0);
13400   SDValue Op1 = N->getOperand(1);
13401   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13402   EVT VT = N->getValueType(0);
13403 
13404   if (!Subtarget->hasMVEIntegerOps() ||
13405       !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
13406     return SDValue();
13407 
13408   if (CC == ISD::SETUGE) {
13409     std::swap(Op0, Op1);
13410     CC = ISD::SETULT;
13411   }
13412 
13413   if (CC != ISD::SETULT || VT.getScalarSizeInBits() != 1 ||
13414       Op0.getOpcode() != ISD::BUILD_VECTOR)
13415     return SDValue();
13416 
13417   // Check first operand is BuildVector of 0,1,2,...
13418   for (unsigned I = 0; I < VT.getVectorNumElements(); I++) {
13419     if (!Op0.getOperand(I).isUndef() &&
13420         !(isa<ConstantSDNode>(Op0.getOperand(I)) &&
13421           Op0.getConstantOperandVal(I) == I))
13422       return SDValue();
13423   }
13424 
  // The second operand must be a splat; Op1S is its scalar value.
13426   SDValue Op1S = DCI.DAG.getSplatValue(Op1);
13427   if (!Op1S)
13428     return SDValue();
13429 
13430   unsigned Opc;
13431   switch (VT.getVectorNumElements()) {
13432   case 2:
13433     Opc = Intrinsic::arm_mve_vctp64;
13434     break;
13435   case 4:
13436     Opc = Intrinsic::arm_mve_vctp32;
13437     break;
13438   case 8:
13439     Opc = Intrinsic::arm_mve_vctp16;
13440     break;
13441   case 16:
13442     Opc = Intrinsic::arm_mve_vctp8;
13443     break;
13444   default:
13445     return SDValue();
13446   }
13447 
13448   SDLoc DL(N);
13449   return DCI.DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
13450                          DCI.DAG.getConstant(Opc, DL, MVT::i32),
13451                          DCI.DAG.getZExtOrTrunc(Op1S, DL, MVT::i32));
13452 }
13453 
13454 static SDValue PerformABSCombine(SDNode *N,
13455                                  TargetLowering::DAGCombinerInfo &DCI,
13456                                  const ARMSubtarget *Subtarget) {
13457   SelectionDAG &DAG = DCI.DAG;
13458   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13459 
13460   if (TLI.isOperationLegal(N->getOpcode(), N->getValueType(0)))
13461     return SDValue();
13462 
13463   return TLI.expandABS(N, DAG);
13464 }
13465 
/// PerformADDECombine - Target-specific dag combine transform from
/// ARMISD::ADDC, ARMISD::ADDE, and ISD::UMUL_LOHI/SMUL_LOHI to
/// ARMISD::UMLAL/SMLAL, or ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to
/// ARMISD::UMAAL
13469 static SDValue PerformADDECombine(SDNode *N,
13470                                   TargetLowering::DAGCombinerInfo &DCI,
13471                                   const ARMSubtarget *Subtarget) {
13472   // Only ARM and Thumb2 support UMLAL/SMLAL.
13473   if (Subtarget->isThumb1Only())
13474     return PerformAddeSubeCombine(N, DCI, Subtarget);
13475 
13476   // Only perform the checks after legalize when the pattern is available.
13477   if (DCI.isBeforeLegalize()) return SDValue();
13478 
13479   return AddCombineTo64bitUMAAL(N, DCI, Subtarget);
13480 }
13481 
13482 /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
13483 /// operands N0 and N1.  This is a helper for PerformADDCombine that is
13484 /// called with the default operands, and if that fails, with commuted
13485 /// operands.
13486 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
13487                                           TargetLowering::DAGCombinerInfo &DCI,
13488                                           const ARMSubtarget *Subtarget){
13489   // Attempt to create vpadd for this add.
13490   if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget))
13491     return Result;
13492 
13493   // Attempt to create vpaddl for this add.
13494   if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget))
13495     return Result;
13496   if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI,
13497                                                       Subtarget))
13498     return Result;
13499 
13500   // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
13501   if (N0.getNode()->hasOneUse())
13502     if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI))
13503       return Result;
13504   return SDValue();
13505 }
13506 
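// Rebalance integer add trees that contain vector reductions so that each
// reduction is added into the scalar chain one at a time; on MVE this lets
// the adds fold into accumulating vaddva / vmlava style operations. The
// lambdas below describe the specific shapes that are handled.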
13507 static SDValue TryDistrubutionADDVecReduce(SDNode *N, SelectionDAG &DAG) {
13508   EVT VT = N->getValueType(0);
13509   SDValue N0 = N->getOperand(0);
13510   SDValue N1 = N->getOperand(1);
13511   SDLoc dl(N);
13512 
13513   auto IsVecReduce = [](SDValue Op) {
13514     switch (Op.getOpcode()) {
13515     case ISD::VECREDUCE_ADD:
13516     case ARMISD::VADDVs:
13517     case ARMISD::VADDVu:
13518     case ARMISD::VMLAVs:
13519     case ARMISD::VMLAVu:
13520       return true;
13521     }
13522     return false;
13523   };
13524 
13525   auto DistrubuteAddAddVecReduce = [&](SDValue N0, SDValue N1) {
13526     // Distribute add(X, add(vecreduce(Y), vecreduce(Z))) ->
13527     //   add(add(X, vecreduce(Y)), vecreduce(Z))
13528     // to make better use of vaddva style instructions.
13529     if (VT == MVT::i32 && N1.getOpcode() == ISD::ADD && !IsVecReduce(N0) &&
13530         IsVecReduce(N1.getOperand(0)) && IsVecReduce(N1.getOperand(1)) &&
13531         !isa<ConstantSDNode>(N0) && N1->hasOneUse()) {
13532       SDValue Add0 = DAG.getNode(ISD::ADD, dl, VT, N0, N1.getOperand(0));
13533       return DAG.getNode(ISD::ADD, dl, VT, Add0, N1.getOperand(1));
13534     }
13535     // And turn add(add(A, reduce(B)), add(C, reduce(D))) ->
13536     //   add(add(add(A, C), reduce(B)), reduce(D))
13537     if (VT == MVT::i32 && N0.getOpcode() == ISD::ADD &&
13538         N1.getOpcode() == ISD::ADD && N0->hasOneUse() && N1->hasOneUse()) {
13539       unsigned N0RedOp = 0;
13540       if (!IsVecReduce(N0.getOperand(N0RedOp))) {
13541         N0RedOp = 1;
13542         if (!IsVecReduce(N0.getOperand(N0RedOp)))
13543           return SDValue();
13544       }
13545 
13546       unsigned N1RedOp = 0;
13547       if (!IsVecReduce(N1.getOperand(N1RedOp)))
13548         N1RedOp = 1;
13549       if (!IsVecReduce(N1.getOperand(N1RedOp)))
13550         return SDValue();
13551 
13552       SDValue Add0 = DAG.getNode(ISD::ADD, dl, VT, N0.getOperand(1 - N0RedOp),
13553                                  N1.getOperand(1 - N1RedOp));
13554       SDValue Add1 =
13555           DAG.getNode(ISD::ADD, dl, VT, Add0, N0.getOperand(N0RedOp));
13556       return DAG.getNode(ISD::ADD, dl, VT, Add1, N1.getOperand(N1RedOp));
13557     }
13558     return SDValue();
13559   };
13560   if (SDValue R = DistrubuteAddAddVecReduce(N0, N1))
13561     return R;
13562   if (SDValue R = DistrubuteAddAddVecReduce(N1, N0))
13563     return R;
13564 
13565   // Distribute add(vecreduce(load(Y)), vecreduce(load(Z)))
13566   // Or add(add(X, vecreduce(load(Y))), vecreduce(load(Z)))
13567   // by ascending load offsets. This can help cores prefetch if the order of
13568   // loads is more predictable.
13569   auto DistrubuteVecReduceLoad = [&](SDValue N0, SDValue N1, bool IsForward) {
    // Check if two reductions are known to load data where one is before/after
    // another. Return negative if N0 loads data before N1, positive if N1 is
    // before N0, and 0 if nothing is known.
13573     auto IsKnownOrderedLoad = [&](SDValue N0, SDValue N1) {
13574       // Look through to the first operand of a MUL, for the VMLA case.
13575       // Currently only looks at the first operand, in the hope they are equal.
13576       if (N0.getOpcode() == ISD::MUL)
13577         N0 = N0.getOperand(0);
13578       if (N1.getOpcode() == ISD::MUL)
13579         N1 = N1.getOperand(0);
13580 
13581       // Return true if the two operands are loads to the same object and the
13582       // offset of the first is known to be less than the offset of the second.
13583       LoadSDNode *Load0 = dyn_cast<LoadSDNode>(N0);
13584       LoadSDNode *Load1 = dyn_cast<LoadSDNode>(N1);
13585       if (!Load0 || !Load1 || Load0->getChain() != Load1->getChain() ||
13586           !Load0->isSimple() || !Load1->isSimple() || Load0->isIndexed() ||
13587           Load1->isIndexed())
13588         return 0;
13589 
13590       auto BaseLocDecomp0 = BaseIndexOffset::match(Load0, DAG);
13591       auto BaseLocDecomp1 = BaseIndexOffset::match(Load1, DAG);
13592 
13593       if (!BaseLocDecomp0.getBase() ||
13594           BaseLocDecomp0.getBase() != BaseLocDecomp1.getBase() ||
13595           !BaseLocDecomp0.hasValidOffset() || !BaseLocDecomp1.hasValidOffset())
13596         return 0;
13597       if (BaseLocDecomp0.getOffset() < BaseLocDecomp1.getOffset())
13598         return -1;
13599       if (BaseLocDecomp0.getOffset() > BaseLocDecomp1.getOffset())
13600         return 1;
13601       return 0;
13602     };
13603 
13604     SDValue X;
13605     if (N0.getOpcode() == ISD::ADD && N0->hasOneUse()) {
13606       if (IsVecReduce(N0.getOperand(0)) && IsVecReduce(N0.getOperand(1))) {
13607         int IsBefore = IsKnownOrderedLoad(N0.getOperand(0).getOperand(0),
13608                                          N0.getOperand(1).getOperand(0));
13609         if (IsBefore < 0) {
13610           X = N0.getOperand(0);
13611           N0 = N0.getOperand(1);
13612         } else if (IsBefore > 0) {
13613           X = N0.getOperand(1);
13614           N0 = N0.getOperand(0);
13615         } else
13616           return SDValue();
13617       } else if (IsVecReduce(N0.getOperand(0))) {
13618         X = N0.getOperand(1);
13619         N0 = N0.getOperand(0);
13620       } else if (IsVecReduce(N0.getOperand(1))) {
13621         X = N0.getOperand(0);
13622         N0 = N0.getOperand(1);
13623       } else
13624         return SDValue();
13625     } else if (IsForward && IsVecReduce(N0) && IsVecReduce(N1) &&
13626                IsKnownOrderedLoad(N0.getOperand(0), N1.getOperand(0)) < 0) {
      // Note this is backward to how you would expect. We create
      // add(reduce(load + 16), reduce(load + 0)) so that the
      // add(reduce(load+16), X) is combined into VADDVA(X, load+16), leaving
      // the X as VADDV(load + 0).
13631       return DAG.getNode(ISD::ADD, dl, VT, N1, N0);
13632     } else
13633       return SDValue();
13634 
13635     if (!IsVecReduce(N0) || !IsVecReduce(N1))
13636       return SDValue();
13637 
13638     if (IsKnownOrderedLoad(N1.getOperand(0), N0.getOperand(0)) >= 0)
13639       return SDValue();
13640 
13641     // Switch from add(add(X, N0), N1) to add(add(X, N1), N0)
13642     SDValue Add0 = DAG.getNode(ISD::ADD, dl, VT, X, N1);
13643     return DAG.getNode(ISD::ADD, dl, VT, Add0, N0);
13644   };
13645   if (SDValue R = DistrubuteVecReduceLoad(N0, N1, true))
13646     return R;
13647   if (SDValue R = DistrubuteVecReduceLoad(N1, N0, false))
13648     return R;
13649   return SDValue();
13650 }
13651 
13652 static SDValue PerformADDVecReduce(SDNode *N, SelectionDAG &DAG,
13653                                    const ARMSubtarget *Subtarget) {
13654   if (!Subtarget->hasMVEIntegerOps())
13655     return SDValue();
13656 
13657   if (SDValue R = TryDistrubutionADDVecReduce(N, DAG))
13658     return R;
13659 
13660   EVT VT = N->getValueType(0);
13661   SDValue N0 = N->getOperand(0);
13662   SDValue N1 = N->getOperand(1);
13663   SDLoc dl(N);
13664 
13665   if (VT != MVT::i64)
13666     return SDValue();
13667 
  // We are looking for an i64 add of a VADDLVx. Due to these being i64's, this
  // will look like:
  //   t1: i32,i32 = ARMISD::VADDLVs x
  //   t2: i64 = build_pair t1, t1:1
  //   t3: i64 = add t2, y
  // Otherwise we try to push the add up above VADDLVAx, to potentially allow
  // the add to be simplified separately.
  // We also need to check for sext / zext and commutative adds.
13676   auto MakeVecReduce = [&](unsigned Opcode, unsigned OpcodeA, SDValue NA,
13677                            SDValue NB) {
13678     if (NB->getOpcode() != ISD::BUILD_PAIR)
13679       return SDValue();
13680     SDValue VecRed = NB->getOperand(0);
13681     if ((VecRed->getOpcode() != Opcode && VecRed->getOpcode() != OpcodeA) ||
13682         VecRed.getResNo() != 0 ||
13683         NB->getOperand(1) != SDValue(VecRed.getNode(), 1))
13684       return SDValue();
13685 
13686     if (VecRed->getOpcode() == OpcodeA) {
13687       // add(NA, VADDLVA(Inp), Y) -> VADDLVA(add(NA, Inp), Y)
13688       SDValue Inp = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64,
13689                                 VecRed.getOperand(0), VecRed.getOperand(1));
13690       NA = DAG.getNode(ISD::ADD, dl, MVT::i64, Inp, NA);
13691     }
13692 
13693     SmallVector<SDValue, 4> Ops(2);
13694     std::tie(Ops[0], Ops[1]) = DAG.SplitScalar(NA, dl, MVT::i32, MVT::i32);
13695 
13696     unsigned S = VecRed->getOpcode() == OpcodeA ? 2 : 0;
13697     for (unsigned I = S, E = VecRed.getNumOperands(); I < E; I++)
13698       Ops.push_back(VecRed->getOperand(I));
13699     SDValue Red =
13700         DAG.getNode(OpcodeA, dl, DAG.getVTList({MVT::i32, MVT::i32}), Ops);
13701     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Red,
13702                        SDValue(Red.getNode(), 1));
13703   };
13704 
13705   if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N0, N1))
13706     return M;
13707   if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N0, N1))
13708     return M;
13709   if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N1, N0))
13710     return M;
13711   if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N1, N0))
13712     return M;
13713   if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N0, N1))
13714     return M;
13715   if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N0, N1))
13716     return M;
13717   if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N1, N0))
13718     return M;
13719   if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N1, N0))
13720     return M;
13721   if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N0, N1))
13722     return M;
13723   if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N0, N1))
13724     return M;
13725   if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N1, N0))
13726     return M;
13727   if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N1, N0))
13728     return M;
13729   if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N0, N1))
13730     return M;
13731   if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N0, N1))
13732     return M;
13733   if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N1, N0))
13734     return M;
13735   if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N1, N0))
13736     return M;
13737   return SDValue();
13738 }
13739 
13740 bool
13741 ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
13742                                                  CombineLevel Level) const {
13743   assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
13744           N->getOpcode() == ISD::SRL) &&
13745          "Expected shift op");
13746 
13747   if (Level == BeforeLegalizeTypes)
13748     return true;
13749 
13750   if (N->getOpcode() != ISD::SHL)
13751     return true;
13752 
13753   if (Subtarget->isThumb1Only()) {
13754     // Avoid making expensive immediates by commuting shifts. (This logic
13755     // only applies to Thumb1 because ARM and Thumb2 immediates can be shifted
13756     // for free.)
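    // For example (illustrative): (shl (add x, 255), 1) is kept as-is, since
    // commuting would create the immediate 510, which no longer fits in an
    // 8-bit Thumb1 immediate.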
13757     if (N->getOpcode() != ISD::SHL)
13758       return true;
13759     SDValue N1 = N->getOperand(0);
13760     if (N1->getOpcode() != ISD::ADD && N1->getOpcode() != ISD::AND &&
13761         N1->getOpcode() != ISD::OR && N1->getOpcode() != ISD::XOR)
13762       return true;
13763     if (auto *Const = dyn_cast<ConstantSDNode>(N1->getOperand(1))) {
13764       if (Const->getAPIntValue().ult(256))
13765         return false;
13766       if (N1->getOpcode() == ISD::ADD && Const->getAPIntValue().slt(0) &&
13767           Const->getAPIntValue().sgt(-256))
13768         return false;
13769     }
13770     return true;
13771   }
13772 
13773   // Turn off commute-with-shift transform after legalization, so it doesn't
13774   // conflict with PerformSHLSimplify.  (We could try to detect when
13775   // PerformSHLSimplify would trigger more precisely, but it isn't
13776   // really necessary.)
13777   return false;
13778 }
13779 
13780 bool ARMTargetLowering::isDesirableToCommuteXorWithShift(
13781     const SDNode *N) const {
13782   assert(N->getOpcode() == ISD::XOR &&
13783          (N->getOperand(0).getOpcode() == ISD::SHL ||
13784           N->getOperand(0).getOpcode() == ISD::SRL) &&
13785          "Expected XOR(SHIFT) pattern");
13786 
13787   // Only commute if the entire NOT mask is a hidden shifted mask.
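  // For example (illustrative): xor (shl x, 8), 0xFFFFFF00 uses a mask that
  // is all ones above the shift amount (MaskIdx == 8, MaskLen == 24 for i32),
  // so the NOT can be commuted inside the shift.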
13788   auto *XorC = dyn_cast<ConstantSDNode>(N->getOperand(1));
13789   auto *ShiftC = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1));
13790   if (XorC && ShiftC) {
13791     unsigned MaskIdx, MaskLen;
13792     if (XorC->getAPIntValue().isShiftedMask(MaskIdx, MaskLen)) {
13793       unsigned ShiftAmt = ShiftC->getZExtValue();
13794       unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
13795       if (N->getOperand(0).getOpcode() == ISD::SHL)
13796         return MaskIdx == ShiftAmt && MaskLen == (BitWidth - ShiftAmt);
13797       return MaskIdx == 0 && MaskLen == (BitWidth - ShiftAmt);
13798     }
13799   }
13800 
13801   return false;
13802 }
13803 
13804 bool ARMTargetLowering::shouldFoldConstantShiftPairToMask(
13805     const SDNode *N, CombineLevel Level) const {
13806   assert(((N->getOpcode() == ISD::SHL &&
13807            N->getOperand(0).getOpcode() == ISD::SRL) ||
13808           (N->getOpcode() == ISD::SRL &&
13809            N->getOperand(0).getOpcode() == ISD::SHL)) &&
13810          "Expected shift-shift mask");
13811 
13812   if (!Subtarget->isThumb1Only())
13813     return true;
13814 
13815   if (Level == BeforeLegalizeTypes)
13816     return true;
13817 
13818   return false;
13819 }
13820 
13821 bool ARMTargetLowering::shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
13822                                                              EVT VT) const {
13823   return Subtarget->hasMVEIntegerOps() && isTypeLegal(VT);
13824 }
13825 
13826 bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
13827   if (!Subtarget->hasNEON()) {
13828     if (Subtarget->isThumb1Only())
13829       return VT.getScalarSizeInBits() <= 32;
13830     return true;
13831   }
13832   return VT.isScalarInteger();
13833 }
13834 
13835 bool ARMTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
13836                                              EVT VT) const {
13837   if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
13838     return false;
13839 
13840   switch (FPVT.getSimpleVT().SimpleTy) {
13841   case MVT::f16:
13842     return Subtarget->hasVFP2Base();
13843   case MVT::f32:
13844     return Subtarget->hasVFP2Base();
13845   case MVT::f64:
13846     return Subtarget->hasFP64();
13847   case MVT::v4f32:
13848   case MVT::v8f16:
13849     return Subtarget->hasMVEFloatOps();
13850   default:
13851     return false;
13852   }
13853 }
13854 
13855 static SDValue PerformSHLSimplify(SDNode *N,
13856                                 TargetLowering::DAGCombinerInfo &DCI,
13857                                 const ARMSubtarget *ST) {
13858   // Allow the generic combiner to identify potential bswaps.
13859   if (DCI.isBeforeLegalize())
13860     return SDValue();
13861 
  // DAG combiner will fold:
  // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  // (shl (or x, c1), c2)  -> (or (shl x, c2), c1 << c2)
  // Other code patterns that can also be modified have the following form:
  // b + ((a << 1) | 510)
  // b + ((a << 1) & 510)
  // b + ((a << 1) ^ 510)
  // b + ((a << 1) + 510)

  // Many instructions can perform the shift for free, but it requires both
  // operands to be registers. If c1 << c2 is too large, a mov immediate
  // instruction will be needed. So, unfold back to the original pattern if:
  // - c1 and c2 are small enough that they don't require mov imms.
  // - the user(s) of the node can perform the shl themselves.
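  //
  // Worked example (illustrative): for (add (shl x, 1), 510), shifting the
  // constant right gives 255; both 255 and 1 are cheap immediates, so we
  // rebuild (shl (add x, 255), 1) and let the users fold the shift into their
  // shifted-operand forms.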
13876 
13877   // No shifted operands for 16-bit instructions.
13878   if (ST->isThumb() && ST->isThumb1Only())
13879     return SDValue();
13880 
13881   // Check that all the users could perform the shl themselves.
13882   for (auto *U : N->uses()) {
    switch (U->getOpcode()) {
13884     default:
13885       return SDValue();
13886     case ISD::SUB:
13887     case ISD::ADD:
13888     case ISD::AND:
13889     case ISD::OR:
13890     case ISD::XOR:
13891     case ISD::SETCC:
13892     case ARMISD::CMP:
13893       // Check that the user isn't already using a constant because there
13894       // aren't any instructions that support an immediate operand and a
13895       // shifted operand.
13896       if (isa<ConstantSDNode>(U->getOperand(0)) ||
13897           isa<ConstantSDNode>(U->getOperand(1)))
13898         return SDValue();
13899 
13900       // Check that it's not already using a shift.
13901       if (U->getOperand(0).getOpcode() == ISD::SHL ||
13902           U->getOperand(1).getOpcode() == ISD::SHL)
13903         return SDValue();
13904       break;
13905     }
13906   }
13907 
13908   if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR &&
13909       N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND)
13910     return SDValue();
13911 
13912   if (N->getOperand(0).getOpcode() != ISD::SHL)
13913     return SDValue();
13914 
13915   SDValue SHL = N->getOperand(0);
13916 
13917   auto *C1ShlC2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
13918   auto *C2 = dyn_cast<ConstantSDNode>(SHL.getOperand(1));
13919   if (!C1ShlC2 || !C2)
13920     return SDValue();
13921 
13922   APInt C2Int = C2->getAPIntValue();
13923   APInt C1Int = C1ShlC2->getAPIntValue();
13924   unsigned C2Width = C2Int.getBitWidth();
13925   if (C2Int.uge(C2Width))
13926     return SDValue();
13927   uint64_t C2Value = C2Int.getZExtValue();
13928 
13929   // Check that performing a lshr will not lose any information.
13930   APInt Mask = APInt::getHighBitsSet(C2Width, C2Width - C2Value);
13931   if ((C1Int & Mask) != C1Int)
13932     return SDValue();
13933 
13934   // Shift the first constant.
13935   C1Int.lshrInPlace(C2Int);
13936 
13937   // The immediates are encoded as an 8-bit value that can be rotated.
13938   auto LargeImm = [](const APInt &Imm) {
13939     unsigned Zeros = Imm.countl_zero() + Imm.countr_zero();
13940     return Imm.getBitWidth() - Zeros > 8;
13941   };
13942 
13943   if (LargeImm(C1Int) || LargeImm(C2Int))
13944     return SDValue();
13945 
13946   SelectionDAG &DAG = DCI.DAG;
13947   SDLoc dl(N);
13948   SDValue X = SHL.getOperand(0);
13949   SDValue BinOp = DAG.getNode(N->getOpcode(), dl, MVT::i32, X,
13950                               DAG.getConstant(C1Int, dl, MVT::i32));
13951   // Shift left to compensate for the lshr of C1Int.
13952   SDValue Res = DAG.getNode(ISD::SHL, dl, MVT::i32, BinOp, SHL.getOperand(1));
13953 
13954   LLVM_DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump();
13955              SHL.dump(); N->dump());
13956   LLVM_DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump());
13957   return Res;
13958 }
13959 
13960 
13961 /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
13962 ///
13963 static SDValue PerformADDCombine(SDNode *N,
13964                                  TargetLowering::DAGCombinerInfo &DCI,
13965                                  const ARMSubtarget *Subtarget) {
13966   SDValue N0 = N->getOperand(0);
13967   SDValue N1 = N->getOperand(1);
13968 
13969   // Only works one way, because it needs an immediate operand.
13970   if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
13971     return Result;
13972 
13973   if (SDValue Result = PerformADDVecReduce(N, DCI.DAG, Subtarget))
13974     return Result;
13975 
13976   // First try with the default operand order.
13977   if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget))
13978     return Result;
13979 
13980   // If that didn't work, try again with the operands commuted.
13981   return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
13982 }
13983 
13984 // Combine (sub 0, (csinc X, Y, CC)) -> (csinv -X, Y, CC)
//   provided -X is as cheap as X (currently, just a constant).
13986 static SDValue PerformSubCSINCCombine(SDNode *N, SelectionDAG &DAG) {
13987   if (N->getValueType(0) != MVT::i32 || !isNullConstant(N->getOperand(0)))
13988     return SDValue();
13989   SDValue CSINC = N->getOperand(1);
13990   if (CSINC.getOpcode() != ARMISD::CSINC || !CSINC.hasOneUse())
13991     return SDValue();
13992 
13993   ConstantSDNode *X = dyn_cast<ConstantSDNode>(CSINC.getOperand(0));
13994   if (!X)
13995     return SDValue();
13996 
13997   return DAG.getNode(ARMISD::CSINV, SDLoc(N), MVT::i32,
13998                      DAG.getNode(ISD::SUB, SDLoc(N), MVT::i32, N->getOperand(0),
13999                                  CSINC.getOperand(0)),
14000                      CSINC.getOperand(1), CSINC.getOperand(2),
14001                      CSINC.getOperand(3));
14002 }
14003 
14004 /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
14005 ///
14006 static SDValue PerformSUBCombine(SDNode *N,
14007                                  TargetLowering::DAGCombinerInfo &DCI,
14008                                  const ARMSubtarget *Subtarget) {
14009   SDValue N0 = N->getOperand(0);
14010   SDValue N1 = N->getOperand(1);
14011 
  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
14013   if (N1.getNode()->hasOneUse())
14014     if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI))
14015       return Result;
14016 
14017   if (SDValue R = PerformSubCSINCCombine(N, DCI.DAG))
14018     return R;
14019 
14020   if (!Subtarget->hasMVEIntegerOps() || !N->getValueType(0).isVector())
14021     return SDValue();
14022 
14023   // Fold (sub (ARMvmovImm 0), (ARMvdup x)) -> (ARMvdup (sub 0, x))
  // so that we can readily pattern match more MVE instructions which can use
  // a scalar operand.
14026   SDValue VDup = N->getOperand(1);
14027   if (VDup->getOpcode() != ARMISD::VDUP)
14028     return SDValue();
14029 
14030   SDValue VMov = N->getOperand(0);
14031   if (VMov->getOpcode() == ISD::BITCAST)
14032     VMov = VMov->getOperand(0);
14033 
14034   if (VMov->getOpcode() != ARMISD::VMOVIMM || !isZeroVector(VMov))
14035     return SDValue();
14036 
14037   SDLoc dl(N);
14038   SDValue Negate = DCI.DAG.getNode(ISD::SUB, dl, MVT::i32,
14039                                    DCI.DAG.getConstant(0, dl, MVT::i32),
14040                                    VDup->getOperand(0));
14041   return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0), Negate);
14042 }
14043 
14044 /// PerformVMULCombine
14045 /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
14046 /// special multiplier accumulator forwarding.
14047 ///   vmul d3, d0, d2
14048 ///   vmla d3, d1, d2
14049 /// is faster than
14050 ///   vadd d3, d0, d1
14051 ///   vmul d3, d3, d2
14052 //  However, for (A + B) * (A + B),
14053 //    vadd d2, d0, d1
14054 //    vmul d3, d0, d2
14055 //    vmla d3, d1, d2
14056 //  is slower than
14057 //    vadd d2, d0, d1
14058 //    vmul d3, d2, d2
14059 static SDValue PerformVMULCombine(SDNode *N,
14060                                   TargetLowering::DAGCombinerInfo &DCI,
14061                                   const ARMSubtarget *Subtarget) {
14062   if (!Subtarget->hasVMLxForwarding())
14063     return SDValue();
14064 
14065   SelectionDAG &DAG = DCI.DAG;
14066   SDValue N0 = N->getOperand(0);
14067   SDValue N1 = N->getOperand(1);
14068   unsigned Opcode = N0.getOpcode();
14069   if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
14070       Opcode != ISD::FADD && Opcode != ISD::FSUB) {
14071     Opcode = N1.getOpcode();
14072     if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
14073         Opcode != ISD::FADD && Opcode != ISD::FSUB)
14074       return SDValue();
14075     std::swap(N0, N1);
14076   }
14077 
14078   if (N0 == N1)
14079     return SDValue();
14080 
14081   EVT VT = N->getValueType(0);
14082   SDLoc DL(N);
14083   SDValue N00 = N0->getOperand(0);
14084   SDValue N01 = N0->getOperand(1);
14085   return DAG.getNode(Opcode, DL, VT,
14086                      DAG.getNode(ISD::MUL, DL, VT, N00, N1),
14087                      DAG.getNode(ISD::MUL, DL, VT, N01, N1));
14088 }
14089 
14090 static SDValue PerformMVEVMULLCombine(SDNode *N, SelectionDAG &DAG,
14091                                       const ARMSubtarget *Subtarget) {
14092   EVT VT = N->getValueType(0);
14093   if (VT != MVT::v2i64)
14094     return SDValue();
14095 
14096   SDValue N0 = N->getOperand(0);
14097   SDValue N1 = N->getOperand(1);
14098 
14099   auto IsSignExt = [&](SDValue Op) {
14100     if (Op->getOpcode() != ISD::SIGN_EXTEND_INREG)
14101       return SDValue();
14102     EVT VT = cast<VTSDNode>(Op->getOperand(1))->getVT();
14103     if (VT.getScalarSizeInBits() == 32)
14104       return Op->getOperand(0);
14105     return SDValue();
14106   };
14107   auto IsZeroExt = [&](SDValue Op) {
14108     // Zero extends are a little more awkward. At the point we are matching
14109     // this, we are looking for an AND with a (-1, 0, -1, 0) buildvector mask.
    // That might be before or after a bitcast depending on how the and is
14111     // placed. Because this has to look through bitcasts, it is currently only
14112     // supported on LE.
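    // For example (illustrative): on little-endian, a v4i32 mask of
    // (-1, 0, -1, 0) viewed as v2i64 is 0x00000000ffffffff in each lane,
    // i.e. a zero-extension of the low 32 bits of that lane.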
14113     if (!Subtarget->isLittle())
14114       return SDValue();
14115 
14116     SDValue And = Op;
14117     if (And->getOpcode() == ISD::BITCAST)
14118       And = And->getOperand(0);
14119     if (And->getOpcode() != ISD::AND)
14120       return SDValue();
14121     SDValue Mask = And->getOperand(1);
14122     if (Mask->getOpcode() == ISD::BITCAST)
14123       Mask = Mask->getOperand(0);
14124 
14125     if (Mask->getOpcode() != ISD::BUILD_VECTOR ||
14126         Mask.getValueType() != MVT::v4i32)
14127       return SDValue();
14128     if (isAllOnesConstant(Mask->getOperand(0)) &&
14129         isNullConstant(Mask->getOperand(1)) &&
14130         isAllOnesConstant(Mask->getOperand(2)) &&
14131         isNullConstant(Mask->getOperand(3)))
14132       return And->getOperand(0);
14133     return SDValue();
14134   };
14135 
14136   SDLoc dl(N);
14137   if (SDValue Op0 = IsSignExt(N0)) {
14138     if (SDValue Op1 = IsSignExt(N1)) {
14139       SDValue New0a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op0);
14140       SDValue New1a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op1);
14141       return DAG.getNode(ARMISD::VMULLs, dl, VT, New0a, New1a);
14142     }
14143   }
14144   if (SDValue Op0 = IsZeroExt(N0)) {
14145     if (SDValue Op1 = IsZeroExt(N1)) {
14146       SDValue New0a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op0);
14147       SDValue New1a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op1);
14148       return DAG.getNode(ARMISD::VMULLu, dl, VT, New0a, New1a);
14149     }
14150   }
14151 
14152   return SDValue();
14153 }
14154 
14155 static SDValue PerformMULCombine(SDNode *N,
14156                                  TargetLowering::DAGCombinerInfo &DCI,
14157                                  const ARMSubtarget *Subtarget) {
14158   SelectionDAG &DAG = DCI.DAG;
14159 
14160   EVT VT = N->getValueType(0);
14161   if (Subtarget->hasMVEIntegerOps() && VT == MVT::v2i64)
14162     return PerformMVEVMULLCombine(N, DAG, Subtarget);
14163 
14164   if (Subtarget->isThumb1Only())
14165     return SDValue();
14166 
14167   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
14168     return SDValue();
14169 
14170   if (VT.is64BitVector() || VT.is128BitVector())
14171     return PerformVMULCombine(N, DCI, Subtarget);
14172   if (VT != MVT::i32)
14173     return SDValue();
14174 
14175   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
14176   if (!C)
14177     return SDValue();
14178 
14179   int64_t MulAmt = C->getSExtValue();
14180   unsigned ShiftAmt = llvm::countr_zero<uint64_t>(MulAmt);
14181 
14182   ShiftAmt = ShiftAmt & (32 - 1);
14183   SDValue V = N->getOperand(0);
14184   SDLoc DL(N);
14185 
14186   SDValue Res;
14187   MulAmt >>= ShiftAmt;
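  // For example (illustrative): MulAmt == 10 gives ShiftAmt == 1 and a
  // residual multiplier of 5 == 2^2 + 1, so the result is
  // (shl (add x, (shl x, 2)), 1).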
14188 
14189   if (MulAmt >= 0) {
14190     if (llvm::has_single_bit<uint32_t>(MulAmt - 1)) {
14191       // (mul x, 2^N + 1) => (add (shl x, N), x)
14192       Res = DAG.getNode(ISD::ADD, DL, VT,
14193                         V,
14194                         DAG.getNode(ISD::SHL, DL, VT,
14195                                     V,
14196                                     DAG.getConstant(Log2_32(MulAmt - 1), DL,
14197                                                     MVT::i32)));
14198     } else if (llvm::has_single_bit<uint32_t>(MulAmt + 1)) {
14199       // (mul x, 2^N - 1) => (sub (shl x, N), x)
14200       Res = DAG.getNode(ISD::SUB, DL, VT,
14201                         DAG.getNode(ISD::SHL, DL, VT,
14202                                     V,
14203                                     DAG.getConstant(Log2_32(MulAmt + 1), DL,
14204                                                     MVT::i32)),
14205                         V);
14206     } else
14207       return SDValue();
14208   } else {
14209     uint64_t MulAmtAbs = -MulAmt;
14210     if (llvm::has_single_bit<uint32_t>(MulAmtAbs + 1)) {
14211       // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
14212       Res = DAG.getNode(ISD::SUB, DL, VT,
14213                         V,
14214                         DAG.getNode(ISD::SHL, DL, VT,
14215                                     V,
14216                                     DAG.getConstant(Log2_32(MulAmtAbs + 1), DL,
14217                                                     MVT::i32)));
14218     } else if (llvm::has_single_bit<uint32_t>(MulAmtAbs - 1)) {
14219       // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
14220       Res = DAG.getNode(ISD::ADD, DL, VT,
14221                         V,
14222                         DAG.getNode(ISD::SHL, DL, VT,
14223                                     V,
14224                                     DAG.getConstant(Log2_32(MulAmtAbs - 1), DL,
14225                                                     MVT::i32)));
14226       Res = DAG.getNode(ISD::SUB, DL, VT,
14227                         DAG.getConstant(0, DL, MVT::i32), Res);
14228     } else
14229       return SDValue();
14230   }
14231 
14232   if (ShiftAmt != 0)
14233     Res = DAG.getNode(ISD::SHL, DL, VT,
14234                       Res, DAG.getConstant(ShiftAmt, DL, MVT::i32));
14235 
14236   // Do not add new nodes to DAG combiner worklist.
14237   DCI.CombineTo(N, Res, false);
14238   return SDValue();
14239 }
14240 
14241 static SDValue CombineANDShift(SDNode *N,
14242                                TargetLowering::DAGCombinerInfo &DCI,
14243                                const ARMSubtarget *Subtarget) {
14244   // Allow DAGCombine to pattern-match before we touch the canonical form.
14245   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
14246     return SDValue();
14247 
14248   if (N->getValueType(0) != MVT::i32)
14249     return SDValue();
14250 
14251   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
14252   if (!N1C)
14253     return SDValue();
14254 
14255   uint32_t C1 = (uint32_t)N1C->getZExtValue();
14256   // Don't transform uxtb/uxth.
14257   if (C1 == 255 || C1 == 65535)
14258     return SDValue();
14259 
14260   SDNode *N0 = N->getOperand(0).getNode();
14261   if (!N0->hasOneUse())
14262     return SDValue();
14263 
14264   if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL)
14265     return SDValue();
14266 
14267   bool LeftShift = N0->getOpcode() == ISD::SHL;
14268 
14269   ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
14270   if (!N01C)
14271     return SDValue();
14272 
14273   uint32_t C2 = (uint32_t)N01C->getZExtValue();
14274   if (!C2 || C2 >= 32)
14275     return SDValue();
14276 
14277   // Clear irrelevant bits in the mask.
14278   if (LeftShift)
14279     C1 &= (-1U << C2);
14280   else
14281     C1 &= (-1U >> C2);
14282 
14283   SelectionDAG &DAG = DCI.DAG;
14284   SDLoc DL(N);
14285 
14286   // We have a pattern of the form "(and (shl x, c2) c1)" or
14287   // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to
14288   // transform to a pair of shifts, to save materializing c1.
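  // For example (illustrative): (and (srl x, 3), 0x7f) becomes
  // (srl (shl x, 22), 25), avoiding a separate mask constant.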
14289 
14290   // First pattern: right shift, then mask off leading bits.
14291   // FIXME: Use demanded bits?
14292   if (!LeftShift && isMask_32(C1)) {
14293     uint32_t C3 = llvm::countl_zero(C1);
14294     if (C2 < C3) {
14295       SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
14296                                 DAG.getConstant(C3 - C2, DL, MVT::i32));
14297       return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
14298                          DAG.getConstant(C3, DL, MVT::i32));
14299     }
14300   }
14301 
14302   // First pattern, reversed: left shift, then mask off trailing bits.
14303   if (LeftShift && isMask_32(~C1)) {
14304     uint32_t C3 = llvm::countr_zero(C1);
14305     if (C2 < C3) {
14306       SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
14307                                 DAG.getConstant(C3 - C2, DL, MVT::i32));
14308       return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
14309                          DAG.getConstant(C3, DL, MVT::i32));
14310     }
14311   }
14312 
14313   // Second pattern: left shift, then mask off leading bits.
14314   // FIXME: Use demanded bits?
14315   if (LeftShift && isShiftedMask_32(C1)) {
14316     uint32_t Trailing = llvm::countr_zero(C1);
14317     uint32_t C3 = llvm::countl_zero(C1);
14318     if (Trailing == C2 && C2 + C3 < 32) {
14319       SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
14320                                 DAG.getConstant(C2 + C3, DL, MVT::i32));
14321       return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
                         DAG.getConstant(C3, DL, MVT::i32));
14323     }
14324   }
14325 
14326   // Second pattern, reversed: right shift, then mask off trailing bits.
14327   // FIXME: Handle other patterns of known/demanded bits.
14328   if (!LeftShift && isShiftedMask_32(C1)) {
14329     uint32_t Leading = llvm::countl_zero(C1);
14330     uint32_t C3 = llvm::countr_zero(C1);
14331     if (Leading == C2 && C2 + C3 < 32) {
14332       SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
14333                                 DAG.getConstant(C2 + C3, DL, MVT::i32));
14334       return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
14335                          DAG.getConstant(C3, DL, MVT::i32));
14336     }
14337   }
14338 
14339   // FIXME: Transform "(and (shl x, c2) c1)" ->
14340   // "(shl (and x, c1>>c2), c2)" if "c1 >> c2" is a cheaper immediate than
14341   // c1.
14342   return SDValue();
14343 }
14344 
14345 static SDValue PerformANDCombine(SDNode *N,
14346                                  TargetLowering::DAGCombinerInfo &DCI,
14347                                  const ARMSubtarget *Subtarget) {
14348   // Attempt to use immediate-form VBIC
14349   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
14350   SDLoc dl(N);
14351   EVT VT = N->getValueType(0);
14352   SelectionDAG &DAG = DCI.DAG;
14353 
14354   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT) || VT == MVT::v2i1 ||
14355       VT == MVT::v4i1 || VT == MVT::v8i1 || VT == MVT::v16i1)
14356     return SDValue();
14357 
14358   APInt SplatBits, SplatUndef;
14359   unsigned SplatBitSize;
14360   bool HasAnyUndefs;
14361   if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) &&
14362       BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
14363     if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 ||
14364         SplatBitSize == 64) {
14365       EVT VbicVT;
14366       SDValue Val = isVMOVModifiedImm((~SplatBits).getZExtValue(),
14367                                       SplatUndef.getZExtValue(), SplatBitSize,
14368                                       DAG, dl, VbicVT, VT, OtherModImm);
14369       if (Val.getNode()) {
14370         SDValue Input =
14371           DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
14372         SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
14373         return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
14374       }
14375     }
14376   }
14377 
14378   if (!Subtarget->isThumb1Only()) {
    // fold (and (select cc, -1, c), x) -> (select cc, x, (and x, c))
14380     if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI))
14381       return Result;
14382 
14383     if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
14384       return Result;
14385   }
14386 
14387   if (Subtarget->isThumb1Only())
14388     if (SDValue Result = CombineANDShift(N, DCI, Subtarget))
14389       return Result;
14390 
14391   return SDValue();
14392 }
14393 
14394 // Try combining OR nodes to SMULWB, SMULWT.
14395 static SDValue PerformORCombineToSMULWBT(SDNode *OR,
14396                                          TargetLowering::DAGCombinerInfo &DCI,
14397                                          const ARMSubtarget *Subtarget) {
14398   if (!Subtarget->hasV6Ops() ||
14399       (Subtarget->isThumb() &&
14400        (!Subtarget->hasThumb2() || !Subtarget->hasDSP())))
14401     return SDValue();
14402 
14403   SDValue SRL = OR->getOperand(0);
14404   SDValue SHL = OR->getOperand(1);
14405 
14406   if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) {
14407     SRL = OR->getOperand(1);
14408     SHL = OR->getOperand(0);
14409   }
14410   if (!isSRL16(SRL) || !isSHL16(SHL))
14411     return SDValue();
14412 
14413   // The first operands to the shifts need to be the two results from the
14414   // same smul_lohi node.
14415   if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) ||
14416        SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI)
14417     return SDValue();
14418 
14419   SDNode *SMULLOHI = SRL.getOperand(0).getNode();
14420   if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) ||
14421       SHL.getOperand(0) != SDValue(SMULLOHI, 1))
14422     return SDValue();
14423 
14424   // Now we have:
14425   // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16)))
  // For SMULW[B|T] smul_lohi will take a 32-bit and a 16-bit argument.
  // For SMULWB the 16-bit value will be sign extended somehow.
14428   // For SMULWT only the SRA is required.
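  // (SMULWB multiplies the 32-bit operand by the bottom halfword of the
  // other operand and keeps the top 32 bits of the 48-bit product; SMULWT
  // does the same with the top halfword.)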
14429   // Check both sides of SMUL_LOHI
14430   SDValue OpS16 = SMULLOHI->getOperand(0);
14431   SDValue OpS32 = SMULLOHI->getOperand(1);
14432 
14433   SelectionDAG &DAG = DCI.DAG;
14434   if (!isS16(OpS16, DAG) && !isSRA16(OpS16)) {
14435     OpS16 = OpS32;
14436     OpS32 = SMULLOHI->getOperand(0);
14437   }
14438 
14439   SDLoc dl(OR);
14440   unsigned Opcode = 0;
14441   if (isS16(OpS16, DAG))
14442     Opcode = ARMISD::SMULWB;
14443   else if (isSRA16(OpS16)) {
14444     Opcode = ARMISD::SMULWT;
14445     OpS16 = OpS16->getOperand(0);
14446   }
14447   else
14448     return SDValue();
14449 
14450   SDValue Res = DAG.getNode(Opcode, dl, MVT::i32, OpS32, OpS16);
14451   DAG.ReplaceAllUsesOfValueWith(SDValue(OR, 0), Res);
14452   return SDValue(OR, 0);
14453 }
14454 
14455 static SDValue PerformORCombineToBFI(SDNode *N,
14456                                      TargetLowering::DAGCombinerInfo &DCI,
14457                                      const ARMSubtarget *Subtarget) {
14458   // BFI is only available on V6T2+
14459   if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
14460     return SDValue();
14461 
14462   EVT VT = N->getValueType(0);
14463   SDValue N0 = N->getOperand(0);
14464   SDValue N1 = N->getOperand(1);
14465   SelectionDAG &DAG = DCI.DAG;
14466   SDLoc DL(N);
14467   // 1) or (and A, mask), val => ARMbfi A, val, mask
14468   //      iff (val & mask) == val
14469   //
14470   // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
14471   //  2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
14472   //          && mask == ~mask2
14473   //  2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
14474   //          && ~mask == mask2
14475   //  (i.e., copy a bitfield value into another bitfield of the same width)
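  //
  // For example (illustrative of case (1)):
  //   (or (and A, 0xffff00ff), 0x00002a00) => ARMbfi A, 0x2a, 0xffff00ff
  // i.e. 0x2a is inserted into bits [15:8] of A.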
14476 
14477   if (VT != MVT::i32)
14478     return SDValue();
14479 
14480   SDValue N00 = N0.getOperand(0);
14481 
14482   // The value and the mask need to be constants so we can verify this is
14483   // actually a bitfield set. If the mask is 0xffff, we can do better
14484   // via a movt instruction, so don't use BFI in that case.
14485   SDValue MaskOp = N0.getOperand(1);
14486   ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
14487   if (!MaskC)
14488     return SDValue();
14489   unsigned Mask = MaskC->getZExtValue();
14490   if (Mask == 0xffff)
14491     return SDValue();
14492   SDValue Res;
14493   // Case (1): or (and A, mask), val => ARMbfi A, val, mask
14494   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
14495   if (N1C) {
14496     unsigned Val = N1C->getZExtValue();
14497     if ((Val & ~Mask) != Val)
14498       return SDValue();
14499 
14500     if (ARM::isBitFieldInvertedMask(Mask)) {
14501       Val >>= llvm::countr_zero(~Mask);
14502 
14503       Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
14504                         DAG.getConstant(Val, DL, MVT::i32),
14505                         DAG.getConstant(Mask, DL, MVT::i32));
14506 
14507       DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner that N is
      // now dead.
14510       return SDValue(N, 0);
14511     }
14512   } else if (N1.getOpcode() == ISD::AND) {
14513     // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
14514     ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
14515     if (!N11C)
14516       return SDValue();
14517     unsigned Mask2 = N11C->getZExtValue();
14518 
    // Mask and ~Mask2 (or the reverse) must be equal for the BFI pattern to
    // match as-is.
14521     if (ARM::isBitFieldInvertedMask(Mask) &&
14522         (Mask == ~Mask2)) {
14523       // The pack halfword instruction works better for masks that fit it,
14524       // so use that when it's available.
14525       if (Subtarget->hasDSP() &&
14526           (Mask == 0xffff || Mask == 0xffff0000))
14527         return SDValue();
14528       // 2a
14529       unsigned amt = llvm::countr_zero(Mask2);
14530       Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
14531                         DAG.getConstant(amt, DL, MVT::i32));
14532       Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
14533                         DAG.getConstant(Mask, DL, MVT::i32));
14534       DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner that N is
      // now dead.
14537       return SDValue(N, 0);
14538     } else if (ARM::isBitFieldInvertedMask(~Mask) &&
14539                (~Mask == Mask2)) {
14540       // The pack halfword instruction works better for masks that fit it,
14541       // so use that when it's available.
14542       if (Subtarget->hasDSP() &&
14543           (Mask2 == 0xffff || Mask2 == 0xffff0000))
14544         return SDValue();
14545       // 2b
14546       unsigned lsb = llvm::countr_zero(Mask);
14547       Res = DAG.getNode(ISD::SRL, DL, VT, N00,
14548                         DAG.getConstant(lsb, DL, MVT::i32));
14549       Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
14550                         DAG.getConstant(Mask2, DL, MVT::i32));
14551       DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner that N is
      // now dead.
14554       return SDValue(N, 0);
14555     }
14556   }
14557 
14558   if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
14559       N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
14560       ARM::isBitFieldInvertedMask(~Mask)) {
14561     // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
14562     // where lsb(mask) == #shamt and masked bits of B are known zero.
14563     SDValue ShAmt = N00.getOperand(1);
14564     unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
14565     unsigned LSB = llvm::countr_zero(Mask);
14566     if (ShAmtC != LSB)
14567       return SDValue();
14568 
14569     Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
14570                       DAG.getConstant(~Mask, DL, MVT::i32));
14571 
14572     DCI.CombineTo(N, Res, false);
    // Return value from the original node to inform the combiner that N is
    // now dead.
14575     return SDValue(N, 0);
14576   }
14577 
14578   return SDValue();
14579 }
14580 
14581 static bool isValidMVECond(unsigned CC, bool IsFloat) {
14582   switch (CC) {
14583   case ARMCC::EQ:
14584   case ARMCC::NE:
14585   case ARMCC::LE:
14586   case ARMCC::GT:
14587   case ARMCC::GE:
14588   case ARMCC::LT:
14589     return true;
14590   case ARMCC::HS:
14591   case ARMCC::HI:
14592     return !IsFloat;
14593   default:
14594     return false;
14595   };
14596 }
14597 
14598 static ARMCC::CondCodes getVCMPCondCode(SDValue N) {
14599   if (N->getOpcode() == ARMISD::VCMP)
14600     return (ARMCC::CondCodes)N->getConstantOperandVal(2);
14601   else if (N->getOpcode() == ARMISD::VCMPZ)
14602     return (ARMCC::CondCodes)N->getConstantOperandVal(1);
14603   else
14604     llvm_unreachable("Not a VCMP/VCMPZ!");
14605 }
14606 
14607 static bool CanInvertMVEVCMP(SDValue N) {
14608   ARMCC::CondCodes CC = ARMCC::getOppositeCondition(getVCMPCondCode(N));
14609   return isValidMVECond(CC, N->getOperand(0).getValueType().isFloatingPoint());
14610 }
14611 
14612 static SDValue PerformORCombine_i1(SDNode *N, SelectionDAG &DAG,
14613                                    const ARMSubtarget *Subtarget) {
  // Try to rewrite "or A, B" as "~(and ~A, ~B)" (De Morgan), as the "and" is
  // easier to chain together with predicates.
14616   EVT VT = N->getValueType(0);
14617   SDLoc DL(N);
14618   SDValue N0 = N->getOperand(0);
14619   SDValue N1 = N->getOperand(1);
14620 
14621   auto IsFreelyInvertable = [&](SDValue V) {
14622     if (V->getOpcode() == ARMISD::VCMP || V->getOpcode() == ARMISD::VCMPZ)
14623       return CanInvertMVEVCMP(V);
14624     return false;
14625   };
14626 
14627   // At least one operand must be freely invertable.
14628   if (!(IsFreelyInvertable(N0) || IsFreelyInvertable(N1)))
14629     return SDValue();
14630 
14631   SDValue NewN0 = DAG.getLogicalNOT(DL, N0, VT);
14632   SDValue NewN1 = DAG.getLogicalNOT(DL, N1, VT);
14633   SDValue And = DAG.getNode(ISD::AND, DL, VT, NewN0, NewN1);
14634   return DAG.getLogicalNOT(DL, And, VT);
14635 }
14636 
14637 /// PerformORCombine - Target-specific dag combine xforms for ISD::OR
14638 static SDValue PerformORCombine(SDNode *N,
14639                                 TargetLowering::DAGCombinerInfo &DCI,
14640                                 const ARMSubtarget *Subtarget) {
14641   // Attempt to use immediate-form VORR
14642   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
14643   SDLoc dl(N);
14644   EVT VT = N->getValueType(0);
14645   SelectionDAG &DAG = DCI.DAG;
14646 
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
14648     return SDValue();
14649 
14650   if (Subtarget->hasMVEIntegerOps() && (VT == MVT::v2i1 || VT == MVT::v4i1 ||
14651                                         VT == MVT::v8i1 || VT == MVT::v16i1))
14652     return PerformORCombine_i1(N, DAG, Subtarget);
14653 
14654   APInt SplatBits, SplatUndef;
14655   unsigned SplatBitSize;
14656   bool HasAnyUndefs;
14657   if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) &&
14658       BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
14659     if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 ||
14660         SplatBitSize == 64) {
14661       EVT VorrVT;
14662       SDValue Val =
14663           isVMOVModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
14664                             SplatBitSize, DAG, dl, VorrVT, VT, OtherModImm);
14665       if (Val.getNode()) {
14666         SDValue Input =
14667           DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
14668         SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
14669         return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
14670       }
14671     }
14672   }
14673 
14674   if (!Subtarget->isThumb1Only()) {
    // fold (or (select cc, 0, c), x) -> (select cc, x, (or x, c))
14676     if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
14677       return Result;
14678     if (SDValue Result = PerformORCombineToSMULWBT(N, DCI, Subtarget))
14679       return Result;
14680   }
14681 
14682   SDValue N0 = N->getOperand(0);
14683   SDValue N1 = N->getOperand(1);
14684 
14685   // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
14686   if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
14687       DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
14688 
14689     // The code below optimizes (or (and X, Y), Z).
14690     // The AND operand needs to have a single user to make these optimizations
14691     // profitable.
14692     if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
14693       return SDValue();
14694 
14695     APInt SplatUndef;
14696     unsigned SplatBitSize;
14697     bool HasAnyUndefs;
14698 
14699     APInt SplatBits0, SplatBits1;
14700     BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
14701     BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
    // Ensure that the second operand of both ANDs is a constant.
    if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
                                      HasAnyUndefs) && !HasAnyUndefs) {
      if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
                                        HasAnyUndefs) && !HasAnyUndefs) {
        // Ensure that the bit widths of the constants are the same and that
        // the splat arguments are logical inverses, as per the pattern we are
        // trying to simplify.
        if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
            SplatBits0 == ~SplatBits1) {
          // Canonicalize the vector type to make instruction selection
          // simpler.
          EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
          SDValue Result = DAG.getNode(ARMISD::VBSP, dl, CanonicalVT,
                                       N0->getOperand(1), N0->getOperand(0),
                                       N1->getOperand(0));
          return DAG.getNode(ISD::BITCAST, dl, VT, Result);
        }
      }
    }
14723   }
14724 
14725   // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when
14726   // reasonable.
14727   if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
14728     if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget))
14729       return Res;
14730   }
14731 
14732   if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
14733     return Result;
14734 
14735   return SDValue();
14736 }
14737 
14738 static SDValue PerformXORCombine(SDNode *N,
14739                                  TargetLowering::DAGCombinerInfo &DCI,
14740                                  const ARMSubtarget *Subtarget) {
14741   EVT VT = N->getValueType(0);
14742   SelectionDAG &DAG = DCI.DAG;
14743 
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
14745     return SDValue();
14746 
14747   if (!Subtarget->isThumb1Only()) {
    // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor x, c))
14749     if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
14750       return Result;
14751 
14752     if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
14753       return Result;
14754   }
14755 
14756   if (Subtarget->hasMVEIntegerOps()) {
14757     // fold (xor(vcmp/z, 1)) into a vcmp with the opposite condition.
14758     SDValue N0 = N->getOperand(0);
14759     SDValue N1 = N->getOperand(1);
14760     const TargetLowering *TLI = Subtarget->getTargetLowering();
14761     if (TLI->isConstTrueVal(N1) &&
14762         (N0->getOpcode() == ARMISD::VCMP || N0->getOpcode() == ARMISD::VCMPZ)) {
14763       if (CanInvertMVEVCMP(N0)) {
14764         SDLoc DL(N0);
14765         ARMCC::CondCodes CC = ARMCC::getOppositeCondition(getVCMPCondCode(N0));
14766 
14767         SmallVector<SDValue, 4> Ops;
14768         Ops.push_back(N0->getOperand(0));
14769         if (N0->getOpcode() == ARMISD::VCMP)
14770           Ops.push_back(N0->getOperand(1));
14771         Ops.push_back(DAG.getConstant(CC, DL, MVT::i32));
14772         return DAG.getNode(N0->getOpcode(), DL, N0->getValueType(0), Ops);
14773       }
14774     }
14775   }
14776 
14777   return SDValue();
14778 }
14779 
// ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and
// return it, and fill in FromMask and ToMask with (consecutive) bits in
// "from" to be extracted and their position in "to" (Rd).
14783 static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) {
14784   assert(N->getOpcode() == ARMISD::BFI);
14785 
14786   SDValue From = N->getOperand(1);
14787   ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue();
14788   FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.popcount());
14789 
14790   // If the Base came from a SHR #C, we can deduce that it is really testing bit
14791   // #C in the base of the SHR.
14792   if (From->getOpcode() == ISD::SRL &&
14793       isa<ConstantSDNode>(From->getOperand(1))) {
14794     APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
14795     assert(Shift.getLimitedValue() < 32 && "Shift too large!");
14796     FromMask <<= Shift.getLimitedValue(31);
14797     From = From->getOperand(0);
14798   }
14799 
14800   return From;
14801 }
14802 
// If A and B each contain one contiguous set of bits, does A | B == A . B?
//
// Neither A nor B may be zero.
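//
// For example (illustrative): A == 0b1100 and B == 0b0011 concatenate
// properly, since the lowest set bit of A sits directly above the highest
// set bit of B.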
14806 static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) {
14807   unsigned LastActiveBitInA = A.countr_zero();
14808   unsigned FirstActiveBitInB = B.getBitWidth() - B.countl_zero() - 1;
14809   return LastActiveBitInA - 1 == FirstActiveBitInB;
14810 }
14811 
14812 static SDValue FindBFIToCombineWith(SDNode *N) {
14813   // We have a BFI in N. Find a BFI it can combine with, if one exists.
14814   APInt ToMask, FromMask;
14815   SDValue From = ParseBFI(N, ToMask, FromMask);
14816   SDValue To = N->getOperand(0);
14817 
14818   SDValue V = To;
14819   if (V.getOpcode() != ARMISD::BFI)
14820     return SDValue();
14821 
14822   APInt NewToMask, NewFromMask;
14823   SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask);
14824   if (NewFrom != From)
14825     return SDValue();
14826 
14827   // Do the written bits conflict with any we've seen so far?
14828   if ((NewToMask & ToMask).getBoolValue())
14829     // Conflicting bits.
14830     return SDValue();
14831 
14832   // Are the new bits contiguous when combined with the old bits?
14833   if (BitsProperlyConcatenate(ToMask, NewToMask) &&
14834       BitsProperlyConcatenate(FromMask, NewFromMask))
14835     return V;
14836   if (BitsProperlyConcatenate(NewToMask, ToMask) &&
14837       BitsProperlyConcatenate(NewFromMask, FromMask))
14838     return V;
14839 
14840   return SDValue();
14841 }
14842 
14843 static SDValue PerformBFICombine(SDNode *N, SelectionDAG &DAG) {
14844   SDValue N0 = N->getOperand(0);
14845   SDValue N1 = N->getOperand(1);
14846 
14847   if (N1.getOpcode() == ISD::AND) {
14848     // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
14849     // the bits being cleared by the AND are not demanded by the BFI.
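    // For example (illustrative): if the BFI inserts into bits [7:4] of the
    // destination (operand 2 == 0xffffff0f), it only demands bits [3:0] of B,
    // so an AND with Mask1 == 0x3f can be dropped.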
14850     ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
14851     if (!N11C)
14852       return SDValue();
14853     unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
14854     unsigned LSB = llvm::countr_zero(~InvMask);
14855     unsigned Width = llvm::bit_width<unsigned>(~InvMask) - LSB;
14856     assert(Width <
14857                static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
14858            "undefined behavior");
14859     unsigned Mask = (1u << Width) - 1;
14860     unsigned Mask2 = N11C->getZExtValue();
14861     if ((Mask & (~Mask2)) == 0)
14862       return DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
14863                          N->getOperand(0), N1.getOperand(0), N->getOperand(2));
14864     return SDValue();
14865   }
14866 
14867   // Look for another BFI to combine with.
14868   if (SDValue CombineBFI = FindBFIToCombineWith(N)) {
14869     // We've found a BFI.
14870     APInt ToMask1, FromMask1;
14871     SDValue From1 = ParseBFI(N, ToMask1, FromMask1);
14872 
14873     APInt ToMask2, FromMask2;
14874     SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2);
14875     assert(From1 == From2);
14876     (void)From2;
14877 
14878     // Create a new BFI, combining the two together.
14879     APInt NewFromMask = FromMask1 | FromMask2;
14880     APInt NewToMask = ToMask1 | ToMask2;
14881 
14882     EVT VT = N->getValueType(0);
14883     SDLoc dl(N);
14884 
14885     if (NewFromMask[0] == 0)
14886       From1 = DAG.getNode(ISD::SRL, dl, VT, From1,
14887                           DAG.getConstant(NewFromMask.countr_zero(), dl, VT));
14888     return DAG.getNode(ARMISD::BFI, dl, VT, CombineBFI.getOperand(0), From1,
14889                        DAG.getConstant(~NewToMask, dl, VT));
14890   }
14891 
14892   // Reassociate BFI(BFI (A, B, M1), C, M2) to BFI(BFI (A, C, M2), B, M1) so
// that lower bit insertions are performed first, provided that M1 and M2
// do not overlap. This can allow multiple BFI instructions to be combined
14895   // together by the other folds above.
14896   if (N->getOperand(0).getOpcode() == ARMISD::BFI) {
14897     APInt ToMask1 = ~N->getConstantOperandAPInt(2);
14898     APInt ToMask2 = ~N0.getConstantOperandAPInt(2);
14899 
14900     if (!N0.hasOneUse() || (ToMask1 & ToMask2) != 0 ||
14901         ToMask1.countl_zero() < ToMask2.countl_zero())
14902       return SDValue();
14903 
14904     EVT VT = N->getValueType(0);
14905     SDLoc dl(N);
14906     SDValue BFI1 = DAG.getNode(ARMISD::BFI, dl, VT, N0.getOperand(0),
14907                                N->getOperand(1), N->getOperand(2));
14908     return DAG.getNode(ARMISD::BFI, dl, VT, BFI1, N0.getOperand(1),
14909                        N0.getOperand(2));
14910   }
14911 
14912   return SDValue();
14913 }
14914 
14915 // Check that N is CMPZ(CSINC(0, 0, CC, X)),
14916 //              or CMPZ(CMOV(1, 0, CC, $cpsr, X))
// and return X if valid.
14918 static SDValue IsCMPZCSINC(SDNode *Cmp, ARMCC::CondCodes &CC) {
14919   if (Cmp->getOpcode() != ARMISD::CMPZ || !isNullConstant(Cmp->getOperand(1)))
14920     return SDValue();
14921   SDValue CSInc = Cmp->getOperand(0);
14922 
14923   // Ignore any `And 1` nodes that may not yet have been removed. We are
14924   // looking for a value that produces 1/0, so these have no effect on the
14925   // code.
14926   while (CSInc.getOpcode() == ISD::AND &&
14927          isa<ConstantSDNode>(CSInc.getOperand(1)) &&
14928          CSInc.getConstantOperandVal(1) == 1 && CSInc->hasOneUse())
14929     CSInc = CSInc.getOperand(0);
14930 
14931   if (CSInc.getOpcode() == ARMISD::CSINC &&
14932       isNullConstant(CSInc.getOperand(0)) &&
14933       isNullConstant(CSInc.getOperand(1)) && CSInc->hasOneUse()) {
14934     CC = (ARMCC::CondCodes)CSInc.getConstantOperandVal(2);
14935     return CSInc.getOperand(3);
14936   }
14937   if (CSInc.getOpcode() == ARMISD::CMOV && isOneConstant(CSInc.getOperand(0)) &&
14938       isNullConstant(CSInc.getOperand(1)) && CSInc->hasOneUse()) {
14939     CC = (ARMCC::CondCodes)CSInc.getConstantOperandVal(2);
14940     return CSInc.getOperand(4);
14941   }
14942   if (CSInc.getOpcode() == ARMISD::CMOV && isOneConstant(CSInc.getOperand(1)) &&
14943       isNullConstant(CSInc.getOperand(0)) && CSInc->hasOneUse()) {
14944     CC = ARMCC::getOppositeCondition(
14945         (ARMCC::CondCodes)CSInc.getConstantOperandVal(2));
14946     return CSInc.getOperand(4);
14947   }
14948   return SDValue();
14949 }
14950 
14951 static SDValue PerformCMPZCombine(SDNode *N, SelectionDAG &DAG) {
  // Given CMPZ(CSINC(0, 0, EQ, C), 0), we can just use C directly. As in
14953   //       t92: glue = ARMISD::CMPZ t74, 0
14954   //     t93: i32 = ARMISD::CSINC 0, 0, 1, t92
14955   //   t96: glue = ARMISD::CMPZ t93, 0
14956   // t114: i32 = ARMISD::CSINV 0, 0, 0, t96
14957   ARMCC::CondCodes Cond;
14958   if (SDValue C = IsCMPZCSINC(N, Cond))
14959     if (Cond == ARMCC::EQ)
14960       return C;
14961   return SDValue();
14962 }
14963 
14964 static SDValue PerformCSETCombine(SDNode *N, SelectionDAG &DAG) {
  // Fold away an unnecessary CMPZ/CSINC
14966   // CSXYZ A, B, C1 (CMPZ (CSINC 0, 0, C2, D), 0) ->
14967   // if C1==EQ -> CSXYZ A, B, C2, D
14968   // if C1==NE -> CSXYZ A, B, NOT(C2), D
14969   ARMCC::CondCodes Cond;
14970   if (SDValue C = IsCMPZCSINC(N->getOperand(3).getNode(), Cond)) {
14971     if (N->getConstantOperandVal(2) == ARMCC::EQ)
14972       return DAG.getNode(N->getOpcode(), SDLoc(N), MVT::i32, N->getOperand(0),
14973                          N->getOperand(1),
14974                          DAG.getConstant(Cond, SDLoc(N), MVT::i32), C);
14975     if (N->getConstantOperandVal(2) == ARMCC::NE)
14976       return DAG.getNode(
14977           N->getOpcode(), SDLoc(N), MVT::i32, N->getOperand(0),
14978           N->getOperand(1),
          DAG.getConstant(ARMCC::getOppositeCondition(Cond), SDLoc(N),
                          MVT::i32),
          C);
14980   }
14981   return SDValue();
14982 }
14983 
14984 /// PerformVMOVRRDCombine - Target-specific dag combine xforms for
14985 /// ARMISD::VMOVRRD.
14986 static SDValue PerformVMOVRRDCombine(SDNode *N,
14987                                      TargetLowering::DAGCombinerInfo &DCI,
14988                                      const ARMSubtarget *Subtarget) {
14989   // vmovrrd(vmovdrr x, y) -> x,y
14990   SDValue InDouble = N->getOperand(0);
14991   if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64())
14992     return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
14993 
14994   // vmovrrd(load f64) -> (load i32), (load i32)
14995   SDNode *InNode = InDouble.getNode();
14996   if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() &&
14997       InNode->getValueType(0) == MVT::f64 &&
14998       InNode->getOperand(1).getOpcode() == ISD::FrameIndex &&
14999       !cast<LoadSDNode>(InNode)->isVolatile()) {
15000     // TODO: Should this be done for non-FrameIndex operands?
15001     LoadSDNode *LD = cast<LoadSDNode>(InNode);
15002 
15003     SelectionDAG &DAG = DCI.DAG;
15004     SDLoc DL(LD);
15005     SDValue BasePtr = LD->getBasePtr();
15006     SDValue NewLD1 =
15007         DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(),
15008                     LD->getAlign(), LD->getMemOperand()->getFlags());
15009 
15010     SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
15011                                     DAG.getConstant(4, DL, MVT::i32));
15012 
15013     SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, LD->getChain(), OffsetPtr,
15014                                  LD->getPointerInfo().getWithOffset(4),
15015                                  commonAlignment(LD->getAlign(), 4),
15016                                  LD->getMemOperand()->getFlags());
15017 
15018     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
15019     if (DCI.DAG.getDataLayout().isBigEndian())
      std::swap(NewLD1, NewLD2);
15021     SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
15022     return Result;
15023   }
15024 
15025   // VMOVRRD(extract(..(build_vector(a, b, c, d)))) -> a,b or c,d
15026   // VMOVRRD(extract(insert_vector(insert_vector(.., a, l1), b, l2))) -> a,b
15027   if (InDouble.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
15028       isa<ConstantSDNode>(InDouble.getOperand(1))) {
15029     SDValue BV = InDouble.getOperand(0);
    // Look up through any nop bitcasts and vector_reg_casts. Bitcasts may
    // change the lane order under big endian.
15032     bool BVSwap = BV.getOpcode() == ISD::BITCAST;
15033     while (
15034         (BV.getOpcode() == ISD::BITCAST ||
15035          BV.getOpcode() == ARMISD::VECTOR_REG_CAST) &&
15036         (BV.getValueType() == MVT::v2f64 || BV.getValueType() == MVT::v2i64)) {
15037       BVSwap = BV.getOpcode() == ISD::BITCAST;
15038       BV = BV.getOperand(0);
15039     }
15040     if (BV.getValueType() != MVT::v4i32)
15041       return SDValue();
15042 
15043     // Handle buildvectors, pulling out the correct lane depending on
15044     // endianness.
15045     unsigned Offset = InDouble.getConstantOperandVal(1) == 1 ? 2 : 0;
15046     if (BV.getOpcode() == ISD::BUILD_VECTOR) {
15047       SDValue Op0 = BV.getOperand(Offset);
15048       SDValue Op1 = BV.getOperand(Offset + 1);
15049       if (!Subtarget->isLittle() && BVSwap)
15050         std::swap(Op0, Op1);
15051 
15052       return DCI.DAG.getMergeValues({Op0, Op1}, SDLoc(N));
15053     }
15054 
15055     // A chain of insert_vectors, grabbing the correct value of the chain of
15056     // inserts.
15057     SDValue Op0, Op1;
15058     while (BV.getOpcode() == ISD::INSERT_VECTOR_ELT) {
15059       if (isa<ConstantSDNode>(BV.getOperand(2))) {
15060         if (BV.getConstantOperandVal(2) == Offset)
15061           Op0 = BV.getOperand(1);
15062         if (BV.getConstantOperandVal(2) == Offset + 1)
15063           Op1 = BV.getOperand(1);
15064       }
15065       BV = BV.getOperand(0);
15066     }
15067     if (!Subtarget->isLittle() && BVSwap)
15068       std::swap(Op0, Op1);
15069     if (Op0 && Op1)
15070       return DCI.DAG.getMergeValues({Op0, Op1}, SDLoc(N));
15071   }
15072 
15073   return SDValue();
15074 }
15075 
15076 /// PerformVMOVDRRCombine - Target-specific dag combine xforms for
15077 /// ARMISD::VMOVDRR.  This is also used for BUILD_VECTORs with 2 operands.
15078 static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
15079   // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
15080   SDValue Op0 = N->getOperand(0);
15081   SDValue Op1 = N->getOperand(1);
15082   if (Op0.getOpcode() == ISD::BITCAST)
15083     Op0 = Op0.getOperand(0);
15084   if (Op1.getOpcode() == ISD::BITCAST)
15085     Op1 = Op1.getOperand(0);
15086   if (Op0.getOpcode() == ARMISD::VMOVRRD &&
15087       Op0.getNode() == Op1.getNode() &&
15088       Op0.getResNo() == 0 && Op1.getResNo() == 1)
15089     return DAG.getNode(ISD::BITCAST, SDLoc(N),
15090                        N->getValueType(0), Op0.getOperand(0));
15091   return SDValue();
15092 }
15093 
15094 static SDValue PerformVMOVhrCombine(SDNode *N,
15095                                     TargetLowering::DAGCombinerInfo &DCI) {
15096   SDValue Op0 = N->getOperand(0);
15097 
15098   // VMOVhr (VMOVrh (X)) -> X
15099   if (Op0->getOpcode() == ARMISD::VMOVrh)
15100     return Op0->getOperand(0);
15101 
  // FullFP16: half values are passed in S-registers, and we don't
  // need any of the bitcasts and moves:
15104   //
15105   //     t2: f32,ch1,gl1? = CopyFromReg ch, Register:f32 %0, gl?
15106   //   t5: i32 = bitcast t2
15107   // t18: f16 = ARMISD::VMOVhr t5
15108   // =>
  // tN: f16,ch2,gl2? = CopyFromReg ch, Register:f32 %0, gl?
15110   if (Op0->getOpcode() == ISD::BITCAST) {
15111     SDValue Copy = Op0->getOperand(0);
15112     if (Copy.getValueType() == MVT::f32 &&
15113         Copy->getOpcode() == ISD::CopyFromReg) {
15114       bool HasGlue = Copy->getNumOperands() == 3;
15115       SDValue Ops[] = {Copy->getOperand(0), Copy->getOperand(1),
15116                        HasGlue ? Copy->getOperand(2) : SDValue()};
15117       EVT OutTys[] = {N->getValueType(0), MVT::Other, MVT::Glue};
15118       SDValue NewCopy =
15119           DCI.DAG.getNode(ISD::CopyFromReg, SDLoc(N),
15120                           DCI.DAG.getVTList(ArrayRef(OutTys, HasGlue ? 3 : 2)),
15121                           ArrayRef(Ops, HasGlue ? 3 : 2));
15122 
15123       // Update Users, Chains, and Potential Glue.
15124       DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), NewCopy.getValue(0));
15125       DCI.DAG.ReplaceAllUsesOfValueWith(Copy.getValue(1), NewCopy.getValue(1));
15126       if (HasGlue)
15127         DCI.DAG.ReplaceAllUsesOfValueWith(Copy.getValue(2),
15128                                           NewCopy.getValue(2));
15129 
15130       return NewCopy;
15131     }
15132   }
15133 
15134   // fold (VMOVhr (load x)) -> (load (f16*)x)
15135   if (LoadSDNode *LN0 = dyn_cast<LoadSDNode>(Op0)) {
15136     if (LN0->hasOneUse() && LN0->isUnindexed() &&
15137         LN0->getMemoryVT() == MVT::i16) {
15138       SDValue Load =
15139           DCI.DAG.getLoad(N->getValueType(0), SDLoc(N), LN0->getChain(),
15140                           LN0->getBasePtr(), LN0->getMemOperand());
15141       DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0));
15142       DCI.DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Load.getValue(1));
15143       return Load;
15144     }
15145   }
15146 
15147   // Only the bottom 16 bits of the source register are used.
15148   APInt DemandedMask = APInt::getLowBitsSet(32, 16);
15149   const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo();
15150   if (TLI.SimplifyDemandedBits(Op0, DemandedMask, DCI))
15151     return SDValue(N, 0);
15152 
15153   return SDValue();
15154 }
15155 
15156 static SDValue PerformVMOVrhCombine(SDNode *N, SelectionDAG &DAG) {
15157   SDValue N0 = N->getOperand(0);
15158   EVT VT = N->getValueType(0);
15159 
15160   // fold (VMOVrh (fpconst x)) -> const x
15161   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N0)) {
15162     APFloat V = C->getValueAPF();
15163     return DAG.getConstant(V.bitcastToAPInt().getZExtValue(), SDLoc(N), VT);
15164   }
15165 
15166   // fold (VMOVrh (load x)) -> (zextload (i16*)x)
15167   if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse()) {
15168     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
15169 
15170     SDValue Load =
15171         DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, LN0->getChain(),
15172                        LN0->getBasePtr(), MVT::i16, LN0->getMemOperand());
15173     DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0));
15174     DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
15175     return Load;
15176   }
15177 
15178   // Fold VMOVrh(extract(x, n)) -> vgetlaneu(x, n)
15179   if (N0->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
15180       isa<ConstantSDNode>(N0->getOperand(1)))
15181     return DAG.getNode(ARMISD::VGETLANEu, SDLoc(N), VT, N0->getOperand(0),
15182                        N0->getOperand(1));
15183 
15184   return SDValue();
15185 }
15186 
15187 /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
15188 /// are normal, non-volatile loads.  If so, it is profitable to bitcast an
15189 /// i64 vector to have f64 elements, since the value can then be loaded
15190 /// directly into a VFP register.
15191 static bool hasNormalLoadOperand(SDNode *N) {
15192   unsigned NumElts = N->getValueType(0).getVectorNumElements();
15193   for (unsigned i = 0; i < NumElts; ++i) {
15194     SDNode *Elt = N->getOperand(i).getNode();
15195     if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile())
15196       return true;
15197   }
15198   return false;
15199 }
15200 
15201 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
15202 /// ISD::BUILD_VECTOR.
15203 static SDValue PerformBUILD_VECTORCombine(SDNode *N,
15204                                           TargetLowering::DAGCombinerInfo &DCI,
15205                                           const ARMSubtarget *Subtarget) {
15206   // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
15207   // VMOVRRD is introduced when legalizing i64 types.  It forces the i64 value
15208   // into a pair of GPRs, which is fine when the value is used as a scalar,
15209   // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
15210   SelectionDAG &DAG = DCI.DAG;
15211   if (N->getNumOperands() == 2)
15212     if (SDValue RV = PerformVMOVDRRCombine(N, DAG))
15213       return RV;
15214 
15215   // Load i64 elements as f64 values so that type legalization does not split
15216   // them up into i32 values.
15217   EVT VT = N->getValueType(0);
15218   if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
15219     return SDValue();
15220   SDLoc dl(N);
15221   SmallVector<SDValue, 8> Ops;
15222   unsigned NumElts = VT.getVectorNumElements();
15223   for (unsigned i = 0; i < NumElts; ++i) {
15224     SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i));
15225     Ops.push_back(V);
15226     // Make the DAGCombiner fold the bitcast.
15227     DCI.AddToWorklist(V.getNode());
15228   }
15229   EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
15230   SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops);
15231   return DAG.getNode(ISD::BITCAST, dl, VT, BV);
15232 }
15233 
15234 /// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
15235 static SDValue
15236 PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
15237   // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR.
15238   // At that time, we may have inserted bitcasts from integer to float.
15239   // If these bitcasts have survived DAGCombine, change the lowering of this
15240   // BUILD_VECTOR into something more vector friendly, i.e., something that
15241   // does not force the use of floating point types.
15242 
15243   // Make sure we can change the type of the vector.
15244   // This is possible iff:
15245   // 1. The vector is only used in a bitcast to an integer type. I.e.,
15246   //    1.1. Vector is used only once.
15247   //    1.2. Use is a bit convert to an integer type.
15248   // 2. The size of its operands is 32 bits (64 bits is not legal).
15249   EVT VT = N->getValueType(0);
15250   EVT EltVT = VT.getVectorElementType();
15251 
15252   // Check 1.1. and 2.
15253   if (EltVT.getSizeInBits() != 32 || !N->hasOneUse())
15254     return SDValue();
15255 
15256   // By construction, the input type must be float.
15257   assert(EltVT == MVT::f32 && "Unexpected type!");
15258 
15259   // Check 1.2.
15260   SDNode *Use = *N->use_begin();
15261   if (Use->getOpcode() != ISD::BITCAST ||
15262       Use->getValueType(0).isFloatingPoint())
15263     return SDValue();
15264 
15265   // Check profitability.
15266   // The model is: if more than half of the relevant operands are bitcast
15267   // from i32, turn the build_vector into a sequence of insert_vector_elt.
15268   // Relevant operands are everything that is not statically
15269   // (i.e., at compile time) bitcast.
15270   unsigned NumOfBitCastedElts = 0;
15271   unsigned NumElts = VT.getVectorNumElements();
15272   unsigned NumOfRelevantElts = NumElts;
15273   for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
15274     SDValue Elt = N->getOperand(Idx);
15275     if (Elt->getOpcode() == ISD::BITCAST) {
15276       // Assume only bit cast to i32 will go away.
15277       if (Elt->getOperand(0).getValueType() == MVT::i32)
15278         ++NumOfBitCastedElts;
15279     } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt))
15280       // Constants are statically bitcast, so do not count them as
15281       // relevant operands.
15282       --NumOfRelevantElts;
15283   }
15284 
15285   // Bail out unless more than half of the relevant elements need a non-free bitcast.
15286   if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
15287     return SDValue();
15288 
15289   SelectionDAG &DAG = DCI.DAG;
15290   // Create the new vector type.
15291   EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
15292   // Check if the type is legal.
15293   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15294   if (!TLI.isTypeLegal(VecVT))
15295     return SDValue();
15296 
15297   // Combine:
15298   // ARMISD::BUILD_VECTOR E1, E2, ..., EN.
15299   // => BITCAST INSERT_VECTOR_ELT
15300   //                      (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1),
15301   //                      (BITCAST EN), N.
15302   SDValue Vec = DAG.getUNDEF(VecVT);
15303   SDLoc dl(N);
15304   for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
15305     SDValue V = N->getOperand(Idx);
15306     if (V.isUndef())
15307       continue;
15308     if (V.getOpcode() == ISD::BITCAST &&
15309         V->getOperand(0).getValueType() == MVT::i32)
15310       // Fold obvious case.
15311       V = V.getOperand(0);
15312     else {
15313       V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V);
15314       // Make the DAGCombiner fold the bitcasts.
15315       DCI.AddToWorklist(V.getNode());
15316     }
15317     SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32);
15318     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx);
15319   }
15320   Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec);
15321   // Make the DAGCombiner fold the bitcasts.
15322   DCI.AddToWorklist(Vec.getNode());
15323   return Vec;
15324 }
15325 
15326 static SDValue
15327 PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
15328   EVT VT = N->getValueType(0);
15329   SDValue Op = N->getOperand(0);
15330   SDLoc dl(N);
15331 
15332   // PREDICATE_CAST(PREDICATE_CAST(x)) == PREDICATE_CAST(x)
15333   if (Op->getOpcode() == ARMISD::PREDICATE_CAST) {
15334     // If the value types are the same, we can remove the cast entirely.
15335     if (Op->getOperand(0).getValueType() == VT)
15336       return Op->getOperand(0);
15337     return DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, Op->getOperand(0));
15338   }
15339 
15340   // Turn pred_cast(xor x, -1) into xor(pred_cast x, -1), in order to produce
15341   // more VPNOTs, which might get folded into else predicates.
15342   if (Op.getValueType() == MVT::i32 && isBitwiseNot(Op)) {
15343     SDValue X =
15344         DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, Op->getOperand(0));
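          // 0xffff sets all 16 predicate bits; only the bottom 16 bits of the
          // i32 predicate value are significant (see below).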
15345     SDValue C = DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT,
15346                                 DCI.DAG.getConstant(65535, dl, MVT::i32));
15347     return DCI.DAG.getNode(ISD::XOR, dl, VT, X, C);
15348   }
15349 
15350   // Only the bottom 16 bits of the source register are used.
15351   if (Op.getValueType() == MVT::i32) {
15352     APInt DemandedMask = APInt::getLowBitsSet(32, 16);
15353     const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo();
15354     if (TLI.SimplifyDemandedBits(Op, DemandedMask, DCI))
15355       return SDValue(N, 0);
15356   }
15357   return SDValue();
15358 }
15359 
15360 static SDValue PerformVECTOR_REG_CASTCombine(SDNode *N, SelectionDAG &DAG,
15361                                              const ARMSubtarget *ST) {
15362   EVT VT = N->getValueType(0);
15363   SDValue Op = N->getOperand(0);
15364   SDLoc dl(N);
15365 
15366   // Under little endian, a VECTOR_REG_CAST is equivalent to a BITCAST.
15367   if (ST->isLittle())
15368     return DAG.getNode(ISD::BITCAST, dl, VT, Op);
15369 
15370   // VECTOR_REG_CAST undef -> undef
15371   if (Op.isUndef())
15372     return DAG.getUNDEF(VT);
15373 
15374   // VECTOR_REG_CAST(VECTOR_REG_CAST(x)) == VECTOR_REG_CAST(x)
15375   if (Op->getOpcode() == ARMISD::VECTOR_REG_CAST) {
15376     // If the value types are the same, we can remove the cast entirely.
15377     if (Op->getOperand(0).getValueType() == VT)
15378       return Op->getOperand(0);
15379     return DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, Op->getOperand(0));
15380   }
15381 
15382   return SDValue();
15383 }
15384 
15385 static SDValue PerformVCMPCombine(SDNode *N, SelectionDAG &DAG,
15386                                   const ARMSubtarget *Subtarget) {
15387   if (!Subtarget->hasMVEIntegerOps())
15388     return SDValue();
15389 
15390   EVT VT = N->getValueType(0);
15391   SDValue Op0 = N->getOperand(0);
15392   SDValue Op1 = N->getOperand(1);
15393   ARMCC::CondCodes Cond =
15394       (ARMCC::CondCodes)cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
15395   SDLoc dl(N);
15396 
15397   // vcmp X, 0, cc -> vcmpz X, cc
15398   if (isZeroVector(Op1))
15399     return DAG.getNode(ARMISD::VCMPZ, dl, VT, Op0, N->getOperand(2));
15400 
15401   unsigned SwappedCond = getSwappedCondition(Cond);
15402   if (isValidMVECond(SwappedCond, VT.isFloatingPoint())) {
15403     // vcmp 0, X, cc -> vcmpz X, reversed(cc)
15404     if (isZeroVector(Op0))
15405       return DAG.getNode(ARMISD::VCMPZ, dl, VT, Op1,
15406                          DAG.getConstant(SwappedCond, dl, MVT::i32));
15407     // vcmp vdup(Y), X, cc -> vcmp X, vdup(Y), reversed(cc)
15408     if (Op0->getOpcode() == ARMISD::VDUP && Op1->getOpcode() != ARMISD::VDUP)
15409       return DAG.getNode(ARMISD::VCMP, dl, VT, Op1, Op0,
15410                          DAG.getConstant(SwappedCond, dl, MVT::i32));
15411   }
15412 
15413   return SDValue();
15414 }
15415 
15416 /// PerformInsertEltCombine - Target-specific dag combine xforms for
15417 /// ISD::INSERT_VECTOR_ELT.
15418 static SDValue PerformInsertEltCombine(SDNode *N,
15419                                        TargetLowering::DAGCombinerInfo &DCI) {
15420   // Bitcast an i64 load inserted into a vector to f64.
15421   // Otherwise, the i64 value will be legalized to a pair of i32 values.
15422   EVT VT = N->getValueType(0);
15423   SDNode *Elt = N->getOperand(1).getNode();
15424   if (VT.getVectorElementType() != MVT::i64 ||
15425       !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile())
15426     return SDValue();
15427 
15428   SelectionDAG &DAG = DCI.DAG;
15429   SDLoc dl(N);
15430   EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
15431                                  VT.getVectorNumElements());
15432   SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
15433   SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1));
15434   // Make the DAGCombiner fold the bitcasts.
15435   DCI.AddToWorklist(Vec.getNode());
15436   DCI.AddToWorklist(V.getNode());
15437   SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT,
15438                                Vec, V, N->getOperand(2));
15439   return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
15440 }
15441 
15442 // Convert a pair of extracts from the same base vector to a VMOVRRD, either
15443 // directly or bitcast to an integer if the original is a float vector.
15444 // extract(x, n); extract(x, n+1)  ->  VMOVRRD(extract v2f64 x, n/2)
15445 // bitcast(extract(x, n)); bitcast(extract(x, n+1))  ->  VMOVRRD(extract x, n/2)
15446 static SDValue
15447 PerformExtractEltToVMOVRRD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
15448   EVT VT = N->getValueType(0);
15449   SDLoc dl(N);
15450 
15451   if (!DCI.isAfterLegalizeDAG() || VT != MVT::i32 ||
15452       !DCI.DAG.getTargetLoweringInfo().isTypeLegal(MVT::f64))
15453     return SDValue();
15454 
15455   SDValue Ext = SDValue(N, 0);
15456   if (Ext.getOpcode() == ISD::BITCAST &&
15457       Ext.getOperand(0).getValueType() == MVT::f32)
15458     Ext = Ext.getOperand(0);
15459   if (Ext.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
15460       !isa<ConstantSDNode>(Ext.getOperand(1)) ||
15461       Ext.getConstantOperandVal(1) % 2 != 0)
15462     return SDValue();
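        // If the extract's only user is an int-to-fp conversion, leave it alone;
        // that pattern is likely better served by a lane-to-FP conversion.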
15463   if (Ext->use_size() == 1 &&
15464       (Ext->use_begin()->getOpcode() == ISD::SINT_TO_FP ||
15465        Ext->use_begin()->getOpcode() == ISD::UINT_TO_FP))
15466     return SDValue();
15467 
15468   SDValue Op0 = Ext.getOperand(0);
15469   EVT VecVT = Op0.getValueType();
15470   unsigned ResNo = Op0.getResNo();
15471   unsigned Lane = Ext.getConstantOperandVal(1);
15472   if (VecVT.getVectorNumElements() != 4)
15473     return SDValue();
15474 
15475   // Find another extract, of Lane + 1
15476   auto OtherIt = find_if(Op0->uses(), [&](SDNode *V) {
15477     return V->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
15478            isa<ConstantSDNode>(V->getOperand(1)) &&
15479            V->getConstantOperandVal(1) == Lane + 1 &&
15480            V->getOperand(0).getResNo() == ResNo;
15481   });
15482   if (OtherIt == Op0->uses().end())
15483     return SDValue();
15484 
15485   // For float extracts, we need to be converting to an i32 for both
15486   // vector lanes.
15487   SDValue OtherExt(*OtherIt, 0);
15488   if (OtherExt.getValueType() != MVT::i32) {
15489     if (OtherExt->use_size() != 1 ||
15490         OtherExt->use_begin()->getOpcode() != ISD::BITCAST ||
15491         OtherExt->use_begin()->getValueType(0) != MVT::i32)
15492       return SDValue();
15493     OtherExt = SDValue(*OtherExt->use_begin(), 0);
15494   }
15495 
15496   // Convert the type to a f64 and extract with a VMOVRRD.
15497   SDValue F64 = DCI.DAG.getNode(
15498       ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
15499       DCI.DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v2f64, Op0),
15500       DCI.DAG.getConstant(Ext.getConstantOperandVal(1) / 2, dl, MVT::i32));
15501   SDValue VMOVRRD =
15502       DCI.DAG.getNode(ARMISD::VMOVRRD, dl, {MVT::i32, MVT::i32}, F64);
15503 
15504   DCI.CombineTo(OtherExt.getNode(), SDValue(VMOVRRD.getNode(), 1));
15505   return VMOVRRD;
15506 }
15507 
15508 static SDValue PerformExtractEltCombine(SDNode *N,
15509                                         TargetLowering::DAGCombinerInfo &DCI,
15510                                         const ARMSubtarget *ST) {
15511   SDValue Op0 = N->getOperand(0);
15512   EVT VT = N->getValueType(0);
15513   SDLoc dl(N);
15514 
15515   // extract (vdup x) -> x
15516   if (Op0->getOpcode() == ARMISD::VDUP) {
15517     SDValue X = Op0->getOperand(0);
15518     if (VT == MVT::f16 && X.getValueType() == MVT::i32)
15519       return DCI.DAG.getNode(ARMISD::VMOVhr, dl, VT, X);
15520     if (VT == MVT::i32 && X.getValueType() == MVT::f16)
15521       return DCI.DAG.getNode(ARMISD::VMOVrh, dl, VT, X);
15522     if (VT == MVT::f32 && X.getValueType() == MVT::i32)
15523       return DCI.DAG.getNode(ISD::BITCAST, dl, VT, X);
15524 
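          // Look through any intervening bitcasts to see whether the VDUP source
          // already has the type we are extracting.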
15525     while (X.getValueType() != VT && X->getOpcode() == ISD::BITCAST)
15526       X = X->getOperand(0);
15527     if (X.getValueType() == VT)
15528       return X;
15529   }
15530 
15531   // extract ARM_BUILD_VECTOR -> x
15532   if (Op0->getOpcode() == ARMISD::BUILD_VECTOR &&
15533       isa<ConstantSDNode>(N->getOperand(1)) &&
15534       N->getConstantOperandVal(1) < Op0.getNumOperands()) {
15535     return Op0.getOperand(N->getConstantOperandVal(1));
15536   }
15537 
15538   // extract(bitcast(BUILD_VECTOR(VMOVDRR(a, b), ..))) -> a or b
15539   if (Op0.getValueType() == MVT::v4i32 &&
15540       isa<ConstantSDNode>(N->getOperand(1)) &&
15541       Op0.getOpcode() == ISD::BITCAST &&
15542       Op0.getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
15543       Op0.getOperand(0).getValueType() == MVT::v2f64) {
15544     SDValue BV = Op0.getOperand(0);
15545     unsigned Offset = N->getConstantOperandVal(1);
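          // Each f64 element of the build_vector covers two i32 lanes; on
          // big-endian targets the two GPR halves of the VMOVDRR are in the
          // opposite order.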
15546     SDValue MOV = BV.getOperand(Offset < 2 ? 0 : 1);
15547     if (MOV.getOpcode() == ARMISD::VMOVDRR)
15548       return MOV.getOperand(ST->isLittle() ? Offset % 2 : 1 - Offset % 2);
15549   }
15550 
15551   // extract x, n; extract x, n+1  ->  VMOVRRD x
15552   if (SDValue R = PerformExtractEltToVMOVRRD(N, DCI))
15553     return R;
15554 
15555   // extract (MVETrunc(x)) -> extract x
15556   if (Op0->getOpcode() == ARMISD::MVETRUNC) {
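          // MVETRUNC concatenates the truncations of its operands, so map the
          // extract index back onto the corresponding source operand and lane.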
15557     unsigned Idx = N->getConstantOperandVal(1);
15558     unsigned Vec =
15559         Idx / Op0->getOperand(0).getValueType().getVectorNumElements();
15560     unsigned SubIdx =
15561         Idx % Op0->getOperand(0).getValueType().getVectorNumElements();
15562     return DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Op0.getOperand(Vec),
15563                            DCI.DAG.getConstant(SubIdx, dl, MVT::i32));
15564   }
15565 
15566   return SDValue();
15567 }
15568 
15569 static SDValue PerformSignExtendInregCombine(SDNode *N, SelectionDAG &DAG) {
15570   SDValue Op = N->getOperand(0);
15571   EVT VT = N->getValueType(0);
15572 
15573   // sext_inreg(VGETLANEu) -> VGETLANEs
15574   if (Op.getOpcode() == ARMISD::VGETLANEu &&
15575       cast<VTSDNode>(N->getOperand(1))->getVT() ==
15576           Op.getOperand(0).getValueType().getScalarType())
15577     return DAG.getNode(ARMISD::VGETLANEs, SDLoc(N), VT, Op.getOperand(0),
15578                        Op.getOperand(1));
15579 
15580   return SDValue();
15581 }
15582 
15583 static SDValue
15584 PerformInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
15585   SDValue Vec = N->getOperand(0);
15586   SDValue SubVec = N->getOperand(1);
15587   uint64_t IdxVal = N->getConstantOperandVal(2);
15588   EVT VecVT = Vec.getValueType();
15589   EVT SubVT = SubVec.getValueType();
15590 
15591   // Only do this for legal fixed vector types.
15592   if (!VecVT.isFixedLengthVector() ||
15593       !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VecVT) ||
15594       !DCI.DAG.getTargetLoweringInfo().isTypeLegal(SubVT))
15595     return SDValue();
15596 
15597   // Ignore widening patterns.
15598   if (IdxVal == 0 && Vec.isUndef())
15599     return SDValue();
15600 
15601   // Subvector must be half the width and an "aligned" insertion.
15602   unsigned NumSubElts = SubVT.getVectorNumElements();
15603   if ((SubVT.getSizeInBits() * 2) != VecVT.getSizeInBits() ||
15604       (IdxVal != 0 && IdxVal != NumSubElts))
15605     return SDValue();
15606 
15607   // Fold insert_subvector -> concat_vectors
15608   // insert_subvector(Vec,Sub,lo) -> concat_vectors(Sub,extract(Vec,hi))
15609   // insert_subvector(Vec,Sub,hi) -> concat_vectors(extract(Vec,lo),Sub)
15610   SDLoc DL(N);
15611   SDValue Lo, Hi;
15612   if (IdxVal == 0) {
15613     Lo = SubVec;
15614     Hi = DCI.DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec,
15615                          DCI.DAG.getVectorIdxConstant(NumSubElts, DL));
15616   } else {
15617     Lo = DCI.DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec,
15618                          DCI.DAG.getVectorIdxConstant(0, DL));
15619     Hi = SubVec;
15620   }
15621   return DCI.DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT, Lo, Hi);
15622 }
15623 
15624 // shuffle(MVETrunc(x, y)) -> VMOVN(x, y)
15625 static SDValue PerformShuffleVMOVNCombine(ShuffleVectorSDNode *N,
15626                                           SelectionDAG &DAG) {
15627   SDValue Trunc = N->getOperand(0);
15628   EVT VT = Trunc.getValueType();
15629   if (Trunc.getOpcode() != ARMISD::MVETRUNC || !N->getOperand(1).isUndef())
15630     return SDValue();
15631 
15632   SDLoc DL(Trunc);
15633   if (isVMOVNTruncMask(N->getMask(), VT, false))
15634     return DAG.getNode(
15635         ARMISD::VMOVN, DL, VT,
15636         DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, Trunc.getOperand(0)),
15637         DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, Trunc.getOperand(1)),
15638         DAG.getConstant(1, DL, MVT::i32));
15639   else if (isVMOVNTruncMask(N->getMask(), VT, true))
15640     return DAG.getNode(
15641         ARMISD::VMOVN, DL, VT,
15642         DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, Trunc.getOperand(1)),
15643         DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, Trunc.getOperand(0)),
15644         DAG.getConstant(1, DL, MVT::i32));
15645   return SDValue();
15646 }
15647 
15648 /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
15649 /// ISD::VECTOR_SHUFFLE.
15650 static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
15651   if (SDValue R = PerformShuffleVMOVNCombine(cast<ShuffleVectorSDNode>(N), DAG))
15652     return R;
15653 
15654   // The LLVM shufflevector instruction does not require the shuffle mask
15655   // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
15656   // have that requirement.  When translating to ISD::VECTOR_SHUFFLE, if the
15657   // operands do not match the mask length, they are extended by concatenating
15658   // them with undef vectors.  That is probably the right thing for other
15659   // targets, but for NEON it is better to concatenate two double-register
15660   // size vector operands into a single quad-register size vector.  Do that
15661   // transformation here:
15662   //   shuffle(concat(v1, undef), concat(v2, undef)) ->
15663   //   shuffle(concat(v1, v2), undef)
15664   SDValue Op0 = N->getOperand(0);
15665   SDValue Op1 = N->getOperand(1);
15666   if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
15667       Op1.getOpcode() != ISD::CONCAT_VECTORS ||
15668       Op0.getNumOperands() != 2 ||
15669       Op1.getNumOperands() != 2)
15670     return SDValue();
15671   SDValue Concat0Op1 = Op0.getOperand(1);
15672   SDValue Concat1Op1 = Op1.getOperand(1);
15673   if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef())
15674     return SDValue();
15675   // Skip the transformation if any of the types are illegal.
15676   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15677   EVT VT = N->getValueType(0);
15678   if (!TLI.isTypeLegal(VT) ||
15679       !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
15680       !TLI.isTypeLegal(Concat1Op1.getValueType()))
15681     return SDValue();
15682 
15683   SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
15684                                   Op0.getOperand(0), Op1.getOperand(0));
15685   // Translate the shuffle mask.
15686   SmallVector<int, 16> NewMask;
15687   unsigned NumElts = VT.getVectorNumElements();
15688   unsigned HalfElts = NumElts/2;
15689   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
15690   for (unsigned n = 0; n < NumElts; ++n) {
15691     int MaskElt = SVN->getMaskElt(n);
15692     int NewElt = -1;
15693     if (MaskElt < (int)HalfElts)
15694       NewElt = MaskElt;
15695     else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
15696       NewElt = HalfElts + MaskElt - NumElts;
15697     NewMask.push_back(NewElt);
15698   }
15699   return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat,
15700                               DAG.getUNDEF(VT), NewMask);
15701 }
15702 
15703 /// Load/store instruction that can be merged with a base address
15704 /// update
15705 struct BaseUpdateTarget {
15706   SDNode *N;
15707   bool isIntrinsic;
15708   bool isStore;
15709   unsigned AddrOpIdx;
15710 };
15711 
15712 struct BaseUpdateUser {
15713   /// Instruction that updates a pointer
15714   SDNode *N;
15715   /// Pointer increment operand
15716   SDValue Inc;
15717   /// Pointer increment value if it is a constant, or 0 otherwise
15718   unsigned ConstInc;
15719 };
15720 
15721 static bool TryCombineBaseUpdate(struct BaseUpdateTarget &Target,
15722                                  struct BaseUpdateUser &User,
15723                                  bool SimpleConstIncOnly,
15724                                  TargetLowering::DAGCombinerInfo &DCI) {
15725   SelectionDAG &DAG = DCI.DAG;
15726   SDNode *N = Target.N;
15727   MemSDNode *MemN = cast<MemSDNode>(N);
15728   SDLoc dl(N);
15729 
15730   // Find the new opcode for the updating load/store.
15731   bool isLoadOp = true;
15732   bool isLaneOp = false;
15733   // Workaround for vst1x and vld1x intrinsics which do not have alignment
15734   // as an operand.
15735   bool hasAlignment = true;
15736   unsigned NewOpc = 0;
15737   unsigned NumVecs = 0;
15738   if (Target.isIntrinsic) {
15739     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
15740     switch (IntNo) {
15741     default:
15742       llvm_unreachable("unexpected intrinsic for Neon base update");
15743     case Intrinsic::arm_neon_vld1:
15744       NewOpc = ARMISD::VLD1_UPD;
15745       NumVecs = 1;
15746       break;
15747     case Intrinsic::arm_neon_vld2:
15748       NewOpc = ARMISD::VLD2_UPD;
15749       NumVecs = 2;
15750       break;
15751     case Intrinsic::arm_neon_vld3:
15752       NewOpc = ARMISD::VLD3_UPD;
15753       NumVecs = 3;
15754       break;
15755     case Intrinsic::arm_neon_vld4:
15756       NewOpc = ARMISD::VLD4_UPD;
15757       NumVecs = 4;
15758       break;
15759     case Intrinsic::arm_neon_vld1x2:
15760       NewOpc = ARMISD::VLD1x2_UPD;
15761       NumVecs = 2;
15762       hasAlignment = false;
15763       break;
15764     case Intrinsic::arm_neon_vld1x3:
15765       NewOpc = ARMISD::VLD1x3_UPD;
15766       NumVecs = 3;
15767       hasAlignment = false;
15768       break;
15769     case Intrinsic::arm_neon_vld1x4:
15770       NewOpc = ARMISD::VLD1x4_UPD;
15771       NumVecs = 4;
15772       hasAlignment = false;
15773       break;
15774     case Intrinsic::arm_neon_vld2dup:
15775       NewOpc = ARMISD::VLD2DUP_UPD;
15776       NumVecs = 2;
15777       break;
15778     case Intrinsic::arm_neon_vld3dup:
15779       NewOpc = ARMISD::VLD3DUP_UPD;
15780       NumVecs = 3;
15781       break;
15782     case Intrinsic::arm_neon_vld4dup:
15783       NewOpc = ARMISD::VLD4DUP_UPD;
15784       NumVecs = 4;
15785       break;
15786     case Intrinsic::arm_neon_vld2lane:
15787       NewOpc = ARMISD::VLD2LN_UPD;
15788       NumVecs = 2;
15789       isLaneOp = true;
15790       break;
15791     case Intrinsic::arm_neon_vld3lane:
15792       NewOpc = ARMISD::VLD3LN_UPD;
15793       NumVecs = 3;
15794       isLaneOp = true;
15795       break;
15796     case Intrinsic::arm_neon_vld4lane:
15797       NewOpc = ARMISD::VLD4LN_UPD;
15798       NumVecs = 4;
15799       isLaneOp = true;
15800       break;
15801     case Intrinsic::arm_neon_vst1:
15802       NewOpc = ARMISD::VST1_UPD;
15803       NumVecs = 1;
15804       isLoadOp = false;
15805       break;
15806     case Intrinsic::arm_neon_vst2:
15807       NewOpc = ARMISD::VST2_UPD;
15808       NumVecs = 2;
15809       isLoadOp = false;
15810       break;
15811     case Intrinsic::arm_neon_vst3:
15812       NewOpc = ARMISD::VST3_UPD;
15813       NumVecs = 3;
15814       isLoadOp = false;
15815       break;
15816     case Intrinsic::arm_neon_vst4:
15817       NewOpc = ARMISD::VST4_UPD;
15818       NumVecs = 4;
15819       isLoadOp = false;
15820       break;
15821     case Intrinsic::arm_neon_vst2lane:
15822       NewOpc = ARMISD::VST2LN_UPD;
15823       NumVecs = 2;
15824       isLoadOp = false;
15825       isLaneOp = true;
15826       break;
15827     case Intrinsic::arm_neon_vst3lane:
15828       NewOpc = ARMISD::VST3LN_UPD;
15829       NumVecs = 3;
15830       isLoadOp = false;
15831       isLaneOp = true;
15832       break;
15833     case Intrinsic::arm_neon_vst4lane:
15834       NewOpc = ARMISD::VST4LN_UPD;
15835       NumVecs = 4;
15836       isLoadOp = false;
15837       isLaneOp = true;
15838       break;
15839     case Intrinsic::arm_neon_vst1x2:
15840       NewOpc = ARMISD::VST1x2_UPD;
15841       NumVecs = 2;
15842       isLoadOp = false;
15843       hasAlignment = false;
15844       break;
15845     case Intrinsic::arm_neon_vst1x3:
15846       NewOpc = ARMISD::VST1x3_UPD;
15847       NumVecs = 3;
15848       isLoadOp = false;
15849       hasAlignment = false;
15850       break;
15851     case Intrinsic::arm_neon_vst1x4:
15852       NewOpc = ARMISD::VST1x4_UPD;
15853       NumVecs = 4;
15854       isLoadOp = false;
15855       hasAlignment = false;
15856       break;
15857     }
15858   } else {
15859     isLaneOp = true;
15860     switch (N->getOpcode()) {
15861     default:
15862       llvm_unreachable("unexpected opcode for Neon base update");
15863     case ARMISD::VLD1DUP:
15864       NewOpc = ARMISD::VLD1DUP_UPD;
15865       NumVecs = 1;
15866       break;
15867     case ARMISD::VLD2DUP:
15868       NewOpc = ARMISD::VLD2DUP_UPD;
15869       NumVecs = 2;
15870       break;
15871     case ARMISD::VLD3DUP:
15872       NewOpc = ARMISD::VLD3DUP_UPD;
15873       NumVecs = 3;
15874       break;
15875     case ARMISD::VLD4DUP:
15876       NewOpc = ARMISD::VLD4DUP_UPD;
15877       NumVecs = 4;
15878       break;
15879     case ISD::LOAD:
15880       NewOpc = ARMISD::VLD1_UPD;
15881       NumVecs = 1;
15882       isLaneOp = false;
15883       break;
15884     case ISD::STORE:
15885       NewOpc = ARMISD::VST1_UPD;
15886       NumVecs = 1;
15887       isLaneOp = false;
15888       isLoadOp = false;
15889       break;
15890     }
15891   }
15892 
15893   // Find the size of memory referenced by the load/store.
15894   EVT VecTy;
15895   if (isLoadOp) {
15896     VecTy = N->getValueType(0);
15897   } else if (Target.isIntrinsic) {
15898     VecTy = N->getOperand(Target.AddrOpIdx + 1).getValueType();
15899   } else {
15900     assert(Target.isStore &&
15901            "Node has to be a load, a store, or an intrinsic!");
15902     VecTy = N->getOperand(1).getValueType();
15903   }
15904 
15905   bool isVLDDUPOp =
15906       NewOpc == ARMISD::VLD1DUP_UPD || NewOpc == ARMISD::VLD2DUP_UPD ||
15907       NewOpc == ARMISD::VLD3DUP_UPD || NewOpc == ARMISD::VLD4DUP_UPD;
15908 
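        // Lane and dup operations only access one element of each vector, so the
        // increment covers NumVecs elements rather than NumVecs whole vectors.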
15909   unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
15910   if (isLaneOp || isVLDDUPOp)
15911     NumBytes /= VecTy.getVectorNumElements();
15912 
15913   if (NumBytes >= 3 * 16 && User.ConstInc != NumBytes) {
15914     // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two
15915     // separate instructions that make it harder to use a non-constant update.
15916     return false;
15917   }
15918 
15919   if (SimpleConstIncOnly && User.ConstInc != NumBytes)
15920     return false;
15921 
15922   // OK, we found an ADD we can fold into the base update.
15923   // Now, create a _UPD node, taking care of not breaking alignment.
15924 
15925   EVT AlignedVecTy = VecTy;
15926   Align Alignment = MemN->getAlign();
15927 
15928   // If this is a less-than-standard-aligned load/store, change the type to
15929   // match the standard alignment.
15930   // The alignment is overlooked when selecting _UPD variants; and it's
15931   // easier to introduce bitcasts here than fix that.
15932   // There are 3 ways to get to this base-update combine:
15933   // - intrinsics: they are assumed to be properly aligned (to the standard
15934   //   alignment of the memory type), so we don't need to do anything.
15935   // - ARMISD::VLDx nodes: they are only generated from the aforementioned
15936   //   intrinsics, so, likewise, there's nothing to do.
15937   // - generic load/store instructions: the alignment is specified as an
15938   //   explicit operand, rather than implicitly as the standard alignment
15939   //   of the memory type (like the intrinsics).  We need to change the
15940   //   memory type to match the explicit alignment.  That way, we don't
15941   //   generate non-standard-aligned ARMISD::VLDx nodes.
15942   if (isa<LSBaseSDNode>(N)) {
15943     if (Alignment.value() < VecTy.getScalarSizeInBits() / 8) {
15944       MVT EltTy = MVT::getIntegerVT(Alignment.value() * 8);
15945       assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
15946       assert(!isLaneOp && "Unexpected generic load/store lane.");
15947       unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
15948       AlignedVecTy = MVT::getVectorVT(EltTy, NumElts);
15949     }
15950     // Don't set an explicit alignment on regular load/stores that we want
15951     // to transform to VLD/VST 1_UPD nodes.
15952     // This matches the behavior of regular load/stores, which only get an
15953     // explicit alignment if the MMO alignment is larger than the standard
15954     // alignment of the memory type.
15955     // Intrinsics, however, always get an explicit alignment, set to the
15956     // alignment of the MMO.
15957     Alignment = Align(1);
15958   }
15959 
15960   // Create the new updating load/store node.
15961   // First, create an SDVTList for the new updating node's results.
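        // The results are NumResultVecs vectors (loads only), followed by the
        // updated base pointer (i32) and the output chain.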
15962   EVT Tys[6];
15963   unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
15964   unsigned n;
15965   for (n = 0; n < NumResultVecs; ++n)
15966     Tys[n] = AlignedVecTy;
15967   Tys[n++] = MVT::i32;
15968   Tys[n] = MVT::Other;
15969   SDVTList SDTys = DAG.getVTList(ArrayRef(Tys, NumResultVecs + 2));
15970 
15971   // Then, gather the new node's operands.
15972   SmallVector<SDValue, 8> Ops;
15973   Ops.push_back(N->getOperand(0)); // incoming chain
15974   Ops.push_back(N->getOperand(Target.AddrOpIdx));
15975   Ops.push_back(User.Inc);
15976 
15977   if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
15978     // Try to match the intrinsic's signature
15979     Ops.push_back(StN->getValue());
15980   } else {
15981     // Loads (and of course intrinsics) match the intrinsics' signature,
15982     // so just add all but the alignment operand.
15983     unsigned LastOperand =
15984         hasAlignment ? N->getNumOperands() - 1 : N->getNumOperands();
15985     for (unsigned i = Target.AddrOpIdx + 1; i < LastOperand; ++i)
15986       Ops.push_back(N->getOperand(i));
15987   }
15988 
15989   // For all node types, the alignment operand is always the last one.
15990   Ops.push_back(DAG.getConstant(Alignment.value(), dl, MVT::i32));
15991 
15992   // If this is a non-standard-aligned STORE, the penultimate operand is the
15993   // stored value.  Bitcast it to the aligned type.
15994   if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) {
15995     SDValue &StVal = Ops[Ops.size() - 2];
15996     StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal);
15997   }
15998 
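        // Lane operations access individual elements, so describe the memory
        // access with the scalar element type.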
15999   EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy;
16000   SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT,
16001                                          MemN->getMemOperand());
16002 
16003   // Update the uses.
16004   SmallVector<SDValue, 5> NewResults;
16005   for (unsigned i = 0; i < NumResultVecs; ++i)
16006     NewResults.push_back(SDValue(UpdN.getNode(), i));
16007 
16008   // If this is a non-standard-aligned LOAD, the first result is the loaded
16009   // value.  Bitcast it to the expected result type.
16010   if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) {
16011     SDValue &LdVal = NewResults[0];
16012     LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal);
16013   }
16014 
16015   NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain
16016   DCI.CombineTo(N, NewResults);
16017   DCI.CombineTo(User.N, SDValue(UpdN.getNode(), NumResultVecs));
16018 
16019   return true;
16020 }
16021 
16022 // If (opcode ptr inc) is an ADD-like instruction, return the
16023 // increment value. Otherwise return 0.
16024 static unsigned getPointerConstIncrement(unsigned Opcode, SDValue Ptr,
16025                                          SDValue Inc, const SelectionDAG &DAG) {
16026   ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode());
16027   if (!CInc)
16028     return 0;
16029 
16030   switch (Opcode) {
16031   case ARMISD::VLD1_UPD:
16032   case ISD::ADD:
16033     return CInc->getZExtValue();
16034   case ISD::OR: {
16035     if (DAG.haveNoCommonBitsSet(Ptr, Inc)) {
16036       // (OR ptr inc) is the same as (ADD ptr inc)
16037       return CInc->getZExtValue();
16038     }
16039     return 0;
16040   }
16041   default:
16042     return 0;
16043   }
16044 }
16045 
16046 static bool findPointerConstIncrement(SDNode *N, SDValue *Ptr, SDValue *CInc) {
16047   switch (N->getOpcode()) {
16048   case ISD::ADD:
16049   case ISD::OR: {
16050     if (isa<ConstantSDNode>(N->getOperand(1))) {
16051       *Ptr = N->getOperand(0);
16052       *CInc = N->getOperand(1);
16053       return true;
16054     }
16055     return false;
16056   }
16057   case ARMISD::VLD1_UPD: {
16058     if (isa<ConstantSDNode>(N->getOperand(2))) {
16059       *Ptr = N->getOperand(1);
16060       *CInc = N->getOperand(2);
16061       return true;
16062     }
16063     return false;
16064   }
16065   default:
16066     return false;
16067   }
16068 }
16069 
16070 static bool isValidBaseUpdate(SDNode *N, SDNode *User) {
16071   // Check that the add is independent of the load/store.
16072   // Otherwise, folding it would create a cycle. Search through Addr
16073   // as well, since the User may not be a direct user of Addr and
16074   // only share a base pointer.
16075   SmallPtrSet<const SDNode *, 32> Visited;
16076   SmallVector<const SDNode *, 16> Worklist;
16077   Worklist.push_back(N);
16078   Worklist.push_back(User);
16079   if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
16080       SDNode::hasPredecessorHelper(User, Visited, Worklist))
16081     return false;
16082   return true;
16083 }
16084 
16085 /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP,
16086 /// NEON load/store intrinsics, and generic vector load/stores, to merge
16087 /// base address updates.
16088 /// For generic load/stores, the memory type is assumed to be a vector.
16089 /// The caller is assumed to have checked legality.
16090 static SDValue CombineBaseUpdate(SDNode *N,
16091                                  TargetLowering::DAGCombinerInfo &DCI) {
16092   const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
16093                             N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
16094   const bool isStore = N->getOpcode() == ISD::STORE;
16095   const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
16096   BaseUpdateTarget Target = {N, isIntrinsic, isStore, AddrOpIdx};
16097 
16098   SDValue Addr = N->getOperand(AddrOpIdx);
16099 
16100   SmallVector<BaseUpdateUser, 8> BaseUpdates;
16101 
16102   // Search for a use of the address operand that is an increment.
16103   for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
16104          UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
16105     SDNode *User = *UI;
16106     if (UI.getUse().getResNo() != Addr.getResNo() ||
16107         User->getNumOperands() != 2)
16108       continue;
16109 
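          // The other operand of the two-operand user is the pointer increment.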
16110     SDValue Inc = User->getOperand(UI.getOperandNo() == 1 ? 0 : 1);
16111     unsigned ConstInc =
16112         getPointerConstIncrement(User->getOpcode(), Addr, Inc, DCI.DAG);
16113 
16114     if (ConstInc || User->getOpcode() == ISD::ADD)
16115       BaseUpdates.push_back({User, Inc, ConstInc});
16116   }
16117 
16118   // If the address is a constant pointer increment itself, find
16119   // another constant increment that has the same base operand
16120   SDValue Base;
16121   SDValue CInc;
16122   if (findPointerConstIncrement(Addr.getNode(), &Base, &CInc)) {
16123     unsigned Offset =
16124         getPointerConstIncrement(Addr->getOpcode(), Base, CInc, DCI.DAG);
16125     for (SDNode::use_iterator UI = Base->use_begin(), UE = Base->use_end();
16126          UI != UE; ++UI) {
16127 
16128       SDNode *User = *UI;
16129       if (UI.getUse().getResNo() != Base.getResNo() || User == Addr.getNode() ||
16130           User->getNumOperands() != 2)
16131         continue;
16132 
16133       SDValue UserInc = User->getOperand(UI.getOperandNo() == 0 ? 1 : 0);
16134       unsigned UserOffset =
16135           getPointerConstIncrement(User->getOpcode(), Base, UserInc, DCI.DAG);
16136 
16137       if (!UserOffset || UserOffset <= Offset)
16138         continue;
16139 
16140       unsigned NewConstInc = UserOffset - Offset;
16141       SDValue NewInc = DCI.DAG.getConstant(NewConstInc, SDLoc(N), MVT::i32);
16142       BaseUpdates.push_back({User, NewInc, NewConstInc});
16143     }
16144   }
16145 
16146   // Try to fold the load/store with an update that matches memory
16147   // access size. This should work well for sequential loads.
16148   //
16149   // Filter out invalid updates as well.
16150   unsigned NumValidUpd = BaseUpdates.size();
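        // Swap invalid updates to the end of the list; the list is trimmed to the
        // valid prefix afterwards.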
16151   for (unsigned I = 0; I < NumValidUpd;) {
16152     BaseUpdateUser &User = BaseUpdates[I];
16153     if (!isValidBaseUpdate(N, User.N)) {
16154       --NumValidUpd;
16155       std::swap(BaseUpdates[I], BaseUpdates[NumValidUpd]);
16156       continue;
16157     }
16158 
16159     if (TryCombineBaseUpdate(Target, User, /*SimpleConstIncOnly=*/true, DCI))
16160       return SDValue();
16161     ++I;
16162   }
16163   BaseUpdates.resize(NumValidUpd);
16164 
16165   // Try to fold with other users. Non-constant updates are considered
16166   // first, and constant updates are sorted to not break a sequence of
16167   // strided accesses (if there is any).
16168   std::stable_sort(BaseUpdates.begin(), BaseUpdates.end(),
16169                    [](const BaseUpdateUser &LHS, const BaseUpdateUser &RHS) {
16170                      return LHS.ConstInc < RHS.ConstInc;
16171                    });
16172   for (BaseUpdateUser &User : BaseUpdates) {
16173     if (TryCombineBaseUpdate(Target, User, /*SimpleConstIncOnly=*/false, DCI))
16174       return SDValue();
16175   }
16176   return SDValue();
16177 }
16178 
16179 static SDValue PerformVLDCombine(SDNode *N,
16180                                  TargetLowering::DAGCombinerInfo &DCI) {
16181   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
16182     return SDValue();
16183 
16184   return CombineBaseUpdate(N, DCI);
16185 }
16186 
16187 static SDValue PerformMVEVLDCombine(SDNode *N,
16188                                     TargetLowering::DAGCombinerInfo &DCI) {
16189   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
16190     return SDValue();
16191 
16192   SelectionDAG &DAG = DCI.DAG;
16193   SDValue Addr = N->getOperand(2);
16194   MemSDNode *MemN = cast<MemSDNode>(N);
16195   SDLoc dl(N);
16196 
16197   // For the stores, where there are multiple intrinsics, we only actually
16198   // want to post-inc the last of them.
16199   unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
16200   if (IntNo == Intrinsic::arm_mve_vst2q &&
16201       cast<ConstantSDNode>(N->getOperand(5))->getZExtValue() != 1)
16202     return SDValue();
16203   if (IntNo == Intrinsic::arm_mve_vst4q &&
16204       cast<ConstantSDNode>(N->getOperand(7))->getZExtValue() != 3)
16205     return SDValue();
16206 
16207   // Search for a use of the address operand that is an increment.
16208   for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
16209                             UE = Addr.getNode()->use_end();
16210        UI != UE; ++UI) {
16211     SDNode *User = *UI;
16212     if (User->getOpcode() != ISD::ADD ||
16213         UI.getUse().getResNo() != Addr.getResNo())
16214       continue;
16215 
16216     // Check that the add is independent of the load/store.  Otherwise, folding
16217     // it would create a cycle. We can avoid searching through Addr as it's a
16218     // predecessor to both.
16219     SmallPtrSet<const SDNode *, 32> Visited;
16220     SmallVector<const SDNode *, 16> Worklist;
16221     Visited.insert(Addr.getNode());
16222     Worklist.push_back(N);
16223     Worklist.push_back(User);
16224     if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
16225         SDNode::hasPredecessorHelper(User, Visited, Worklist))
16226       continue;
16227 
16228     // Find the new opcode for the updating load/store.
16229     bool isLoadOp = true;
16230     unsigned NewOpc = 0;
16231     unsigned NumVecs = 0;
16232     switch (IntNo) {
16233     default:
16234       llvm_unreachable("unexpected intrinsic for MVE VLDn combine");
16235     case Intrinsic::arm_mve_vld2q:
16236       NewOpc = ARMISD::VLD2_UPD;
16237       NumVecs = 2;
16238       break;
16239     case Intrinsic::arm_mve_vld4q:
16240       NewOpc = ARMISD::VLD4_UPD;
16241       NumVecs = 4;
16242       break;
16243     case Intrinsic::arm_mve_vst2q:
16244       NewOpc = ARMISD::VST2_UPD;
16245       NumVecs = 2;
16246       isLoadOp = false;
16247       break;
16248     case Intrinsic::arm_mve_vst4q:
16249       NewOpc = ARMISD::VST4_UPD;
16250       NumVecs = 4;
16251       isLoadOp = false;
16252       break;
16253     }
16254 
16255     // Find the size of memory referenced by the load/store.
16256     EVT VecTy;
16257     if (isLoadOp) {
16258       VecTy = N->getValueType(0);
16259     } else {
16260       VecTy = N->getOperand(3).getValueType();
16261     }
16262 
16263     unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
16264 
16265     // If the increment is a constant, it must match the memory ref size.
16266     SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
16267     ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode());
16268     if (!CInc || CInc->getZExtValue() != NumBytes)
16269       continue;
16270 
16271     // Create the new updating load/store node.
16272     // First, create an SDVTList for the new updating node's results.
16273     EVT Tys[6];
16274     unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
16275     unsigned n;
16276     for (n = 0; n < NumResultVecs; ++n)
16277       Tys[n] = VecTy;
16278     Tys[n++] = MVT::i32;
16279     Tys[n] = MVT::Other;
16280     SDVTList SDTys = DAG.getVTList(ArrayRef(Tys, NumResultVecs + 2));
16281 
16282     // Then, gather the new node's operands.
16283     SmallVector<SDValue, 8> Ops;
16284     Ops.push_back(N->getOperand(0)); // incoming chain
16285     Ops.push_back(N->getOperand(2)); // ptr
16286     Ops.push_back(Inc);
16287 
16288     for (unsigned i = 3; i < N->getNumOperands(); ++i)
16289       Ops.push_back(N->getOperand(i));
16290 
16291     SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, VecTy,
16292                                            MemN->getMemOperand());
16293 
16294     // Update the uses.
16295     SmallVector<SDValue, 5> NewResults;
16296     for (unsigned i = 0; i < NumResultVecs; ++i)
16297       NewResults.push_back(SDValue(UpdN.getNode(), i));
16298 
16299     NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain
16300     DCI.CombineTo(N, NewResults);
16301     DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
16302 
16303     break;
16304   }
16305 
16306   return SDValue();
16307 }
16308 
16309 /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
16310 /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
16311 /// are also VDUPLANEs.  If so, combine them to a vldN-dup operation and
16312 /// return true.
16313 static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
16314   SelectionDAG &DAG = DCI.DAG;
16315   EVT VT = N->getValueType(0);
16316   // vldN-dup instructions only support 64-bit vectors for N > 1.
16317   if (!VT.is64BitVector())
16318     return false;
16319 
16320   // Check if the VDUPLANE operand is a vldN-dup intrinsic.
16321   SDNode *VLD = N->getOperand(0).getNode();
16322   if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
16323     return false;
16324   unsigned NumVecs = 0;
16325   unsigned NewOpc = 0;
16326   unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
16327   if (IntNo == Intrinsic::arm_neon_vld2lane) {
16328     NumVecs = 2;
16329     NewOpc = ARMISD::VLD2DUP;
16330   } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
16331     NumVecs = 3;
16332     NewOpc = ARMISD::VLD3DUP;
16333   } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
16334     NumVecs = 4;
16335     NewOpc = ARMISD::VLD4DUP;
16336   } else {
16337     return false;
16338   }
16339 
16340   // First check that all the vldN-lane uses are VDUPLANEs and that the lane
16341   // numbers match the load.
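        // The lane number follows the chain, intrinsic ID, pointer, and the
        // NumVecs source vector operands.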
16342   unsigned VLDLaneNo =
16343     cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
16344   for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
16345        UI != UE; ++UI) {
16346     // Ignore uses of the chain result.
16347     if (UI.getUse().getResNo() == NumVecs)
16348       continue;
16349     SDNode *User = *UI;
16350     if (User->getOpcode() != ARMISD::VDUPLANE ||
16351         VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
16352       return false;
16353   }
16354 
16355   // Create the vldN-dup node.
16356   EVT Tys[5];
16357   unsigned n;
16358   for (n = 0; n < NumVecs; ++n)
16359     Tys[n] = VT;
16360   Tys[n] = MVT::Other;
16361   SDVTList SDTys = DAG.getVTList(ArrayRef(Tys, NumVecs + 1));
16362   SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
16363   MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
16364   SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys,
16365                                            Ops, VLDMemInt->getMemoryVT(),
16366                                            VLDMemInt->getMemOperand());
16367 
16368   // Update the uses.
16369   for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
16370        UI != UE; ++UI) {
16371     unsigned ResNo = UI.getUse().getResNo();
16372     // Ignore uses of the chain result.
16373     if (ResNo == NumVecs)
16374       continue;
16375     SDNode *User = *UI;
16376     DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
16377   }
16378 
16379   // Now the vldN-lane intrinsic is dead except for its chain result.
16380   // Update uses of the chain.
16381   std::vector<SDValue> VLDDupResults;
16382   for (unsigned n = 0; n < NumVecs; ++n)
16383     VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
16384   VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
16385   DCI.CombineTo(VLD, VLDDupResults);
16386 
16387   return true;
16388 }
16389 
16390 /// PerformVDUPLANECombine - Target-specific dag combine xforms for
16391 /// ARMISD::VDUPLANE.
16392 static SDValue PerformVDUPLANECombine(SDNode *N,
16393                                       TargetLowering::DAGCombinerInfo &DCI,
16394                                       const ARMSubtarget *Subtarget) {
16395   SDValue Op = N->getOperand(0);
16396   EVT VT = N->getValueType(0);
16397 
16398   // On MVE, we just convert the VDUPLANE to a VDUP with an extract.
16399   if (Subtarget->hasMVEIntegerOps()) {
16400     EVT ExtractVT = VT.getVectorElementType();
16401     // We need to ensure we are creating a legal type.
16402     if (!DCI.DAG.getTargetLoweringInfo().isTypeLegal(ExtractVT))
16403       ExtractVT = MVT::i32;
16404     SDValue Extract = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), ExtractVT,
16405                               N->getOperand(0), N->getOperand(1));
16406     return DCI.DAG.getNode(ARMISD::VDUP, SDLoc(N), VT, Extract);
16407   }
16408 
16409   // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
16410   // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
16411   if (CombineVLDDUP(N, DCI))
16412     return SDValue(N, 0);
16413 
16414   // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
16415   // redundant.  Ignore bit_converts for now; element sizes are checked below.
16416   while (Op.getOpcode() == ISD::BITCAST)
16417     Op = Op.getOperand(0);
16418   if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
16419     return SDValue();
16420 
16421   // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
16422   unsigned EltSize = Op.getScalarValueSizeInBits();
16423   // The canonical VMOV for a zero vector uses a 32-bit element size.
16424   unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
16425   unsigned EltBits;
16426   if (ARM_AM::decodeVMOVModImm(Imm, EltBits) == 0)
16427     EltSize = 8;
16428   if (EltSize > VT.getScalarSizeInBits())
16429     return SDValue();
16430 
16431   return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
16432 }
16433 
16434 /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
16435 static SDValue PerformVDUPCombine(SDNode *N, SelectionDAG &DAG,
16436                                   const ARMSubtarget *Subtarget) {
16437   SDValue Op = N->getOperand(0);
16438   SDLoc dl(N);
16439 
16440   if (Subtarget->hasMVEIntegerOps()) {
16441     // Convert VDUP f32 -> VDUP BITCAST i32 under MVE, as we know the value will
16442     // need to come from a GPR.
16443     if (Op.getValueType() == MVT::f32)
16444       return DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0),
16445                          DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op));
16446     else if (Op.getValueType() == MVT::f16)
16447       return DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0),
16448                          DAG.getNode(ARMISD::VMOVrh, dl, MVT::i32, Op));
16449   }
16450 
16451   if (!Subtarget->hasNEON())
16452     return SDValue();
16453 
16454   // Match VDUP(LOAD) -> VLD1DUP.
16455   // We match this pattern here rather than waiting for isel because the
16456   // transform is only legal for unindexed loads.
16457   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode());
16458   if (LD && Op.hasOneUse() && LD->isUnindexed() &&
16459       LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) {
16460     SDValue Ops[] = {LD->getOperand(0), LD->getOperand(1),
16461                      DAG.getConstant(LD->getAlign().value(), SDLoc(N), MVT::i32)};
16462     SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other);
16463     SDValue VLDDup =
16464         DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys, Ops,
16465                                 LD->getMemoryVT(), LD->getMemOperand());
16466     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1));
16467     return VLDDup;
16468   }
16469 
16470   return SDValue();
16471 }
16472 
16473 static SDValue PerformLOADCombine(SDNode *N,
16474                                   TargetLowering::DAGCombinerInfo &DCI,
16475                                   const ARMSubtarget *Subtarget) {
16476   EVT VT = N->getValueType(0);
16477 
16478   // If this is a legal vector load, try to combine it into a VLD1_UPD.
16479   if (Subtarget->hasNEON() && ISD::isNormalLoad(N) && VT.isVector() &&
16480       DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
16481     return CombineBaseUpdate(N, DCI);
16482 
16483   return SDValue();
16484 }
16485 
16486 // Optimize trunc store (of multiple scalars) to shuffle and store.  First,
16487 // pack all of the elements in one place.  Next, store to memory in fewer
16488 // chunks.
16489 static SDValue PerformTruncatingStoreCombine(StoreSDNode *St,
16490                                              SelectionDAG &DAG) {
16491   SDValue StVal = St->getValue();
16492   EVT VT = StVal.getValueType();
16493   if (!St->isTruncatingStore() || !VT.isVector())
16494     return SDValue();
16495   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16496   EVT StVT = St->getMemoryVT();
16497   unsigned NumElems = VT.getVectorNumElements();
16498   assert(StVT != VT && "Cannot truncate to the same type");
16499   unsigned FromEltSz = VT.getScalarSizeInBits();
16500   unsigned ToEltSz = StVT.getScalarSizeInBits();
16501 
16502   // The From/To element sizes and the element count must be powers of two.
16503   if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz))
16504     return SDValue();
16505 
16506   // We are going to use the original vector elements for storing, so the
16507   // total source size must be a multiple of the store element size.
16508   if (0 != (NumElems * FromEltSz) % ToEltSz)
16509     return SDValue();
16510 
16511   unsigned SizeRatio = FromEltSz / ToEltSz;
16512   assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());
16513 
16514   // Create a type on which we perform the shuffle.
16515   EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(),
16516                                    NumElems * SizeRatio);
16517   assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
16518 
16519   SDLoc DL(St);
16520   SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
16521   SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
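        // After the bitcast each original element spans SizeRatio narrow elements;
        // the truncated value lives in the last of them on big-endian targets and
        // the first on little-endian targets.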
16522   for (unsigned i = 0; i < NumElems; ++i)
16523     ShuffleVec[i] = DAG.getDataLayout().isBigEndian() ? (i + 1) * SizeRatio - 1
16524                                                       : i * SizeRatio;
16525 
16526   // Can't shuffle using an illegal type.
16527   if (!TLI.isTypeLegal(WideVecVT))
16528     return SDValue();
16529 
16530   SDValue Shuff = DAG.getVectorShuffle(
16531       WideVecVT, DL, WideVec, DAG.getUNDEF(WideVec.getValueType()), ShuffleVec);
16532   // At this point all of the data is stored at the bottom of the
16533   // register. We now need to save it to memory.
16534 
16535   // Find the largest legal store unit.
16536   MVT StoreType = MVT::i8;
16537   for (MVT Tp : MVT::integer_valuetypes()) {
16538     if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
16539       StoreType = Tp;
16540   }
16541   // Didn't find a legal store type.
16542   if (!TLI.isTypeLegal(StoreType))
16543     return SDValue();
16544 
16545   // Bitcast the original vector into a vector of store-size units
16546   EVT StoreVecVT =
16547       EVT::getVectorVT(*DAG.getContext(), StoreType,
16548                        VT.getSizeInBits() / EVT(StoreType).getSizeInBits());
16549   assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
16550   SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
16551   SmallVector<SDValue, 8> Chains;
16552   SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL,
16553                                       TLI.getPointerTy(DAG.getDataLayout()));
16554   SDValue BasePtr = St->getBasePtr();
16555 
16556   // Perform one or more big stores into memory.
16557   unsigned E = (ToEltSz * NumElems) / StoreType.getSizeInBits();
16558   for (unsigned I = 0; I < E; I++) {
16559     SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreType,
16560                                  ShuffWide, DAG.getIntPtrConstant(I, DL));
16561     SDValue Ch =
16562         DAG.getStore(St->getChain(), DL, SubVec, BasePtr, St->getPointerInfo(),
16563                      St->getAlign(), St->getMemOperand()->getFlags());
16564     BasePtr =
16565         DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, Increment);
16566     Chains.push_back(Ch);
16567   }
16568   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
16569 }
16570 
// Try taking a single vector store from an fpround (which would otherwise
// turn into an expensive buildvector) and splitting it into a series of
// narrowing stores.
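//
// For instance (illustrative only), a store of (fpround v8f32 to v8f16) can
// become two VCVTN conversions of the v4f32 halves, each written out with a
// truncating v4i32->v4i16 store at byte offsets 0 and 8 from the original
// base pointer.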
16574 static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St,
16575                                                  SelectionDAG &DAG) {
16576   if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed())
16577     return SDValue();
16578   SDValue Trunc = St->getValue();
16579   if (Trunc->getOpcode() != ISD::FP_ROUND)
16580     return SDValue();
16581   EVT FromVT = Trunc->getOperand(0).getValueType();
16582   EVT ToVT = Trunc.getValueType();
16583   if (!ToVT.isVector())
16584     return SDValue();
16585   assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements());
16586   EVT ToEltVT = ToVT.getVectorElementType();
16587   EVT FromEltVT = FromVT.getVectorElementType();
16588 
16589   if (FromEltVT != MVT::f32 || ToEltVT != MVT::f16)
16590     return SDValue();
16591 
16592   unsigned NumElements = 4;
16593   if (FromVT.getVectorNumElements() % NumElements != 0)
16594     return SDValue();
16595 
  // Test if the Trunc will be convertible to a VMOVN with a shuffle, and if so
16597   // use the VMOVN over splitting the store. We are looking for patterns of:
16598   // !rev: 0 N 1 N+1 2 N+2 ...
16599   //  rev: N 0 N+1 1 N+2 2 ...
16600   // The shuffle may either be a single source (in which case N = NumElts/2) or
16601   // two inputs extended with concat to the same size (in which case N =
16602   // NumElts).
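  // For example (illustrative), with 8 output lanes and a single input
  // (N = 4) the masks being matched are:
  //   !rev: <0, 4, 1, 5, 2, 6, 3, 7>
  //    rev: <4, 0, 5, 1, 6, 2, 7, 3>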
16603   auto isVMOVNShuffle = [&](ShuffleVectorSDNode *SVN, bool Rev) {
16604     ArrayRef<int> M = SVN->getMask();
16605     unsigned NumElts = ToVT.getVectorNumElements();
16606     if (SVN->getOperand(1).isUndef())
16607       NumElts /= 2;
16608 
16609     unsigned Off0 = Rev ? NumElts : 0;
16610     unsigned Off1 = Rev ? 0 : NumElts;
16611 
16612     for (unsigned I = 0; I < NumElts; I += 2) {
16613       if (M[I] >= 0 && M[I] != (int)(Off0 + I / 2))
16614         return false;
16615       if (M[I + 1] >= 0 && M[I + 1] != (int)(Off1 + I / 2))
16616         return false;
16617     }
16618 
16619     return true;
16620   };
16621 
16622   if (auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Trunc.getOperand(0)))
16623     if (isVMOVNShuffle(Shuffle, false) || isVMOVNShuffle(Shuffle, true))
16624       return SDValue();
16625 
16626   LLVMContext &C = *DAG.getContext();
16627   SDLoc DL(St);
16628   // Details about the old store
16629   SDValue Ch = St->getChain();
16630   SDValue BasePtr = St->getBasePtr();
16631   Align Alignment = St->getOriginalAlign();
16632   MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags();
16633   AAMDNodes AAInfo = St->getAAInfo();
16634 
  // We split the store into slices of NumElements. The f32 values are
  // converted to f16 with a VCVTN and then written out as truncating integer
  // stores.
16637   EVT NewFromVT = EVT::getVectorVT(C, FromEltVT, NumElements);
16638   EVT NewToVT = EVT::getVectorVT(
16639       C, EVT::getIntegerVT(C, ToEltVT.getSizeInBits()), NumElements);
16640 
16641   SmallVector<SDValue, 4> Stores;
16642   for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) {
16643     unsigned NewOffset = i * NumElements * ToEltVT.getSizeInBits() / 8;
16644     SDValue NewPtr =
16645         DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::getFixed(NewOffset));
16646 
16647     SDValue Extract =
16648         DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewFromVT, Trunc.getOperand(0),
16649                     DAG.getConstant(i * NumElements, DL, MVT::i32));
16650 
16651     SDValue FPTrunc =
16652         DAG.getNode(ARMISD::VCVTN, DL, MVT::v8f16, DAG.getUNDEF(MVT::v8f16),
16653                     Extract, DAG.getConstant(0, DL, MVT::i32));
16654     Extract = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, MVT::v4i32, FPTrunc);
16655 
16656     SDValue Store = DAG.getTruncStore(
16657         Ch, DL, Extract, NewPtr, St->getPointerInfo().getWithOffset(NewOffset),
16658         NewToVT, Alignment, MMOFlags, AAInfo);
16659     Stores.push_back(Store);
16660   }
16661   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
16662 }
16663 
// Try taking a single vector store from an MVETRUNC (which would otherwise
// turn into an expensive buildvector) and splitting it into a series of
// narrowing stores.
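//
// As a rough example, an MVETRUNC of two v4i32 operands stored out as v8i16
// becomes two truncating v4i32->v4i16 stores at byte offsets 0 and 8.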
16667 static SDValue PerformSplittingMVETruncToNarrowingStores(StoreSDNode *St,
16668                                                          SelectionDAG &DAG) {
16669   if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed())
16670     return SDValue();
16671   SDValue Trunc = St->getValue();
16672   if (Trunc->getOpcode() != ARMISD::MVETRUNC)
16673     return SDValue();
16674   EVT FromVT = Trunc->getOperand(0).getValueType();
16675   EVT ToVT = Trunc.getValueType();
16676 
16677   LLVMContext &C = *DAG.getContext();
16678   SDLoc DL(St);
16679   // Details about the old store
16680   SDValue Ch = St->getChain();
16681   SDValue BasePtr = St->getBasePtr();
16682   Align Alignment = St->getOriginalAlign();
16683   MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags();
16684   AAMDNodes AAInfo = St->getAAInfo();
16685 
16686   EVT NewToVT = EVT::getVectorVT(C, ToVT.getVectorElementType(),
16687                                  FromVT.getVectorNumElements());
16688 
16689   SmallVector<SDValue, 4> Stores;
16690   for (unsigned i = 0; i < Trunc.getNumOperands(); i++) {
16691     unsigned NewOffset =
16692         i * FromVT.getVectorNumElements() * ToVT.getScalarSizeInBits() / 8;
16693     SDValue NewPtr =
16694         DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::getFixed(NewOffset));
16695 
16696     SDValue Extract = Trunc.getOperand(i);
16697     SDValue Store = DAG.getTruncStore(
16698         Ch, DL, Extract, NewPtr, St->getPointerInfo().getWithOffset(NewOffset),
16699         NewToVT, Alignment, MMOFlags, AAInfo);
16700     Stores.push_back(Store);
16701   }
16702   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
16703 }
16704 
// Given a floating point store from an extracted vector, where an integer
// VGETLANE of the same lane already exists, store the existing VGETLANEu value
// directly instead. This can help reduce fp register pressure, avoids the fp
// extract and allows the use of integer post-inc stores that are not available
// with vstr.
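//
// For example (illustrative), storing the f16 (extract_vector_elt %v, lane)
// when an i32 (ARMISD::VGETLANEu %v, lane) already exists becomes a 16-bit
// truncating store of that VGETLANEu result.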
16709 static SDValue PerformExtractFpToIntStores(StoreSDNode *St, SelectionDAG &DAG) {
16710   if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed())
16711     return SDValue();
16712   SDValue Extract = St->getValue();
16713   EVT VT = Extract.getValueType();
  // For now this only handles f16. It may be useful for f32 too, but that
  // will be bitcast(extract), not the VGETLANEu we currently check for here.
16716   if (VT != MVT::f16 || Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
16717     return SDValue();
16718 
16719   SDNode *GetLane =
16720       DAG.getNodeIfExists(ARMISD::VGETLANEu, DAG.getVTList(MVT::i32),
16721                           {Extract.getOperand(0), Extract.getOperand(1)});
16722   if (!GetLane)
16723     return SDValue();
16724 
16725   LLVMContext &C = *DAG.getContext();
16726   SDLoc DL(St);
16727   // Create a new integer store to replace the existing floating point version.
16728   SDValue Ch = St->getChain();
16729   SDValue BasePtr = St->getBasePtr();
16730   Align Alignment = St->getOriginalAlign();
16731   MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags();
16732   AAMDNodes AAInfo = St->getAAInfo();
16733   EVT NewToVT = EVT::getIntegerVT(C, VT.getSizeInBits());
16734   SDValue Store = DAG.getTruncStore(Ch, DL, SDValue(GetLane, 0), BasePtr,
16735                                     St->getPointerInfo(), NewToVT, Alignment,
16736                                     MMOFlags, AAInfo);
16737 
16738   return Store;
16739 }
16740 
16741 /// PerformSTORECombine - Target-specific dag combine xforms for
16742 /// ISD::STORE.
16743 static SDValue PerformSTORECombine(SDNode *N,
16744                                    TargetLowering::DAGCombinerInfo &DCI,
16745                                    const ARMSubtarget *Subtarget) {
16746   StoreSDNode *St = cast<StoreSDNode>(N);
16747   if (St->isVolatile())
16748     return SDValue();
16749   SDValue StVal = St->getValue();
16750   EVT VT = StVal.getValueType();
16751 
16752   if (Subtarget->hasNEON())
16753     if (SDValue Store = PerformTruncatingStoreCombine(St, DCI.DAG))
16754       return Store;
16755 
16756   if (Subtarget->hasMVEIntegerOps()) {
16757     if (SDValue NewToken = PerformSplittingToNarrowingStores(St, DCI.DAG))
16758       return NewToken;
16759     if (SDValue NewChain = PerformExtractFpToIntStores(St, DCI.DAG))
16760       return NewChain;
16761     if (SDValue NewToken =
16762             PerformSplittingMVETruncToNarrowingStores(St, DCI.DAG))
16763       return NewToken;
16764   }
16765 
16766   if (!ISD::isNormalStore(St))
16767     return SDValue();
16768 
16769   // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and
16770   // ARM stores of arguments in the same cache line.
16771   if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
16772       StVal.getNode()->hasOneUse()) {
    SelectionDAG &DAG = DCI.DAG;
16774     bool isBigEndian = DAG.getDataLayout().isBigEndian();
16775     SDLoc DL(St);
16776     SDValue BasePtr = St->getBasePtr();
16777     SDValue NewST1 = DAG.getStore(
16778         St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0),
16779         BasePtr, St->getPointerInfo(), St->getOriginalAlign(),
16780         St->getMemOperand()->getFlags());
16781 
16782     SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
16783                                     DAG.getConstant(4, DL, MVT::i32));
16784     return DAG.getStore(NewST1.getValue(0), DL,
16785                         StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
16786                         OffsetPtr, St->getPointerInfo().getWithOffset(4),
16787                         St->getOriginalAlign(),
16788                         St->getMemOperand()->getFlags());
16789   }
16790 
16791   if (StVal.getValueType() == MVT::i64 &&
16792       StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
16793 
16794     // Bitcast an i64 store extracted from a vector to f64.
16795     // Otherwise, the i64 value will be legalized to a pair of i32 values.
16796     SelectionDAG &DAG = DCI.DAG;
16797     SDLoc dl(StVal);
16798     SDValue IntVec = StVal.getOperand(0);
16799     EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
16800                                    IntVec.getValueType().getVectorNumElements());
16801     SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
16802     SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
16803                                  Vec, StVal.getOperand(1));
16804     dl = SDLoc(N);
16805     SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
16806     // Make the DAGCombiner fold the bitcasts.
16807     DCI.AddToWorklist(Vec.getNode());
16808     DCI.AddToWorklist(ExtElt.getNode());
16809     DCI.AddToWorklist(V.getNode());
16810     return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
16811                         St->getPointerInfo(), St->getAlign(),
16812                         St->getMemOperand()->getFlags(), St->getAAInfo());
16813   }
16814 
16815   // If this is a legal vector store, try to combine it into a VST1_UPD.
16816   if (Subtarget->hasNEON() && ISD::isNormalStore(N) && VT.isVector() &&
16817       DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
16818     return CombineBaseUpdate(N, DCI);
16819 
16820   return SDValue();
16821 }
16822 
16823 /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
16824 /// can replace combinations of VMUL and VCVT (floating-point to integer)
16825 /// when the VMUL has a constant operand that is a power of 2.
16826 ///
16827 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
16828 ///  vmul.f32        d16, d17, d16
16829 ///  vcvt.s32.f32    d16, d16
16830 /// becomes:
16831 ///  vcvt.s32.f32    d16, d16, #3
16832 static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
16833                                   const ARMSubtarget *Subtarget) {
16834   if (!Subtarget->hasNEON())
16835     return SDValue();
16836 
16837   SDValue Op = N->getOperand(0);
16838   if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
16839       Op.getOpcode() != ISD::FMUL)
16840     return SDValue();
16841 
16842   SDValue ConstVec = Op->getOperand(1);
16843   if (!isa<BuildVectorSDNode>(ConstVec))
16844     return SDValue();
16845 
16846   MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
16847   uint32_t FloatBits = FloatTy.getSizeInBits();
16848   MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
16849   uint32_t IntBits = IntTy.getSizeInBits();
16850   unsigned NumLanes = Op.getValueType().getVectorNumElements();
16851   if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
16852     // These instructions only exist converting from f32 to i32. We can handle
16853     // smaller integers by generating an extra truncate, but larger ones would
16854     // be lossy. We also can't handle anything other than 2 or 4 lanes, since
    // these instructions only support v2i32/v4i32 types.
16856     return SDValue();
16857   }
16858 
16859   BitVector UndefElements;
16860   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
16861   int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
16862   if (C == -1 || C == 0 || C > 32)
16863     return SDValue();
16864 
16865   SDLoc dl(N);
16866   bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
16867   unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
16868     Intrinsic::arm_neon_vcvtfp2fxu;
16869   SDValue FixConv = DAG.getNode(
16870       ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
16871       DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0),
16872       DAG.getConstant(C, dl, MVT::i32));
16873 
16874   if (IntBits < FloatBits)
16875     FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv);
16876 
16877   return FixConv;
16878 }
16879 
16880 static SDValue PerformFAddVSelectCombine(SDNode *N, SelectionDAG &DAG,
16881                                          const ARMSubtarget *Subtarget) {
16882   if (!Subtarget->hasMVEFloatOps())
16883     return SDValue();
16884 
16885   // Turn (fadd x, (vselect c, y, -0.0)) into (vselect c, (fadd x, y), x)
16886   // The second form can be more easily turned into a predicated vadd, and
16887   // possibly combined into a fma to become a predicated vfma.
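  // For example (illustrative), with a v4f32 fadd:
  //   (fadd x, (vselect c, y, <-0.0, -0.0, -0.0, -0.0>))
  // becomes
  //   (vselect c, (fadd x, y), x)
  // Since fadd x, -0.0 == x, the inactive lanes are unchanged either way.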
16888   SDValue Op0 = N->getOperand(0);
16889   SDValue Op1 = N->getOperand(1);
16890   EVT VT = N->getValueType(0);
16891   SDLoc DL(N);
16892 
  // The identity element for an fadd is -0.0 (or +0.0 when the nsz flag is
  // set), which is what these VMOVIMMs represent.
16895   auto isIdentitySplat = [&](SDValue Op, bool NSZ) {
16896     if (Op.getOpcode() != ISD::BITCAST ||
16897         Op.getOperand(0).getOpcode() != ARMISD::VMOVIMM)
16898       return false;
16899     uint64_t ImmVal = Op.getOperand(0).getConstantOperandVal(0);
16900     if (VT == MVT::v4f32 && (ImmVal == 1664 || (ImmVal == 0 && NSZ)))
16901       return true;
16902     if (VT == MVT::v8f16 && (ImmVal == 2688 || (ImmVal == 0 && NSZ)))
16903       return true;
16904     return false;
16905   };
16906 
16907   if (Op0.getOpcode() == ISD::VSELECT && Op1.getOpcode() != ISD::VSELECT)
16908     std::swap(Op0, Op1);
16909 
16910   if (Op1.getOpcode() != ISD::VSELECT)
16911     return SDValue();
16912 
16913   SDNodeFlags FaddFlags = N->getFlags();
16914   bool NSZ = FaddFlags.hasNoSignedZeros();
16915   if (!isIdentitySplat(Op1.getOperand(2), NSZ))
16916     return SDValue();
16917 
16918   SDValue FAdd =
16919       DAG.getNode(ISD::FADD, DL, VT, Op0, Op1.getOperand(1), FaddFlags);
  return DAG.getNode(ISD::VSELECT, DL, VT, Op1.getOperand(0), FAdd, Op0,
                     FaddFlags);
16921 }
16922 
16923 static SDValue PerformFADDVCMLACombine(SDNode *N, SelectionDAG &DAG) {
16924   SDValue LHS = N->getOperand(0);
16925   SDValue RHS = N->getOperand(1);
16926   EVT VT = N->getValueType(0);
16927   SDLoc DL(N);
16928 
16929   if (!N->getFlags().hasAllowReassociation())
16930     return SDValue();
16931 
  // Combine fadd(a, vcmla(b, c, d)) -> vcmla(fadd(a, b), c, d)
16933   auto ReassocComplex = [&](SDValue A, SDValue B) {
16934     if (A.getOpcode() != ISD::INTRINSIC_WO_CHAIN)
16935       return SDValue();
16936     unsigned Opc = A.getConstantOperandVal(0);
16937     if (Opc != Intrinsic::arm_mve_vcmlaq)
16938       return SDValue();
16939     SDValue VCMLA = DAG.getNode(
16940         ISD::INTRINSIC_WO_CHAIN, DL, VT, A.getOperand(0), A.getOperand(1),
16941         DAG.getNode(ISD::FADD, DL, VT, A.getOperand(2), B, N->getFlags()),
16942         A.getOperand(3), A.getOperand(4));
16943     VCMLA->setFlags(A->getFlags());
16944     return VCMLA;
16945   };
16946   if (SDValue R = ReassocComplex(LHS, RHS))
16947     return R;
16948   if (SDValue R = ReassocComplex(RHS, LHS))
16949     return R;
16950 
16951   return SDValue();
16952 }
16953 
16954 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
16955                                   const ARMSubtarget *Subtarget) {
16956   if (SDValue S = PerformFAddVSelectCombine(N, DAG, Subtarget))
16957     return S;
16958   if (SDValue S = PerformFADDVCMLACombine(N, DAG))
16959     return S;
16960   return SDValue();
16961 }
16962 
16963 /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
16964 /// can replace combinations of VCVT (integer to floating-point) and VDIV
16965 /// when the VDIV has a constant operand that is a power of 2.
16966 ///
16967 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
16968 ///  vcvt.f32.s32    d16, d16
16969 ///  vdiv.f32        d16, d17, d16
16970 /// becomes:
16971 ///  vcvt.f32.s32    d16, d16, #3
16972 static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
16973                                   const ARMSubtarget *Subtarget) {
16974   if (!Subtarget->hasNEON())
16975     return SDValue();
16976 
16977   SDValue Op = N->getOperand(0);
16978   unsigned OpOpcode = Op.getNode()->getOpcode();
16979   if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() ||
16980       (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
16981     return SDValue();
16982 
16983   SDValue ConstVec = N->getOperand(1);
16984   if (!isa<BuildVectorSDNode>(ConstVec))
16985     return SDValue();
16986 
16987   MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
16988   uint32_t FloatBits = FloatTy.getSizeInBits();
16989   MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
16990   uint32_t IntBits = IntTy.getSizeInBits();
16991   unsigned NumLanes = Op.getValueType().getVectorNumElements();
16992   if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
16993     // These instructions only exist converting from i32 to f32. We can handle
16994     // smaller integers by generating an extra extend, but larger ones would
16995     // be lossy. We also can't handle anything other than 2 or 4 lanes, since
    // these instructions only support v2i32/v4i32 types.
16997     return SDValue();
16998   }
16999 
17000   BitVector UndefElements;
17001   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
17002   int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
17003   if (C == -1 || C == 0 || C > 32)
17004     return SDValue();
17005 
17006   SDLoc dl(N);
17007   bool isSigned = OpOpcode == ISD::SINT_TO_FP;
17008   SDValue ConvInput = Op.getOperand(0);
17009   if (IntBits < FloatBits)
17010     ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
17011                             dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
17012                             ConvInput);
17013 
17014   unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
17015     Intrinsic::arm_neon_vcvtfxu2fp;
17016   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
17017                      Op.getValueType(),
17018                      DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
17019                      ConvInput, DAG.getConstant(C, dl, MVT::i32));
17020 }
17021 
17022 static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
17023                                            const ARMSubtarget *ST) {
17024   if (!ST->hasMVEIntegerOps())
17025     return SDValue();
17026 
17027   assert(N->getOpcode() == ISD::VECREDUCE_ADD);
17028   EVT ResVT = N->getValueType(0);
17029   SDValue N0 = N->getOperand(0);
17030   SDLoc dl(N);
17031 
17032   // Try to turn vecreduce_add(add(x, y)) into vecreduce(x) + vecreduce(y)
17033   if (ResVT == MVT::i32 && N0.getOpcode() == ISD::ADD &&
17034       (N0.getValueType() == MVT::v4i32 || N0.getValueType() == MVT::v8i16 ||
17035        N0.getValueType() == MVT::v16i8)) {
17036     SDValue Red0 = DAG.getNode(ISD::VECREDUCE_ADD, dl, ResVT, N0.getOperand(0));
17037     SDValue Red1 = DAG.getNode(ISD::VECREDUCE_ADD, dl, ResVT, N0.getOperand(1));
17038     return DAG.getNode(ISD::ADD, dl, ResVT, Red0, Red1);
17039   }
17040 
17041   // We are looking for something that will have illegal types if left alone,
17042   // but that we can convert to a single instruction under MVE. For example
17043   // vecreduce_add(sext(A, v8i32)) => VADDV.s16 A
17044   // or
17045   // vecreduce_add(mul(zext(A, v16i32), zext(B, v16i32))) => VMLADAV.u8 A, B
17046 
17047   // The legal cases are:
17048   //   VADDV u/s 8/16/32
17049   //   VMLAV u/s 8/16/32
17050   //   VADDLV u/s 32
17051   //   VMLALV u/s 16/32
17052 
17053   // If the input vector is smaller than legal (v4i8/v4i16 for example) we can
17054   // extend it and use v4i32 instead.
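  // For instance (illustrative), vecreduce_add(sext(v4i16 A to v4i64)) : i64
  // can be handled by first sign extending A to v4i32 and then using a
  // VADDLV.s32, rather than legalizing the illegal v4i64 extend.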
17055   auto ExtTypeMatches = [](SDValue A, ArrayRef<MVT> ExtTypes) {
17056     EVT AVT = A.getValueType();
17057     return any_of(ExtTypes, [&](MVT Ty) {
17058       return AVT.getVectorNumElements() == Ty.getVectorNumElements() &&
17059              AVT.bitsLE(Ty);
17060     });
17061   };
17062   auto ExtendIfNeeded = [&](SDValue A, unsigned ExtendCode) {
17063     EVT AVT = A.getValueType();
17064     if (!AVT.is128BitVector())
17065       A = DAG.getNode(ExtendCode, dl,
17066                       AVT.changeVectorElementType(MVT::getIntegerVT(
17067                           128 / AVT.getVectorMinNumElements())),
17068                       A);
17069     return A;
17070   };
17071   auto IsVADDV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes) {
17072     if (ResVT != RetTy || N0->getOpcode() != ExtendCode)
17073       return SDValue();
17074     SDValue A = N0->getOperand(0);
17075     if (ExtTypeMatches(A, ExtTypes))
17076       return ExtendIfNeeded(A, ExtendCode);
17077     return SDValue();
17078   };
17079   auto IsPredVADDV = [&](MVT RetTy, unsigned ExtendCode,
17080                          ArrayRef<MVT> ExtTypes, SDValue &Mask) {
17081     if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT ||
17082         !ISD::isBuildVectorAllZeros(N0->getOperand(2).getNode()))
17083       return SDValue();
17084     Mask = N0->getOperand(0);
17085     SDValue Ext = N0->getOperand(1);
17086     if (Ext->getOpcode() != ExtendCode)
17087       return SDValue();
17088     SDValue A = Ext->getOperand(0);
17089     if (ExtTypeMatches(A, ExtTypes))
17090       return ExtendIfNeeded(A, ExtendCode);
17091     return SDValue();
17092   };
17093   auto IsVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes,
17094                      SDValue &A, SDValue &B) {
17095     // For a vmla we are trying to match a larger pattern:
17096     // ExtA = sext/zext A
17097     // ExtB = sext/zext B
17098     // Mul = mul ExtA, ExtB
17099     // vecreduce.add Mul
    // There might also be an extra extend between the mul and the addreduce,
    // so long as the bitwidth is high enough to make them equivalent (for
    // example the original v8i16 might be mul'd at v8i32 and the reduce
    // happens at v8i64).
17103     if (ResVT != RetTy)
17104       return false;
17105     SDValue Mul = N0;
17106     if (Mul->getOpcode() == ExtendCode &&
17107         Mul->getOperand(0).getScalarValueSizeInBits() * 2 >=
17108             ResVT.getScalarSizeInBits())
17109       Mul = Mul->getOperand(0);
17110     if (Mul->getOpcode() != ISD::MUL)
17111       return false;
17112     SDValue ExtA = Mul->getOperand(0);
17113     SDValue ExtB = Mul->getOperand(1);
17114     if (ExtA->getOpcode() != ExtendCode || ExtB->getOpcode() != ExtendCode)
17115       return false;
17116     A = ExtA->getOperand(0);
17117     B = ExtB->getOperand(0);
17118     if (ExtTypeMatches(A, ExtTypes) && ExtTypeMatches(B, ExtTypes)) {
17119       A = ExtendIfNeeded(A, ExtendCode);
17120       B = ExtendIfNeeded(B, ExtendCode);
17121       return true;
17122     }
17123     return false;
17124   };
17125   auto IsPredVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes,
17126                      SDValue &A, SDValue &B, SDValue &Mask) {
17127     // Same as the pattern above with a select for the zero predicated lanes
17128     // ExtA = sext/zext A
17129     // ExtB = sext/zext B
17130     // Mul = mul ExtA, ExtB
17131     // N0 = select Mask, Mul, 0
17132     // vecreduce.add N0
17133     if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT ||
17134         !ISD::isBuildVectorAllZeros(N0->getOperand(2).getNode()))
17135       return false;
17136     Mask = N0->getOperand(0);
17137     SDValue Mul = N0->getOperand(1);
17138     if (Mul->getOpcode() == ExtendCode &&
17139         Mul->getOperand(0).getScalarValueSizeInBits() * 2 >=
17140             ResVT.getScalarSizeInBits())
17141       Mul = Mul->getOperand(0);
17142     if (Mul->getOpcode() != ISD::MUL)
17143       return false;
17144     SDValue ExtA = Mul->getOperand(0);
17145     SDValue ExtB = Mul->getOperand(1);
17146     if (ExtA->getOpcode() != ExtendCode || ExtB->getOpcode() != ExtendCode)
17147       return false;
17148     A = ExtA->getOperand(0);
17149     B = ExtB->getOperand(0);
17150     if (ExtTypeMatches(A, ExtTypes) && ExtTypeMatches(B, ExtTypes)) {
17151       A = ExtendIfNeeded(A, ExtendCode);
17152       B = ExtendIfNeeded(B, ExtendCode);
17153       return true;
17154     }
17155     return false;
17156   };
17157   auto Create64bitNode = [&](unsigned Opcode, ArrayRef<SDValue> Ops) {
17158     // Split illegal MVT::v16i8->i64 vector reductions into two legal v8i16->i64
17159     // reductions. The operands are extended with MVEEXT, but as they are
17160     // reductions the lane orders do not matter. MVEEXT may be combined with
17161     // loads to produce two extending loads, or else they will be expanded to
17162     // VREV/VMOVL.
17163     EVT VT = Ops[0].getValueType();
17164     if (VT == MVT::v16i8) {
17165       assert((Opcode == ARMISD::VMLALVs || Opcode == ARMISD::VMLALVu) &&
17166              "Unexpected illegal long reduction opcode");
17167       bool IsUnsigned = Opcode == ARMISD::VMLALVu;
17168 
17169       SDValue Ext0 =
17170           DAG.getNode(IsUnsigned ? ARMISD::MVEZEXT : ARMISD::MVESEXT, dl,
17171                       DAG.getVTList(MVT::v8i16, MVT::v8i16), Ops[0]);
17172       SDValue Ext1 =
17173           DAG.getNode(IsUnsigned ? ARMISD::MVEZEXT : ARMISD::MVESEXT, dl,
17174                       DAG.getVTList(MVT::v8i16, MVT::v8i16), Ops[1]);
17175 
17176       SDValue MLA0 = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
17177                                  Ext0, Ext1);
17178       SDValue MLA1 =
17179           DAG.getNode(IsUnsigned ? ARMISD::VMLALVAu : ARMISD::VMLALVAs, dl,
17180                       DAG.getVTList(MVT::i32, MVT::i32), MLA0, MLA0.getValue(1),
17181                       Ext0.getValue(1), Ext1.getValue(1));
17182       return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, MLA1, MLA1.getValue(1));
17183     }
17184     SDValue Node = DAG.getNode(Opcode, dl, {MVT::i32, MVT::i32}, Ops);
17185     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Node,
17186                        SDValue(Node.getNode(), 1));
17187   };
17188 
17189   SDValue A, B;
17190   SDValue Mask;
17191   if (IsVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B))
17192     return DAG.getNode(ARMISD::VMLAVs, dl, ResVT, A, B);
17193   if (IsVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B))
17194     return DAG.getNode(ARMISD::VMLAVu, dl, ResVT, A, B);
17195   if (IsVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v16i8, MVT::v8i16, MVT::v4i32},
17196               A, B))
17197     return Create64bitNode(ARMISD::VMLALVs, {A, B});
17198   if (IsVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v16i8, MVT::v8i16, MVT::v4i32},
17199               A, B))
17200     return Create64bitNode(ARMISD::VMLALVu, {A, B});
17201   if (IsVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B))
17202     return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
17203                        DAG.getNode(ARMISD::VMLAVs, dl, MVT::i32, A, B));
17204   if (IsVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B))
17205     return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
17206                        DAG.getNode(ARMISD::VMLAVu, dl, MVT::i32, A, B));
17207 
17208   if (IsPredVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B,
17209                   Mask))
17210     return DAG.getNode(ARMISD::VMLAVps, dl, ResVT, A, B, Mask);
17211   if (IsPredVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B,
17212                   Mask))
17213     return DAG.getNode(ARMISD::VMLAVpu, dl, ResVT, A, B, Mask);
17214   if (IsPredVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B,
17215                   Mask))
17216     return Create64bitNode(ARMISD::VMLALVps, {A, B, Mask});
17217   if (IsPredVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B,
17218                   Mask))
17219     return Create64bitNode(ARMISD::VMLALVpu, {A, B, Mask});
17220   if (IsPredVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B, Mask))
17221     return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
17222                        DAG.getNode(ARMISD::VMLAVps, dl, MVT::i32, A, B, Mask));
17223   if (IsPredVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B, Mask))
17224     return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
17225                        DAG.getNode(ARMISD::VMLAVpu, dl, MVT::i32, A, B, Mask));
17226 
17227   if (SDValue A = IsVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}))
17228     return DAG.getNode(ARMISD::VADDVs, dl, ResVT, A);
17229   if (SDValue A = IsVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}))
17230     return DAG.getNode(ARMISD::VADDVu, dl, ResVT, A);
17231   if (SDValue A = IsVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32}))
17232     return Create64bitNode(ARMISD::VADDLVs, {A});
17233   if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}))
17234     return Create64bitNode(ARMISD::VADDLVu, {A});
17235   if (SDValue A = IsVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}))
17236     return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
17237                        DAG.getNode(ARMISD::VADDVs, dl, MVT::i32, A));
17238   if (SDValue A = IsVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}))
17239     return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
17240                        DAG.getNode(ARMISD::VADDVu, dl, MVT::i32, A));
17241 
  if (SDValue A = IsPredVADDV(MVT::i32, ISD::SIGN_EXTEND,
                              {MVT::v8i16, MVT::v16i8}, Mask))
17243     return DAG.getNode(ARMISD::VADDVps, dl, ResVT, A, Mask);
  if (SDValue A = IsPredVADDV(MVT::i32, ISD::ZERO_EXTEND,
                              {MVT::v8i16, MVT::v16i8}, Mask))
17245     return DAG.getNode(ARMISD::VADDVpu, dl, ResVT, A, Mask);
17246   if (SDValue A = IsPredVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32}, Mask))
17247     return Create64bitNode(ARMISD::VADDLVps, {A, Mask});
17248   if (SDValue A = IsPredVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}, Mask))
17249     return Create64bitNode(ARMISD::VADDLVpu, {A, Mask});
17250   if (SDValue A = IsPredVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, Mask))
17251     return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
17252                        DAG.getNode(ARMISD::VADDVps, dl, MVT::i32, A, Mask));
17253   if (SDValue A = IsPredVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, Mask))
17254     return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
17255                        DAG.getNode(ARMISD::VADDVpu, dl, MVT::i32, A, Mask));
17256 
17257   // Some complications. We can get a case where the two inputs of the mul are
17258   // the same, then the output sext will have been helpfully converted to a
17259   // zext. Turn it back.
17260   SDValue Op = N0;
17261   if (Op->getOpcode() == ISD::VSELECT)
17262     Op = Op->getOperand(1);
17263   if (Op->getOpcode() == ISD::ZERO_EXTEND &&
17264       Op->getOperand(0)->getOpcode() == ISD::MUL) {
17265     SDValue Mul = Op->getOperand(0);
17266     if (Mul->getOperand(0) == Mul->getOperand(1) &&
17267         Mul->getOperand(0)->getOpcode() == ISD::SIGN_EXTEND) {
17268       SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, N0->getValueType(0), Mul);
17269       if (Op != N0)
17270         Ext = DAG.getNode(ISD::VSELECT, dl, N0->getValueType(0),
17271                           N0->getOperand(0), Ext, N0->getOperand(2));
17272       return DAG.getNode(ISD::VECREDUCE_ADD, dl, ResVT, Ext);
17273     }
17274   }
17275 
17276   return SDValue();
17277 }
17278 
17279 // Looks for vaddv(shuffle) or vmlav(shuffle, shuffle), with a shuffle where all
// the lanes are used. Because the reduction is commutative, the shuffle can be
// removed.
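// For example (illustrative), vaddv(shuffle x, undef, <3,2,1,0>) adds up the
// same four lanes as vaddv(x), so the shuffle can be dropped and the reduction
// rebuilt directly on x.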
17282 static SDValue PerformReduceShuffleCombine(SDNode *N, SelectionDAG &DAG) {
17283   unsigned VecOp = N->getOperand(0).getValueType().isVector() ? 0 : 2;
17284   auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N->getOperand(VecOp));
17285   if (!Shuf || !Shuf->getOperand(1).isUndef())
17286     return SDValue();
17287 
17288   // Check all elements are used once in the mask.
17289   ArrayRef<int> Mask = Shuf->getMask();
17290   APInt SetElts(Mask.size(), 0);
17291   for (int E : Mask) {
17292     if (E < 0 || E >= (int)Mask.size())
17293       return SDValue();
17294     SetElts.setBit(E);
17295   }
17296   if (!SetElts.isAllOnes())
17297     return SDValue();
17298 
17299   if (N->getNumOperands() != VecOp + 1) {
17300     auto *Shuf2 = dyn_cast<ShuffleVectorSDNode>(N->getOperand(VecOp + 1));
17301     if (!Shuf2 || !Shuf2->getOperand(1).isUndef() || Shuf2->getMask() != Mask)
17302       return SDValue();
17303   }
17304 
17305   SmallVector<SDValue> Ops;
17306   for (SDValue Op : N->ops()) {
17307     if (Op.getValueType().isVector())
17308       Ops.push_back(Op.getOperand(0));
17309     else
17310       Ops.push_back(Op);
17311   }
17312   return DAG.getNode(N->getOpcode(), SDLoc(N), N->getVTList(), Ops);
17313 }
17314 
17315 static SDValue PerformVMOVNCombine(SDNode *N,
17316                                    TargetLowering::DAGCombinerInfo &DCI) {
17317   SDValue Op0 = N->getOperand(0);
17318   SDValue Op1 = N->getOperand(1);
17319   unsigned IsTop = N->getConstantOperandVal(2);
17320 
17321   // VMOVNT a undef -> a
17322   // VMOVNB a undef -> a
17323   // VMOVNB undef a -> a
17324   if (Op1->isUndef())
17325     return Op0;
17326   if (Op0->isUndef() && !IsTop)
17327     return Op1;
17328 
17329   // VMOVNt(c, VQMOVNb(a, b)) => VQMOVNt(c, b)
17330   // VMOVNb(c, VQMOVNb(a, b)) => VQMOVNb(c, b)
17331   if ((Op1->getOpcode() == ARMISD::VQMOVNs ||
17332        Op1->getOpcode() == ARMISD::VQMOVNu) &&
17333       Op1->getConstantOperandVal(2) == 0)
17334     return DCI.DAG.getNode(Op1->getOpcode(), SDLoc(Op1), N->getValueType(0),
17335                            Op0, Op1->getOperand(1), N->getOperand(2));
17336 
17337   // Only the bottom lanes from Qm (Op1) and either the top or bottom lanes from
17338   // Qd (Op0) are demanded from a VMOVN, depending on whether we are inserting
17339   // into the top or bottom lanes.
17340   unsigned NumElts = N->getValueType(0).getVectorNumElements();
17341   APInt Op1DemandedElts = APInt::getSplat(NumElts, APInt::getLowBitsSet(2, 1));
17342   APInt Op0DemandedElts =
17343       IsTop ? Op1DemandedElts
17344             : APInt::getSplat(NumElts, APInt::getHighBitsSet(2, 1));
17345 
17346   const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo();
17347   if (TLI.SimplifyDemandedVectorElts(Op0, Op0DemandedElts, DCI))
17348     return SDValue(N, 0);
17349   if (TLI.SimplifyDemandedVectorElts(Op1, Op1DemandedElts, DCI))
17350     return SDValue(N, 0);
17351 
17352   return SDValue();
17353 }
17354 
17355 static SDValue PerformVQMOVNCombine(SDNode *N,
17356                                     TargetLowering::DAGCombinerInfo &DCI) {
17357   SDValue Op0 = N->getOperand(0);
17358   unsigned IsTop = N->getConstantOperandVal(2);
17359 
17360   unsigned NumElts = N->getValueType(0).getVectorNumElements();
17361   APInt Op0DemandedElts =
17362       APInt::getSplat(NumElts, IsTop ? APInt::getLowBitsSet(2, 1)
17363                                      : APInt::getHighBitsSet(2, 1));
17364 
17365   const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo();
17366   if (TLI.SimplifyDemandedVectorElts(Op0, Op0DemandedElts, DCI))
17367     return SDValue(N, 0);
17368   return SDValue();
17369 }
17370 
17371 static SDValue PerformVQDMULHCombine(SDNode *N,
17372                                      TargetLowering::DAGCombinerInfo &DCI) {
17373   EVT VT = N->getValueType(0);
17374   SDValue LHS = N->getOperand(0);
17375   SDValue RHS = N->getOperand(1);
17376 
17377   auto *Shuf0 = dyn_cast<ShuffleVectorSDNode>(LHS);
17378   auto *Shuf1 = dyn_cast<ShuffleVectorSDNode>(RHS);
17379   // Turn VQDMULH(shuffle, shuffle) -> shuffle(VQDMULH)
17380   if (Shuf0 && Shuf1 && Shuf0->getMask().equals(Shuf1->getMask()) &&
17381       LHS.getOperand(1).isUndef() && RHS.getOperand(1).isUndef() &&
17382       (LHS.hasOneUse() || RHS.hasOneUse() || LHS == RHS)) {
17383     SDLoc DL(N);
17384     SDValue NewBinOp = DCI.DAG.getNode(N->getOpcode(), DL, VT,
17385                                        LHS.getOperand(0), RHS.getOperand(0));
17386     SDValue UndefV = LHS.getOperand(1);
17387     return DCI.DAG.getVectorShuffle(VT, DL, NewBinOp, UndefV, Shuf0->getMask());
17388   }
17389   return SDValue();
17390 }
17391 
17392 static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG) {
17393   SDLoc DL(N);
17394   SDValue Op0 = N->getOperand(0);
17395   SDValue Op1 = N->getOperand(1);
17396 
  // Turn X << -C -> X >> C and vice versa. The negative shifts can come up
  // from uses of the intrinsics.
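  // For example (illustrative), an LSLL by -3 is rewritten as an LSRL by 3,
  // and an LSRL by -3 as an LSLL by 3.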
17399   if (auto C = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
17400     int ShiftAmt = C->getSExtValue();
17401     if (ShiftAmt == 0) {
17402       SDValue Merge = DAG.getMergeValues({Op0, Op1}, DL);
17403       DAG.ReplaceAllUsesWith(N, Merge.getNode());
17404       return SDValue();
17405     }
17406 
17407     if (ShiftAmt >= -32 && ShiftAmt < 0) {
17408       unsigned NewOpcode =
17409           N->getOpcode() == ARMISD::LSLL ? ARMISD::LSRL : ARMISD::LSLL;
17410       SDValue NewShift = DAG.getNode(NewOpcode, DL, N->getVTList(), Op0, Op1,
17411                                      DAG.getConstant(-ShiftAmt, DL, MVT::i32));
17412       DAG.ReplaceAllUsesWith(N, NewShift.getNode());
17413       return NewShift;
17414     }
17415   }
17416 
17417   return SDValue();
17418 }
17419 
17420 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
17421 SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N,
17422                                                    DAGCombinerInfo &DCI) const {
17423   SelectionDAG &DAG = DCI.DAG;
17424   unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
17425   switch (IntNo) {
17426   default:
17427     // Don't do anything for most intrinsics.
17428     break;
17429 
17430   // Vector shifts: check for immediate versions and lower them.
17431   // Note: This is done during DAG combining instead of DAG legalizing because
17432   // the build_vectors for 64-bit vector element shift counts are generally
17433   // not legal, and it is hard to see their values after they get legalized to
17434   // loads from a constant pool.
17435   case Intrinsic::arm_neon_vshifts:
17436   case Intrinsic::arm_neon_vshiftu:
17437   case Intrinsic::arm_neon_vrshifts:
17438   case Intrinsic::arm_neon_vrshiftu:
17439   case Intrinsic::arm_neon_vrshiftn:
17440   case Intrinsic::arm_neon_vqshifts:
17441   case Intrinsic::arm_neon_vqshiftu:
17442   case Intrinsic::arm_neon_vqshiftsu:
17443   case Intrinsic::arm_neon_vqshiftns:
17444   case Intrinsic::arm_neon_vqshiftnu:
17445   case Intrinsic::arm_neon_vqshiftnsu:
17446   case Intrinsic::arm_neon_vqrshiftns:
17447   case Intrinsic::arm_neon_vqrshiftnu:
17448   case Intrinsic::arm_neon_vqrshiftnsu: {
17449     EVT VT = N->getOperand(1).getValueType();
17450     int64_t Cnt;
17451     unsigned VShiftOpc = 0;
17452 
17453     switch (IntNo) {
17454     case Intrinsic::arm_neon_vshifts:
17455     case Intrinsic::arm_neon_vshiftu:
17456       if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
17457         VShiftOpc = ARMISD::VSHLIMM;
17458         break;
17459       }
17460       if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
17461         VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? ARMISD::VSHRsIMM
17462                                                           : ARMISD::VSHRuIMM);
17463         break;
17464       }
17465       return SDValue();
17466 
17467     case Intrinsic::arm_neon_vrshifts:
17468     case Intrinsic::arm_neon_vrshiftu:
17469       if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
17470         break;
17471       return SDValue();
17472 
17473     case Intrinsic::arm_neon_vqshifts:
17474     case Intrinsic::arm_neon_vqshiftu:
17475       if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
17476         break;
17477       return SDValue();
17478 
17479     case Intrinsic::arm_neon_vqshiftsu:
17480       if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
17481         break;
17482       llvm_unreachable("invalid shift count for vqshlu intrinsic");
17483 
17484     case Intrinsic::arm_neon_vrshiftn:
17485     case Intrinsic::arm_neon_vqshiftns:
17486     case Intrinsic::arm_neon_vqshiftnu:
17487     case Intrinsic::arm_neon_vqshiftnsu:
17488     case Intrinsic::arm_neon_vqrshiftns:
17489     case Intrinsic::arm_neon_vqrshiftnu:
17490     case Intrinsic::arm_neon_vqrshiftnsu:
17491       // Narrowing shifts require an immediate right shift.
17492       if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
17493         break;
17494       llvm_unreachable("invalid shift count for narrowing vector shift "
17495                        "intrinsic");
17496 
17497     default:
17498       llvm_unreachable("unhandled vector shift");
17499     }
17500 
17501     switch (IntNo) {
17502     case Intrinsic::arm_neon_vshifts:
17503     case Intrinsic::arm_neon_vshiftu:
17504       // Opcode already set above.
17505       break;
17506     case Intrinsic::arm_neon_vrshifts:
17507       VShiftOpc = ARMISD::VRSHRsIMM;
17508       break;
17509     case Intrinsic::arm_neon_vrshiftu:
17510       VShiftOpc = ARMISD::VRSHRuIMM;
17511       break;
17512     case Intrinsic::arm_neon_vrshiftn:
17513       VShiftOpc = ARMISD::VRSHRNIMM;
17514       break;
17515     case Intrinsic::arm_neon_vqshifts:
17516       VShiftOpc = ARMISD::VQSHLsIMM;
17517       break;
17518     case Intrinsic::arm_neon_vqshiftu:
17519       VShiftOpc = ARMISD::VQSHLuIMM;
17520       break;
17521     case Intrinsic::arm_neon_vqshiftsu:
17522       VShiftOpc = ARMISD::VQSHLsuIMM;
17523       break;
17524     case Intrinsic::arm_neon_vqshiftns:
17525       VShiftOpc = ARMISD::VQSHRNsIMM;
17526       break;
17527     case Intrinsic::arm_neon_vqshiftnu:
17528       VShiftOpc = ARMISD::VQSHRNuIMM;
17529       break;
17530     case Intrinsic::arm_neon_vqshiftnsu:
17531       VShiftOpc = ARMISD::VQSHRNsuIMM;
17532       break;
17533     case Intrinsic::arm_neon_vqrshiftns:
17534       VShiftOpc = ARMISD::VQRSHRNsIMM;
17535       break;
17536     case Intrinsic::arm_neon_vqrshiftnu:
17537       VShiftOpc = ARMISD::VQRSHRNuIMM;
17538       break;
17539     case Intrinsic::arm_neon_vqrshiftnsu:
17540       VShiftOpc = ARMISD::VQRSHRNsuIMM;
17541       break;
17542     }
17543 
17544     SDLoc dl(N);
17545     return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
17546                        N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32));
17547   }
17548 
17549   case Intrinsic::arm_neon_vshiftins: {
17550     EVT VT = N->getOperand(1).getValueType();
17551     int64_t Cnt;
17552     unsigned VShiftOpc = 0;
17553 
17554     if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
17555       VShiftOpc = ARMISD::VSLIIMM;
17556     else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
17557       VShiftOpc = ARMISD::VSRIIMM;
17558     else {
17559       llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
17560     }
17561 
17562     SDLoc dl(N);
17563     return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
17564                        N->getOperand(1), N->getOperand(2),
17565                        DAG.getConstant(Cnt, dl, MVT::i32));
17566   }
17567 
17568   case Intrinsic::arm_neon_vqrshifts:
17569   case Intrinsic::arm_neon_vqrshiftu:
17570     // No immediate versions of these to check for.
17571     break;
17572 
17573   case Intrinsic::arm_mve_vqdmlah:
17574   case Intrinsic::arm_mve_vqdmlash:
17575   case Intrinsic::arm_mve_vqrdmlah:
17576   case Intrinsic::arm_mve_vqrdmlash:
17577   case Intrinsic::arm_mve_vmla_n_predicated:
17578   case Intrinsic::arm_mve_vmlas_n_predicated:
17579   case Intrinsic::arm_mve_vqdmlah_predicated:
17580   case Intrinsic::arm_mve_vqdmlash_predicated:
17581   case Intrinsic::arm_mve_vqrdmlah_predicated:
17582   case Intrinsic::arm_mve_vqrdmlash_predicated: {
17583     // These intrinsics all take an i32 scalar operand which is narrowed to the
17584     // size of a single lane of the vector type they return. So we don't need
17585     // any bits of that operand above that point, which allows us to eliminate
17586     // uxth/sxth.
17587     unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
17588     APInt DemandedMask = APInt::getLowBitsSet(32, BitWidth);
17589     if (SimplifyDemandedBits(N->getOperand(3), DemandedMask, DCI))
17590       return SDValue();
17591     break;
17592   }
17593 
17594   case Intrinsic::arm_mve_minv:
17595   case Intrinsic::arm_mve_maxv:
17596   case Intrinsic::arm_mve_minav:
17597   case Intrinsic::arm_mve_maxav:
17598   case Intrinsic::arm_mve_minv_predicated:
17599   case Intrinsic::arm_mve_maxv_predicated:
17600   case Intrinsic::arm_mve_minav_predicated:
17601   case Intrinsic::arm_mve_maxav_predicated: {
17602     // These intrinsics all take an i32 scalar operand which is narrowed to the
17603     // size of a single lane of the vector type they take as the other input.
17604     unsigned BitWidth = N->getOperand(2)->getValueType(0).getScalarSizeInBits();
17605     APInt DemandedMask = APInt::getLowBitsSet(32, BitWidth);
17606     if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
17607       return SDValue();
17608     break;
17609   }
17610 
17611   case Intrinsic::arm_mve_addv: {
17612     // Turn this intrinsic straight into the appropriate ARMISD::VADDV node,
    // which allows PerformADDVecReduce to turn it into VADDLV when possible.
17614     bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
17615     unsigned Opc = Unsigned ? ARMISD::VADDVu : ARMISD::VADDVs;
17616     return DAG.getNode(Opc, SDLoc(N), N->getVTList(), N->getOperand(1));
17617   }
17618 
17619   case Intrinsic::arm_mve_addlv:
17620   case Intrinsic::arm_mve_addlv_predicated: {
17621     // Same for these, but ARMISD::VADDLV has to be followed by a BUILD_PAIR
    // which recombines the two i32 outputs into an i64.
17623     bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
17624     unsigned Opc = IntNo == Intrinsic::arm_mve_addlv ?
17625                     (Unsigned ? ARMISD::VADDLVu : ARMISD::VADDLVs) :
17626                     (Unsigned ? ARMISD::VADDLVpu : ARMISD::VADDLVps);
17627 
17628     SmallVector<SDValue, 4> Ops;
17629     for (unsigned i = 1, e = N->getNumOperands(); i < e; i++)
17630       if (i != 2)                      // skip the unsigned flag
17631         Ops.push_back(N->getOperand(i));
17632 
17633     SDLoc dl(N);
17634     SDValue val = DAG.getNode(Opc, dl, {MVT::i32, MVT::i32}, Ops);
17635     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, val.getValue(0),
17636                        val.getValue(1));
17637   }
17638   }
17639 
17640   return SDValue();
17641 }
17642 
17643 /// PerformShiftCombine - Checks for immediate versions of vector shifts and
17644 /// lowers them.  As with the vector shift intrinsics, this is done during DAG
17645 /// combining instead of DAG legalizing because the build_vectors for 64-bit
17646 /// vector element shift counts are generally not legal, and it is hard to see
17647 /// their values after they get legalized to loads from a constant pool.
17648 static SDValue PerformShiftCombine(SDNode *N,
17649                                    TargetLowering::DAGCombinerInfo &DCI,
17650                                    const ARMSubtarget *ST) {
17651   SelectionDAG &DAG = DCI.DAG;
17652   EVT VT = N->getValueType(0);
17653 
17654   if (ST->isThumb1Only() && N->getOpcode() == ISD::SHL && VT == MVT::i32 &&
17655       N->getOperand(0)->getOpcode() == ISD::AND &&
17656       N->getOperand(0)->hasOneUse()) {
17657     if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
17658       return SDValue();
17659     // Look for the pattern (shl (and x, AndMask), ShiftAmt). This doesn't
17660     // usually show up because instcombine prefers to canonicalize it to
    // (and (shl x, ShiftAmt), (shl AndMask, ShiftAmt)), but the shift can come
17662     // out of GEP lowering in some cases.
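    // For example (illustrative), with AndMask = 0x3ffff and ShiftAmt = 2,
    // MaskedBits = 14 and the node is rewritten as (x << 14) >>u 12, which
    // keeps the same 18 low bits and places them starting at bit 2.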
17663     SDValue N0 = N->getOperand(0);
17664     ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
17665     if (!ShiftAmtNode)
17666       return SDValue();
17667     uint32_t ShiftAmt = static_cast<uint32_t>(ShiftAmtNode->getZExtValue());
17668     ConstantSDNode *AndMaskNode = dyn_cast<ConstantSDNode>(N0->getOperand(1));
17669     if (!AndMaskNode)
17670       return SDValue();
17671     uint32_t AndMask = static_cast<uint32_t>(AndMaskNode->getZExtValue());
17672     // Don't transform uxtb/uxth.
17673     if (AndMask == 255 || AndMask == 65535)
17674       return SDValue();
17675     if (isMask_32(AndMask)) {
17676       uint32_t MaskedBits = llvm::countl_zero(AndMask);
17677       if (MaskedBits > ShiftAmt) {
17678         SDLoc DL(N);
17679         SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
17680                                   DAG.getConstant(MaskedBits, DL, MVT::i32));
17681         return DAG.getNode(
17682             ISD::SRL, DL, MVT::i32, SHL,
17683             DAG.getConstant(MaskedBits - ShiftAmt, DL, MVT::i32));
17684       }
17685     }
17686   }
17687 
17688   // Nothing to be done for scalar shifts.
17689   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
17690   if (!VT.isVector() || !TLI.isTypeLegal(VT))
17691     return SDValue();
17692   if (ST->hasMVEIntegerOps())
17693     return SDValue();
17694 
17695   int64_t Cnt;
17696 
17697   switch (N->getOpcode()) {
17698   default: llvm_unreachable("unexpected shift opcode");
17699 
17700   case ISD::SHL:
17701     if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) {
17702       SDLoc dl(N);
17703       return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0),
17704                          DAG.getConstant(Cnt, dl, MVT::i32));
17705     }
17706     break;
17707 
17708   case ISD::SRA:
17709   case ISD::SRL:
17710     if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
17711       unsigned VShiftOpc =
17712           (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM);
17713       SDLoc dl(N);
17714       return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
17715                          DAG.getConstant(Cnt, dl, MVT::i32));
17716     }
17717   }
17718   return SDValue();
17719 }
17720 
// Look for a sign, zero or fp extend of a larger than legal load. This can be
// split into multiple extending loads, which are simpler to deal with than an
// arbitrary extend. For fp extends we use an integer extending load and a
// VCVTL to convert the type to an f32.
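//
// As a rough example, zext(load <8 x i8>) to v8i32 can be rebuilt as two
// v4i8->v4i32 zero-extending loads at byte offsets 0 and 4, concatenated back
// together into the original v8i32 result.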
17725 static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG) {
17726   SDValue N0 = N->getOperand(0);
17727   if (N0.getOpcode() != ISD::LOAD)
17728     return SDValue();
17729   LoadSDNode *LD = cast<LoadSDNode>(N0.getNode());
17730   if (!LD->isSimple() || !N0.hasOneUse() || LD->isIndexed() ||
17731       LD->getExtensionType() != ISD::NON_EXTLOAD)
17732     return SDValue();
17733   EVT FromVT = LD->getValueType(0);
17734   EVT ToVT = N->getValueType(0);
17735   if (!ToVT.isVector())
17736     return SDValue();
17737   assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements());
17738   EVT ToEltVT = ToVT.getVectorElementType();
17739   EVT FromEltVT = FromVT.getVectorElementType();
17740 
17741   unsigned NumElements = 0;
17742   if (ToEltVT == MVT::i32 && FromEltVT == MVT::i8)
17743     NumElements = 4;
17744   if (ToEltVT == MVT::f32 && FromEltVT == MVT::f16)
17745     NumElements = 4;
17746   if (NumElements == 0 ||
17747       (FromEltVT != MVT::f16 && FromVT.getVectorNumElements() == NumElements) ||
17748       FromVT.getVectorNumElements() % NumElements != 0 ||
17749       !isPowerOf2_32(NumElements))
17750     return SDValue();
17751 
17752   LLVMContext &C = *DAG.getContext();
17753   SDLoc DL(LD);
17754   // Details about the old load
17755   SDValue Ch = LD->getChain();
17756   SDValue BasePtr = LD->getBasePtr();
17757   Align Alignment = LD->getOriginalAlign();
17758   MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
17759   AAMDNodes AAInfo = LD->getAAInfo();
17760 
17761   ISD::LoadExtType NewExtType =
17762       N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
17763   SDValue Offset = DAG.getUNDEF(BasePtr.getValueType());
17764   EVT NewFromVT = EVT::getVectorVT(
17765       C, EVT::getIntegerVT(C, FromEltVT.getScalarSizeInBits()), NumElements);
17766   EVT NewToVT = EVT::getVectorVT(
17767       C, EVT::getIntegerVT(C, ToEltVT.getScalarSizeInBits()), NumElements);
17768 
17769   SmallVector<SDValue, 4> Loads;
17770   SmallVector<SDValue, 4> Chains;
17771   for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) {
17772     unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8;
17773     SDValue NewPtr =
17774         DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::getFixed(NewOffset));
17775 
17776     SDValue NewLoad =
17777         DAG.getLoad(ISD::UNINDEXED, NewExtType, NewToVT, DL, Ch, NewPtr, Offset,
17778                     LD->getPointerInfo().getWithOffset(NewOffset), NewFromVT,
17779                     Alignment, MMOFlags, AAInfo);
17780     Loads.push_back(NewLoad);
17781     Chains.push_back(SDValue(NewLoad.getNode(), 1));
17782   }
17783 
  // Float truncs need to be extended with VCVTBs into their floating point
  // types.
17785   if (FromEltVT == MVT::f16) {
17786     SmallVector<SDValue, 4> Extends;
17787 
17788     for (unsigned i = 0; i < Loads.size(); i++) {
17789       SDValue LoadBC =
17790           DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, MVT::v8f16, Loads[i]);
17791       SDValue FPExt = DAG.getNode(ARMISD::VCVTL, DL, MVT::v4f32, LoadBC,
17792                                   DAG.getConstant(0, DL, MVT::i32));
17793       Extends.push_back(FPExt);
17794     }
17795 
17796     Loads = Extends;
17797   }
17798 
17799   SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
17800   DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewChain);
17801   return DAG.getNode(ISD::CONCAT_VECTORS, DL, ToVT, Loads);
17802 }
17803 
17804 /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
17805 /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
17806 static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
17807                                     const ARMSubtarget *ST) {
17808   SDValue N0 = N->getOperand(0);
17809 
17810   // Check for sign- and zero-extensions of vector extract operations of 8- and
17811   // 16-bit vector elements. NEON and MVE support these directly. They are
17812   // handled during DAG combining because type legalization will promote them
17813   // to 32-bit types and it is messy to recognize the operations after that.
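  // For example (illustrative): (i32 (sext (extract_vector_elt v8i16 %v, 2)))
  // becomes (ARMISD::VGETLANEs %v, 2); the zext/anyext forms use
  // ARMISD::VGETLANEu instead.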
17814   if ((ST->hasNEON() || ST->hasMVEIntegerOps()) &&
17815       N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
17816     SDValue Vec = N0.getOperand(0);
17817     SDValue Lane = N0.getOperand(1);
17818     EVT VT = N->getValueType(0);
17819     EVT EltVT = N0.getValueType();
17820     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
17821 
17822     if (VT == MVT::i32 &&
17823         (EltVT == MVT::i8 || EltVT == MVT::i16) &&
17824         TLI.isTypeLegal(Vec.getValueType()) &&
17825         isa<ConstantSDNode>(Lane)) {
17826 
17827       unsigned Opc = 0;
17828       switch (N->getOpcode()) {
17829       default: llvm_unreachable("unexpected opcode");
17830       case ISD::SIGN_EXTEND:
17831         Opc = ARMISD::VGETLANEs;
17832         break;
17833       case ISD::ZERO_EXTEND:
17834       case ISD::ANY_EXTEND:
17835         Opc = ARMISD::VGETLANEu;
17836         break;
17837       }
17838       return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane);
17839     }
17840   }
17841 
17842   if (ST->hasMVEIntegerOps())
17843     if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG))
17844       return NewLoad;
17845 
17846   return SDValue();
17847 }
17848 
17849 static SDValue PerformFPExtendCombine(SDNode *N, SelectionDAG &DAG,
17850                                       const ARMSubtarget *ST) {
17851   if (ST->hasMVEFloatOps())
17852     if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG))
17853       return NewLoad;
17854 
17855   return SDValue();
17856 }
17857 
17858 // Lower smin(smax(x, C1), C2) to ssat or usat, if they have saturating
17859 // constant bounds.
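// For example (illustrative): smin(smax(x, -128), 127) becomes an
// ARMISD::SSAT saturating to the signed 8-bit range, and smin(smax(x, 0), 255)
// becomes an ARMISD::USAT saturating to the unsigned 8-bit range.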
17860 static SDValue PerformMinMaxToSatCombine(SDValue Op, SelectionDAG &DAG,
17861                                          const ARMSubtarget *Subtarget) {
17862   if ((Subtarget->isThumb() || !Subtarget->hasV6Ops()) &&
17863       !Subtarget->isThumb2())
17864     return SDValue();
17865 
17866   EVT VT = Op.getValueType();
17867   SDValue Op0 = Op.getOperand(0);
17868 
17869   if (VT != MVT::i32 ||
17870       (Op0.getOpcode() != ISD::SMIN && Op0.getOpcode() != ISD::SMAX) ||
17871       !isa<ConstantSDNode>(Op.getOperand(1)) ||
17872       !isa<ConstantSDNode>(Op0.getOperand(1)))
17873     return SDValue();
17874 
17875   SDValue Min = Op;
17876   SDValue Max = Op0;
17877   SDValue Input = Op0.getOperand(0);
17878   if (Min.getOpcode() == ISD::SMAX)
17879     std::swap(Min, Max);
17880 
17881   APInt MinC = Min.getConstantOperandAPInt(1);
17882   APInt MaxC = Max.getConstantOperandAPInt(1);
17883 
17884   if (Min.getOpcode() != ISD::SMIN || Max.getOpcode() != ISD::SMAX ||
17885       !(MinC + 1).isPowerOf2())
17886     return SDValue();
17887 
17888   SDLoc DL(Op);
17889   if (MinC == ~MaxC)
17890     return DAG.getNode(ARMISD::SSAT, DL, VT, Input,
17891                        DAG.getConstant(MinC.countr_one(), DL, VT));
17892   if (MaxC == 0)
17893     return DAG.getNode(ARMISD::USAT, DL, VT, Input,
17894                        DAG.getConstant(MinC.countr_one(), DL, VT));
17895 
17896   return SDValue();
17897 }
17898 
17899 /// PerformMinMaxCombine - Target-specific DAG combining for creating truncating
17900 /// saturates.
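/// For example (illustrative, under MVE): smin(smax(v4i32 x, -32768), 32767)
/// can become a VQMOVNs writing the narrowed values into the bottom lanes,
/// followed by a sign_extend_inreg from v4i16 to recreate the full-width
/// result.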
17901 static SDValue PerformMinMaxCombine(SDNode *N, SelectionDAG &DAG,
17902                                     const ARMSubtarget *ST) {
17903   EVT VT = N->getValueType(0);
17904   SDValue N0 = N->getOperand(0);
17905 
17906   if (VT == MVT::i32)
17907     return PerformMinMaxToSatCombine(SDValue(N, 0), DAG, ST);
17908 
17909   if (!ST->hasMVEIntegerOps())
17910     return SDValue();
17911 
17912   if (SDValue V = PerformVQDMULHCombine(N, DAG))
17913     return V;
17914 
17915   if (VT != MVT::v4i32 && VT != MVT::v8i16)
17916     return SDValue();
17917 
17918   auto IsSignedSaturate = [&](SDNode *Min, SDNode *Max) {
    // Check that one is a smin and the other is a smax
17920     if (Min->getOpcode() != ISD::SMIN)
17921       std::swap(Min, Max);
17922     if (Min->getOpcode() != ISD::SMIN || Max->getOpcode() != ISD::SMAX)
17923       return false;
17924 
17925     APInt SaturateC;
17926     if (VT == MVT::v4i32)
17927       SaturateC = APInt(32, (1 << 15) - 1, true);
    else // if (VT == MVT::v8i16)
17929       SaturateC = APInt(16, (1 << 7) - 1, true);
17930 
17931     APInt MinC, MaxC;
17932     if (!ISD::isConstantSplatVector(Min->getOperand(1).getNode(), MinC) ||
17933         MinC != SaturateC)
17934       return false;
17935     if (!ISD::isConstantSplatVector(Max->getOperand(1).getNode(), MaxC) ||
17936         MaxC != ~SaturateC)
17937       return false;
17938     return true;
17939   };
17940 
17941   if (IsSignedSaturate(N, N0.getNode())) {
17942     SDLoc DL(N);
17943     MVT ExtVT, HalfVT;
17944     if (VT == MVT::v4i32) {
17945       HalfVT = MVT::v8i16;
17946       ExtVT = MVT::v4i16;
17947     } else { // if (VT == MVT::v8i16)
17948       HalfVT = MVT::v16i8;
17949       ExtVT = MVT::v8i8;
17950     }
17951 
    // Create a VQMOVNB with undef top lanes, then sign extend it into the top
    // half. That extend will hopefully be removed if only the bottom bits are
    // demanded (through a truncating store, for example).
17955     SDValue VQMOVN =
17956         DAG.getNode(ARMISD::VQMOVNs, DL, HalfVT, DAG.getUNDEF(HalfVT),
17957                     N0->getOperand(0), DAG.getConstant(0, DL, MVT::i32));
17958     SDValue Bitcast = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, VQMOVN);
17959     return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Bitcast,
17960                        DAG.getValueType(ExtVT));
17961   }
17962 
17963   auto IsUnsignedSaturate = [&](SDNode *Min) {
    // For unsigned, we just need to check for <= 0xffff (or <= 0xff for v8i16)
17965     if (Min->getOpcode() != ISD::UMIN)
17966       return false;
17967 
17968     APInt SaturateC;
17969     if (VT == MVT::v4i32)
17970       SaturateC = APInt(32, (1 << 16) - 1, true);
    else // if (VT == MVT::v8i16)
17972       SaturateC = APInt(16, (1 << 8) - 1, true);
17973 
17974     APInt MinC;
17975     if (!ISD::isConstantSplatVector(Min->getOperand(1).getNode(), MinC) ||
17976         MinC != SaturateC)
17977       return false;
17978     return true;
17979   };
17980 
17981   if (IsUnsignedSaturate(N)) {
17982     SDLoc DL(N);
17983     MVT HalfVT;
17984     unsigned ExtConst;
17985     if (VT == MVT::v4i32) {
17986       HalfVT = MVT::v8i16;
17987       ExtConst = 0x0000FFFF;
    } else { // if (VT == MVT::v8i16)
17989       HalfVT = MVT::v16i8;
17990       ExtConst = 0x00FF;
17991     }
17992 
    // Create a VQMOVNB with undef top lanes, then zero extend into the top
    // half with an AND. That extend will hopefully be removed if only the
    // bottom bits are demanded (through a truncating store, for example).
17996     SDValue VQMOVN =
17997         DAG.getNode(ARMISD::VQMOVNu, DL, HalfVT, DAG.getUNDEF(HalfVT), N0,
17998                     DAG.getConstant(0, DL, MVT::i32));
17999     SDValue Bitcast = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, VQMOVN);
18000     return DAG.getNode(ISD::AND, DL, VT, Bitcast,
18001                        DAG.getConstant(ExtConst, DL, VT));
18002   }
18003 
18004   return SDValue();
18005 }
18006 
18007 static const APInt *isPowerOf2Constant(SDValue V) {
18008   ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
18009   if (!C)
18010     return nullptr;
18011   const APInt *CV = &C->getAPIntValue();
18012   return CV->isPowerOf2() ? CV : nullptr;
18013 }
18014 
SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV,
                                                   SelectionDAG &DAG) const {
18016   // If we have a CMOV, OR and AND combination such as:
18017   //   if (x & CN)
18018   //     y |= CM;
18019   //
18020   // And:
18021   //   * CN is a single bit;
18022   //   * All bits covered by CM are known zero in y
18023   //
18024   // Then we can convert this into a sequence of BFI instructions. This will
18025   // always be a win if CM is a single bit, will always be no worse than the
18026   // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is
18027   // three bits (due to the extra IT instruction).
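  //
  // For example (illustrative): with CN == 0x4 and CM == 0x30, x is first
  // shifted right by 2 so the tested bit lands in bit 0, and two BFIs then
  // copy that bit into bits 4 and 5 of y.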
18028 
18029   SDValue Op0 = CMOV->getOperand(0);
18030   SDValue Op1 = CMOV->getOperand(1);
18031   auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2));
18032   auto CC = CCNode->getAPIntValue().getLimitedValue();
18033   SDValue CmpZ = CMOV->getOperand(4);
18034 
18035   // The compare must be against zero.
18036   if (!isNullConstant(CmpZ->getOperand(1)))
18037     return SDValue();
18038 
18039   assert(CmpZ->getOpcode() == ARMISD::CMPZ);
18040   SDValue And = CmpZ->getOperand(0);
18041   if (And->getOpcode() != ISD::AND)
18042     return SDValue();
18043   const APInt *AndC = isPowerOf2Constant(And->getOperand(1));
18044   if (!AndC)
18045     return SDValue();
18046   SDValue X = And->getOperand(0);
18047 
18048   if (CC == ARMCC::EQ) {
18049     // We're performing an "equal to zero" compare. Swap the operands so we
18050     // canonicalize on a "not equal to zero" compare.
18051     std::swap(Op0, Op1);
18052   } else {
18053     assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
18054   }
18055 
18056   if (Op1->getOpcode() != ISD::OR)
18057     return SDValue();
18058 
18059   ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1));
18060   if (!OrC)
18061     return SDValue();
18062   SDValue Y = Op1->getOperand(0);
18063 
18064   if (Op0 != Y)
18065     return SDValue();
18066 
18067   // Now, is it profitable to continue?
18068   APInt OrCI = OrC->getAPIntValue();
18069   unsigned Heuristic = Subtarget->isThumb() ? 3 : 2;
18070   if (OrCI.popcount() > Heuristic)
18071     return SDValue();
18072 
18073   // Lastly, can we determine that the bits defined by OrCI
18074   // are zero in Y?
18075   KnownBits Known = DAG.computeKnownBits(Y);
18076   if ((OrCI & Known.Zero) != OrCI)
18077     return SDValue();
18078 
18079   // OK, we can do the combine.
18080   SDValue V = Y;
18081   SDLoc dl(X);
18082   EVT VT = X.getValueType();
18083   unsigned BitInX = AndC->logBase2();
18084 
18085   if (BitInX != 0) {
18086     // We must shift X first.
18087     X = DAG.getNode(ISD::SRL, dl, VT, X,
18088                     DAG.getConstant(BitInX, dl, VT));
18089   }
18090 
18091   for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits();
18092        BitInY < NumActiveBits; ++BitInY) {
18093     if (OrCI[BitInY] == 0)
18094       continue;
18095     APInt Mask(VT.getSizeInBits(), 0);
18096     Mask.setBit(BitInY);
18097     V = DAG.getNode(ARMISD::BFI, dl, VT, V, X,
18098                     // Confusingly, the operand is an *inverted* mask.
18099                     DAG.getConstant(~Mask, dl, VT));
18100   }
18101 
18102   return V;
18103 }
18104 
18105 // Given N, the value controlling the conditional branch, search for the loop
18106 // intrinsic, returning it, along with how the value is used. We need to handle
18107 // patterns such as the following:
18108 // (brcond (xor (setcc (loop.decrement), 0, ne), 1), exit)
18109 // (brcond (setcc (loop.decrement), 0, eq), exit)
18110 // (brcond (setcc (loop.decrement), 0, ne), header)
18111 static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm,
18112                                    bool &Negate) {
18113   switch (N->getOpcode()) {
18114   default:
18115     break;
18116   case ISD::XOR: {
18117     if (!isa<ConstantSDNode>(N.getOperand(1)))
18118       return SDValue();
18119     if (!cast<ConstantSDNode>(N.getOperand(1))->isOne())
18120       return SDValue();
18121     Negate = !Negate;
18122     return SearchLoopIntrinsic(N.getOperand(0), CC, Imm, Negate);
18123   }
18124   case ISD::SETCC: {
18125     auto *Const = dyn_cast<ConstantSDNode>(N.getOperand(1));
18126     if (!Const)
18127       return SDValue();
18128     if (Const->isZero())
18129       Imm = 0;
18130     else if (Const->isOne())
18131       Imm = 1;
18132     else
18133       return SDValue();
18134     CC = cast<CondCodeSDNode>(N.getOperand(2))->get();
18135     return SearchLoopIntrinsic(N->getOperand(0), CC, Imm, Negate);
18136   }
18137   case ISD::INTRINSIC_W_CHAIN: {
18138     unsigned IntOp = cast<ConstantSDNode>(N.getOperand(1))->getZExtValue();
18139     if (IntOp != Intrinsic::test_start_loop_iterations &&
18140         IntOp != Intrinsic::loop_decrement_reg)
18141       return SDValue();
18142     return N;
18143   }
18144   }
18145   return SDValue();
18146 }
18147 
18148 static SDValue PerformHWLoopCombine(SDNode *N,
18149                                     TargetLowering::DAGCombinerInfo &DCI,
18150                                     const ARMSubtarget *ST) {
18151 
  // The hwloop intrinsics that we're interested in are used for control-flow,
  // either for entering or exiting the loop:
  // - test.start.loop.iterations will test whether its operand is zero. If it
  //   is zero, the following branch should not enter the loop.
  // - loop.decrement.reg also tests whether its operand is zero. If it is
  //   zero, the following branch should not branch back to the beginning of
  //   the loop.
  // So here, we need to check how the brcond is using the result of each of
  // the intrinsics to ensure that we're branching to the right place at the
  // right time.
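  //
  // For example (roughly, details elided):
  //   (brcond (setcc (loop.decrement.reg lr, 4), 0, ne), header)
  // ends up as an ARMISD::LOOP_DEC of lr by 4 feeding an ARMISD::LE branch to
  // the loop header, while the following unconditional exit branch is left
  // untouched.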
18162 
18163   ISD::CondCode CC;
18164   SDValue Cond;
18165   int Imm = 1;
18166   bool Negate = false;
18167   SDValue Chain = N->getOperand(0);
18168   SDValue Dest;
18169 
18170   if (N->getOpcode() == ISD::BRCOND) {
18171     CC = ISD::SETEQ;
18172     Cond = N->getOperand(1);
18173     Dest = N->getOperand(2);
18174   } else {
18175     assert(N->getOpcode() == ISD::BR_CC && "Expected BRCOND or BR_CC!");
18176     CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
18177     Cond = N->getOperand(2);
18178     Dest = N->getOperand(4);
18179     if (auto *Const = dyn_cast<ConstantSDNode>(N->getOperand(3))) {
18180       if (!Const->isOne() && !Const->isZero())
18181         return SDValue();
18182       Imm = Const->getZExtValue();
18183     } else
18184       return SDValue();
18185   }
18186 
18187   SDValue Int = SearchLoopIntrinsic(Cond, CC, Imm, Negate);
18188   if (!Int)
18189     return SDValue();
18190 
18191   if (Negate)
18192     CC = ISD::getSetCCInverse(CC, /* Integer inverse */ MVT::i32);
18193 
18194   auto IsTrueIfZero = [](ISD::CondCode CC, int Imm) {
18195     return (CC == ISD::SETEQ && Imm == 0) ||
18196            (CC == ISD::SETNE && Imm == 1) ||
18197            (CC == ISD::SETLT && Imm == 1) ||
18198            (CC == ISD::SETULT && Imm == 1);
18199   };
18200 
18201   auto IsFalseIfZero = [](ISD::CondCode CC, int Imm) {
18202     return (CC == ISD::SETEQ && Imm == 1) ||
18203            (CC == ISD::SETNE && Imm == 0) ||
18204            (CC == ISD::SETGT && Imm == 0) ||
18205            (CC == ISD::SETUGT && Imm == 0) ||
18206            (CC == ISD::SETGE && Imm == 1) ||
18207            (CC == ISD::SETUGE && Imm == 1);
18208   };
18209 
18210   assert((IsTrueIfZero(CC, Imm) || IsFalseIfZero(CC, Imm)) &&
18211          "unsupported condition");
18212 
18213   SDLoc dl(Int);
18214   SelectionDAG &DAG = DCI.DAG;
18215   SDValue Elements = Int.getOperand(2);
18216   unsigned IntOp = cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue();
  assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR) &&
         "expected single br user");
18219   SDNode *Br = *N->use_begin();
18220   SDValue OtherTarget = Br->getOperand(1);
18221 
18222   // Update the unconditional branch to branch to the given Dest.
18223   auto UpdateUncondBr = [](SDNode *Br, SDValue Dest, SelectionDAG &DAG) {
18224     SDValue NewBrOps[] = { Br->getOperand(0), Dest };
18225     SDValue NewBr = DAG.getNode(ISD::BR, SDLoc(Br), MVT::Other, NewBrOps);
18226     DAG.ReplaceAllUsesOfValueWith(SDValue(Br, 0), NewBr);
18227   };
18228 
18229   if (IntOp == Intrinsic::test_start_loop_iterations) {
18230     SDValue Res;
18231     SDValue Setup = DAG.getNode(ARMISD::WLSSETUP, dl, MVT::i32, Elements);
18232     // We expect this 'instruction' to branch when the counter is zero.
18233     if (IsTrueIfZero(CC, Imm)) {
18234       SDValue Ops[] = {Chain, Setup, Dest};
18235       Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops);
18236     } else {
      // The logic is the reverse of what we need for WLS, so find the other
      // basic block target: the target of the following unconditional br.
18239       UpdateUncondBr(Br, Dest, DAG);
18240 
18241       SDValue Ops[] = {Chain, Setup, OtherTarget};
18242       Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops);
18243     }
18244     // Update LR count to the new value
18245     DAG.ReplaceAllUsesOfValueWith(Int.getValue(0), Setup);
18246     // Update chain
18247     DAG.ReplaceAllUsesOfValueWith(Int.getValue(2), Int.getOperand(0));
18248     return Res;
18249   } else {
18250     SDValue Size = DAG.getTargetConstant(
18251       cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl, MVT::i32);
18252     SDValue Args[] = { Int.getOperand(0), Elements, Size, };
18253     SDValue LoopDec = DAG.getNode(ARMISD::LOOP_DEC, dl,
18254                                   DAG.getVTList(MVT::i32, MVT::Other), Args);
18255     DAG.ReplaceAllUsesWith(Int.getNode(), LoopDec.getNode());
18256 
18257     // We expect this instruction to branch when the count is not zero.
18258     SDValue Target = IsFalseIfZero(CC, Imm) ? Dest : OtherTarget;
18259 
18260     // Update the unconditional branch to target the loop preheader if we've
18261     // found the condition has been reversed.
18262     if (Target == OtherTarget)
18263       UpdateUncondBr(Br, Dest, DAG);
18264 
18265     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
18266                         SDValue(LoopDec.getNode(), 1), Chain);
18267 
18268     SDValue EndArgs[] = { Chain, SDValue(LoopDec.getNode(), 0), Target };
18269     return DAG.getNode(ARMISD::LE, dl, MVT::Other, EndArgs);
18270   }
18271   return SDValue();
18272 }
18273 
18274 /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
18275 SDValue
18276 ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
18277   SDValue Cmp = N->getOperand(4);
18278   if (Cmp.getOpcode() != ARMISD::CMPZ)
18279     // Only looking at NE cases.
18280     return SDValue();
18281 
18282   EVT VT = N->getValueType(0);
18283   SDLoc dl(N);
18284   SDValue LHS = Cmp.getOperand(0);
18285   SDValue RHS = Cmp.getOperand(1);
18286   SDValue Chain = N->getOperand(0);
18287   SDValue BB = N->getOperand(1);
18288   SDValue ARMcc = N->getOperand(2);
18289   ARMCC::CondCodes CC =
18290     (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
18291 
18292   // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
18293   // -> (brcond Chain BB CC CPSR Cmp)
18294   if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() &&
18295       LHS->getOperand(0)->getOpcode() == ARMISD::CMOV &&
18296       LHS->getOperand(0)->hasOneUse() &&
18297       isNullConstant(LHS->getOperand(0)->getOperand(0)) &&
18298       isOneConstant(LHS->getOperand(0)->getOperand(1)) &&
18299       isOneConstant(LHS->getOperand(1)) && isNullConstant(RHS)) {
18300     return DAG.getNode(
18301         ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2),
18302         LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4));
18303   }
18304 
18305   return SDValue();
18306 }
18307 
18308 /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
18309 SDValue
18310 ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
18311   SDValue Cmp = N->getOperand(4);
18312   if (Cmp.getOpcode() != ARMISD::CMPZ)
18313     // Only looking at EQ and NE cases.
18314     return SDValue();
18315 
18316   EVT VT = N->getValueType(0);
18317   SDLoc dl(N);
18318   SDValue LHS = Cmp.getOperand(0);
18319   SDValue RHS = Cmp.getOperand(1);
18320   SDValue FalseVal = N->getOperand(0);
18321   SDValue TrueVal = N->getOperand(1);
18322   SDValue ARMcc = N->getOperand(2);
18323   ARMCC::CondCodes CC =
18324     (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
18325 
18326   // BFI is only available on V6T2+.
18327   if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
18328     SDValue R = PerformCMOVToBFICombine(N, DAG);
18329     if (R)
18330       return R;
18331   }
18332 
18333   // Simplify
18334   //   mov     r1, r0
18335   //   cmp     r1, x
18336   //   mov     r0, y
18337   //   moveq   r0, x
18338   // to
18339   //   cmp     r0, x
18340   //   movne   r0, y
18341   //
18342   //   mov     r1, r0
18343   //   cmp     r1, x
18344   //   mov     r0, x
18345   //   movne   r0, y
18346   // to
18347   //   cmp     r0, x
18348   //   movne   r0, y
18349   /// FIXME: Turn this into a target neutral optimization?
18350   SDValue Res;
18351   if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
18352     Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
18353                       N->getOperand(3), Cmp);
18354   } else if (CC == ARMCC::EQ && TrueVal == RHS) {
18355     SDValue ARMcc;
18356     SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl);
18357     Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc,
18358                       N->getOperand(3), NewCmp);
18359   }
18360 
18361   // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0))
18362   // -> (cmov F T CC CPSR Cmp)
18363   if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse() &&
18364       isNullConstant(LHS->getOperand(0)) && isOneConstant(LHS->getOperand(1)) &&
18365       isNullConstant(RHS)) {
18366     return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
18367                        LHS->getOperand(2), LHS->getOperand(3),
18368                        LHS->getOperand(4));
18369   }
18370 
18371   if (!VT.isInteger())
    return SDValue();
18373 
  // Fold away an unnecessary CMPZ/CMOV
18375   // CMOV A, B, C1, $cpsr, (CMPZ (CMOV 1, 0, C2, D), 0) ->
18376   // if C1==EQ -> CMOV A, B, C2, $cpsr, D
18377   // if C1==NE -> CMOV A, B, NOT(C2), $cpsr, D
18378   if (N->getConstantOperandVal(2) == ARMCC::EQ ||
18379       N->getConstantOperandVal(2) == ARMCC::NE) {
18380     ARMCC::CondCodes Cond;
18381     if (SDValue C = IsCMPZCSINC(N->getOperand(4).getNode(), Cond)) {
18382       if (N->getConstantOperandVal(2) == ARMCC::NE)
18383         Cond = ARMCC::getOppositeCondition(Cond);
18384       return DAG.getNode(N->getOpcode(), SDLoc(N), MVT::i32, N->getOperand(0),
18385                          N->getOperand(1),
18386                          DAG.getTargetConstant(Cond, SDLoc(N), MVT::i32),
18387                          N->getOperand(3), C);
18388     }
18389   }
18390 
18391   // Materialize a boolean comparison for integers so we can avoid branching.
18392   if (isNullConstant(FalseVal)) {
18393     if (CC == ARMCC::EQ && isOneConstant(TrueVal)) {
18394       if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) {
        // If x == y then x - y == 0, and ARM's CLZ will return 32. Shifting
        // that right by 5 bits makes the 32 become 1; otherwise it will be 0.
18397         // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5
18398         SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
18399         Res = DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::CTLZ, dl, VT, Sub),
18400                           DAG.getConstant(5, dl, MVT::i32));
18401       } else {
18402         // CMOV 0, 1, ==, (CMPZ x, y) ->
18403         //     (UADDO_CARRY (SUB x, y), t:0, t:1)
18404         // where t = (USUBO_CARRY 0, (SUB x, y), 0)
18405         //
18406         // The USUBO_CARRY computes 0 - (x - y) and this will give a borrow when
18407         // x != y. In other words, a carry C == 1 when x == y, C == 0
18408         // otherwise.
18409         // The final UADDO_CARRY computes
18410         //     x - y + (0 - (x - y)) + C == C
18411         SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
18412         SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18413         SDValue Neg = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, Sub);
        // ISD::USUBO_CARRY returns a borrow, but what we actually want here is
        // the carry.
18416         SDValue Carry =
18417             DAG.getNode(ISD::SUB, dl, MVT::i32,
18418                         DAG.getConstant(1, dl, MVT::i32), Neg.getValue(1));
18419         Res = DAG.getNode(ISD::UADDO_CARRY, dl, VTs, Sub, Neg, Carry);
18420       }
18421     } else if (CC == ARMCC::NE && !isNullConstant(RHS) &&
18422                (!Subtarget->isThumb1Only() || isPowerOf2Constant(TrueVal))) {
18423       // This seems pointless but will allow us to combine it further below.
18424       // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1
18425       SDValue Sub =
18426           DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS);
18427       SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
18428                                           Sub.getValue(1), SDValue());
18429       Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, TrueVal, ARMcc,
18430                         N->getOperand(3), CPSRGlue.getValue(1));
18431       FalseVal = Sub;
18432     }
18433   } else if (isNullConstant(TrueVal)) {
18434     if (CC == ARMCC::EQ && !isNullConstant(RHS) &&
18435         (!Subtarget->isThumb1Only() || isPowerOf2Constant(FalseVal))) {
      // This seems pointless but will allow us to combine it further below.
      // Note that we change == to != as this is the dual of the case above.
18438       // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1
18439       SDValue Sub =
18440           DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS);
18441       SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
18442                                           Sub.getValue(1), SDValue());
18443       Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, FalseVal,
18444                         DAG.getConstant(ARMCC::NE, dl, MVT::i32),
18445                         N->getOperand(3), CPSRGlue.getValue(1));
18446       FalseVal = Sub;
18447     }
18448   }
18449 
18450   // On Thumb1, the DAG above may be further combined if z is a power of 2
18451   // (z == 2 ^ K).
18452   // CMOV (SUBS x, y), z, !=, (SUBS x, y):1 ->
18453   // t1 = (USUBO (SUB x, y), 1)
18454   // t2 = (USUBO_CARRY (SUB x, y), t1:0, t1:1)
18455   // Result = if K != 0 then (SHL t2:0, K) else t2:0
18456   //
  // This also handles the special case of comparing against zero; it's
  // essentially the same pattern, except there's no SUBS:
18459   // CMOV x, z, !=, (CMPZ x, 0) ->
18460   // t1 = (USUBO x, 1)
18461   // t2 = (USUBO_CARRY x, t1:0, t1:1)
18462   // Result = if K != 0 then (SHL t2:0, K) else t2:0
18463   const APInt *TrueConst;
18464   if (Subtarget->isThumb1Only() && CC == ARMCC::NE &&
18465       ((FalseVal.getOpcode() == ARMISD::SUBS &&
18466         FalseVal.getOperand(0) == LHS && FalseVal.getOperand(1) == RHS) ||
18467        (FalseVal == LHS && isNullConstant(RHS))) &&
18468       (TrueConst = isPowerOf2Constant(TrueVal))) {
18469     SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18470     unsigned ShiftAmount = TrueConst->logBase2();
18471     if (ShiftAmount)
18472       TrueVal = DAG.getConstant(1, dl, VT);
18473     SDValue Subc = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, TrueVal);
18474     Res = DAG.getNode(ISD::USUBO_CARRY, dl, VTs, FalseVal, Subc,
18475                       Subc.getValue(1));
18476 
18477     if (ShiftAmount)
18478       Res = DAG.getNode(ISD::SHL, dl, VT, Res,
18479                         DAG.getConstant(ShiftAmount, dl, MVT::i32));
18480   }
18481 
18482   if (Res.getNode()) {
18483     KnownBits Known = DAG.computeKnownBits(SDValue(N,0));
18484     // Capture demanded bits information that would be otherwise lost.
18485     if (Known.Zero == 0xfffffffe)
18486       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
18487                         DAG.getValueType(MVT::i1));
18488     else if (Known.Zero == 0xffffff00)
18489       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
18490                         DAG.getValueType(MVT::i8));
18491     else if (Known.Zero == 0xffff0000)
18492       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
18493                         DAG.getValueType(MVT::i16));
18494   }
18495 
18496   return Res;
18497 }
18498 
18499 static SDValue PerformBITCASTCombine(SDNode *N,
18500                                      TargetLowering::DAGCombinerInfo &DCI,
18501                                      const ARMSubtarget *ST) {
18502   SelectionDAG &DAG = DCI.DAG;
18503   SDValue Src = N->getOperand(0);
18504   EVT DstVT = N->getValueType(0);
18505 
18506   // Convert v4f32 bitcast (v4i32 vdup (i32)) -> v4f32 vdup (i32) under MVE.
18507   if (ST->hasMVEIntegerOps() && Src.getOpcode() == ARMISD::VDUP) {
18508     EVT SrcVT = Src.getValueType();
18509     if (SrcVT.getScalarSizeInBits() == DstVT.getScalarSizeInBits())
18510       return DAG.getNode(ARMISD::VDUP, SDLoc(N), DstVT, Src.getOperand(0));
18511   }
18512 
18513   // We may have a bitcast of something that has already had this bitcast
18514   // combine performed on it, so skip past any VECTOR_REG_CASTs.
18515   while (Src.getOpcode() == ARMISD::VECTOR_REG_CAST)
18516     Src = Src.getOperand(0);
18517 
18518   // Bitcast from element-wise VMOV or VMVN doesn't need VREV if the VREV that
18519   // would be generated is at least the width of the element type.
18520   EVT SrcVT = Src.getValueType();
18521   if ((Src.getOpcode() == ARMISD::VMOVIMM ||
18522        Src.getOpcode() == ARMISD::VMVNIMM ||
18523        Src.getOpcode() == ARMISD::VMOVFPIMM) &&
18524       SrcVT.getScalarSizeInBits() <= DstVT.getScalarSizeInBits() &&
18525       DAG.getDataLayout().isBigEndian())
18526     return DAG.getNode(ARMISD::VECTOR_REG_CAST, SDLoc(N), DstVT, Src);
18527 
18528   // bitcast(extract(x, n)); bitcast(extract(x, n+1))  ->  VMOVRRD x
18529   if (SDValue R = PerformExtractEltToVMOVRRD(N, DCI))
18530     return R;
18531 
18532   return SDValue();
18533 }
18534 
// Some combines for the MVETrunc truncation legalizer helper. Also lowers the
// node into stack operations after legalizeOps.
18537 SDValue ARMTargetLowering::PerformMVETruncCombine(
18538     SDNode *N, TargetLowering::DAGCombinerInfo &DCI) const {
18539   SelectionDAG &DAG = DCI.DAG;
18540   EVT VT = N->getValueType(0);
18541   SDLoc DL(N);
18542 
18543   // MVETrunc(Undef, Undef) -> Undef
18544   if (all_of(N->ops(), [](SDValue Op) { return Op.isUndef(); }))
18545     return DAG.getUNDEF(VT);
18546 
  // MVETrunc(MVETrunc(a, b), MVETrunc(c, d)) -> MVETrunc(a, b, c, d)
18548   if (N->getNumOperands() == 2 &&
18549       N->getOperand(0).getOpcode() == ARMISD::MVETRUNC &&
18550       N->getOperand(1).getOpcode() == ARMISD::MVETRUNC)
18551     return DAG.getNode(ARMISD::MVETRUNC, DL, VT, N->getOperand(0).getOperand(0),
18552                        N->getOperand(0).getOperand(1),
18553                        N->getOperand(1).getOperand(0),
18554                        N->getOperand(1).getOperand(1));
18555 
18556   // MVETrunc(shuffle, shuffle) -> VMOVN
18557   if (N->getNumOperands() == 2 &&
18558       N->getOperand(0).getOpcode() == ISD::VECTOR_SHUFFLE &&
18559       N->getOperand(1).getOpcode() == ISD::VECTOR_SHUFFLE) {
18560     auto *S0 = cast<ShuffleVectorSDNode>(N->getOperand(0).getNode());
18561     auto *S1 = cast<ShuffleVectorSDNode>(N->getOperand(1).getNode());
18562 
18563     if (S0->getOperand(0) == S1->getOperand(0) &&
18564         S0->getOperand(1) == S1->getOperand(1)) {
18565       // Construct complete shuffle mask
18566       SmallVector<int, 8> Mask(S0->getMask());
18567       Mask.append(S1->getMask().begin(), S1->getMask().end());
18568 
18569       if (isVMOVNTruncMask(Mask, VT, false))
18570         return DAG.getNode(
18571             ARMISD::VMOVN, DL, VT,
18572             DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, S0->getOperand(0)),
18573             DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, S0->getOperand(1)),
18574             DAG.getConstant(1, DL, MVT::i32));
18575       if (isVMOVNTruncMask(Mask, VT, true))
18576         return DAG.getNode(
18577             ARMISD::VMOVN, DL, VT,
18578             DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, S0->getOperand(1)),
18579             DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, S0->getOperand(0)),
18580             DAG.getConstant(1, DL, MVT::i32));
18581     }
18582   }
18583 
18584   // For MVETrunc of a buildvector or shuffle, it can be beneficial to lower the
18585   // truncate to a buildvector to allow the generic optimisations to kick in.
18586   if (all_of(N->ops(), [](SDValue Op) {
18587         return Op.getOpcode() == ISD::BUILD_VECTOR ||
18588                Op.getOpcode() == ISD::VECTOR_SHUFFLE ||
18589                (Op.getOpcode() == ISD::BITCAST &&
18590                 Op.getOperand(0).getOpcode() == ISD::BUILD_VECTOR);
18591       })) {
18592     SmallVector<SDValue, 8> Extracts;
18593     for (unsigned Op = 0; Op < N->getNumOperands(); Op++) {
18594       SDValue O = N->getOperand(Op);
18595       for (unsigned i = 0; i < O.getValueType().getVectorNumElements(); i++) {
18596         SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, O,
18597                                   DAG.getConstant(i, DL, MVT::i32));
18598         Extracts.push_back(Ext);
18599       }
18600     }
18601     return DAG.getBuildVector(VT, DL, Extracts);
18602   }
18603 
18604   // If we are late in the legalization process and nothing has optimised
18605   // the trunc to anything better, lower it to a stack store and reload,
18606   // performing the truncation whilst keeping the lanes in the correct order:
18607   //   VSTRH.32 a, stack; VSTRH.32 b, stack+8; VLDRW.32 stack;
18608   if (!DCI.isAfterLegalizeDAG())
18609     return SDValue();
18610 
18611   SDValue StackPtr = DAG.CreateStackTemporary(TypeSize::getFixed(16), Align(4));
18612   int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
18613   int NumIns = N->getNumOperands();
18614   assert((NumIns == 2 || NumIns == 4) &&
18615          "Expected 2 or 4 inputs to an MVETrunc");
18616   EVT StoreVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
18617   if (N->getNumOperands() == 4)
18618     StoreVT = StoreVT.getHalfNumVectorElementsVT(*DAG.getContext());
18619 
18620   SmallVector<SDValue> Chains;
18621   for (int I = 0; I < NumIns; I++) {
18622     SDValue Ptr = DAG.getNode(
18623         ISD::ADD, DL, StackPtr.getValueType(), StackPtr,
18624         DAG.getConstant(I * 16 / NumIns, DL, StackPtr.getValueType()));
18625     MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(
18626         DAG.getMachineFunction(), SPFI, I * 16 / NumIns);
18627     SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), DL, N->getOperand(I),
18628                                    Ptr, MPI, StoreVT, Align(4));
18629     Chains.push_back(Ch);
18630   }
18631 
18632   SDValue Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
18633   MachinePointerInfo MPI =
18634       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI, 0);
18635   return DAG.getLoad(VT, DL, Chain, StackPtr, MPI, Align(4));
18636 }
18637 
// Take an MVEEXT(load x) and split that into (extload x, extload x+8).
18639 static SDValue PerformSplittingMVEEXTToWideningLoad(SDNode *N,
18640                                                     SelectionDAG &DAG) {
18641   SDValue N0 = N->getOperand(0);
18642   LoadSDNode *LD = dyn_cast<LoadSDNode>(N0.getNode());
18643   if (!LD || !LD->isSimple() || !N0.hasOneUse() || LD->isIndexed())
18644     return SDValue();
18645 
18646   EVT FromVT = LD->getMemoryVT();
18647   EVT ToVT = N->getValueType(0);
18648   if (!ToVT.isVector())
18649     return SDValue();
18650   assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements() * 2);
18651   EVT ToEltVT = ToVT.getVectorElementType();
18652   EVT FromEltVT = FromVT.getVectorElementType();
18653 
18654   unsigned NumElements = 0;
18655   if (ToEltVT == MVT::i32 && (FromEltVT == MVT::i16 || FromEltVT == MVT::i8))
18656     NumElements = 4;
18657   if (ToEltVT == MVT::i16 && FromEltVT == MVT::i8)
18658     NumElements = 8;
18659   assert(NumElements != 0);
18660 
18661   ISD::LoadExtType NewExtType =
18662       N->getOpcode() == ARMISD::MVESEXT ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
18663   if (LD->getExtensionType() != ISD::NON_EXTLOAD &&
18664       LD->getExtensionType() != ISD::EXTLOAD &&
18665       LD->getExtensionType() != NewExtType)
18666     return SDValue();
18667 
18668   LLVMContext &C = *DAG.getContext();
18669   SDLoc DL(LD);
18670   // Details about the old load
18671   SDValue Ch = LD->getChain();
18672   SDValue BasePtr = LD->getBasePtr();
18673   Align Alignment = LD->getOriginalAlign();
18674   MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
18675   AAMDNodes AAInfo = LD->getAAInfo();
18676 
18677   SDValue Offset = DAG.getUNDEF(BasePtr.getValueType());
18678   EVT NewFromVT = EVT::getVectorVT(
18679       C, EVT::getIntegerVT(C, FromEltVT.getScalarSizeInBits()), NumElements);
18680   EVT NewToVT = EVT::getVectorVT(
18681       C, EVT::getIntegerVT(C, ToEltVT.getScalarSizeInBits()), NumElements);
18682 
18683   SmallVector<SDValue, 4> Loads;
18684   SmallVector<SDValue, 4> Chains;
18685   for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) {
18686     unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8;
18687     SDValue NewPtr =
18688         DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::getFixed(NewOffset));
18689 
18690     SDValue NewLoad =
18691         DAG.getLoad(ISD::UNINDEXED, NewExtType, NewToVT, DL, Ch, NewPtr, Offset,
18692                     LD->getPointerInfo().getWithOffset(NewOffset), NewFromVT,
18693                     Alignment, MMOFlags, AAInfo);
18694     Loads.push_back(NewLoad);
18695     Chains.push_back(SDValue(NewLoad.getNode(), 1));
18696   }
18697 
18698   SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
18699   DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewChain);
18700   return DAG.getMergeValues(Loads, DL);
18701 }
18702 
// Perform combines for MVEEXT. If it has not been optimized to anything better
// before lowering, it gets converted to a stack store and extloads performing
// the extend whilst still keeping the same lane ordering.
18706 SDValue ARMTargetLowering::PerformMVEExtCombine(
18707     SDNode *N, TargetLowering::DAGCombinerInfo &DCI) const {
18708   SelectionDAG &DAG = DCI.DAG;
18709   EVT VT = N->getValueType(0);
18710   SDLoc DL(N);
18711   assert(N->getNumValues() == 2 && "Expected MVEEXT with 2 elements");
18712   assert((VT == MVT::v4i32 || VT == MVT::v8i16) && "Unexpected MVEEXT type");
18713 
18714   EVT ExtVT = N->getOperand(0).getValueType().getHalfNumVectorElementsVT(
18715       *DAG.getContext());
18716   auto Extend = [&](SDValue V) {
18717     SDValue VVT = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, V);
18718     return N->getOpcode() == ARMISD::MVESEXT
18719                ? DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, VVT,
18720                              DAG.getValueType(ExtVT))
18721                : DAG.getZeroExtendInReg(VVT, DL, ExtVT);
18722   };
18723 
18724   // MVEEXT(VDUP) -> SIGN_EXTEND_INREG(VDUP)
18725   if (N->getOperand(0).getOpcode() == ARMISD::VDUP) {
18726     SDValue Ext = Extend(N->getOperand(0));
18727     return DAG.getMergeValues({Ext, Ext}, DL);
18728   }
18729 
18730   // MVEEXT(shuffle) -> SIGN_EXTEND_INREG/ZERO_EXTEND_INREG
18731   if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(N->getOperand(0))) {
18732     ArrayRef<int> Mask = SVN->getMask();
18733     assert(Mask.size() == 2 * VT.getVectorNumElements());
18734     assert(Mask.size() == SVN->getValueType(0).getVectorNumElements());
18735     unsigned Rev = VT == MVT::v4i32 ? ARMISD::VREV32 : ARMISD::VREV16;
18736     SDValue Op0 = SVN->getOperand(0);
18737     SDValue Op1 = SVN->getOperand(1);
18738 
18739     auto CheckInregMask = [&](int Start, int Offset) {
18740       for (int Idx = 0, E = VT.getVectorNumElements(); Idx < E; ++Idx)
18741         if (Mask[Start + Idx] >= 0 && Mask[Start + Idx] != Idx * 2 + Offset)
18742           return false;
18743       return true;
18744     };
18745     SDValue V0 = SDValue(N, 0);
18746     SDValue V1 = SDValue(N, 1);
18747     if (CheckInregMask(0, 0))
18748       V0 = Extend(Op0);
18749     else if (CheckInregMask(0, 1))
18750       V0 = Extend(DAG.getNode(Rev, DL, SVN->getValueType(0), Op0));
18751     else if (CheckInregMask(0, Mask.size()))
18752       V0 = Extend(Op1);
18753     else if (CheckInregMask(0, Mask.size() + 1))
18754       V0 = Extend(DAG.getNode(Rev, DL, SVN->getValueType(0), Op1));
18755 
18756     if (CheckInregMask(VT.getVectorNumElements(), Mask.size()))
18757       V1 = Extend(Op1);
18758     else if (CheckInregMask(VT.getVectorNumElements(), Mask.size() + 1))
18759       V1 = Extend(DAG.getNode(Rev, DL, SVN->getValueType(0), Op1));
18760     else if (CheckInregMask(VT.getVectorNumElements(), 0))
18761       V1 = Extend(Op0);
18762     else if (CheckInregMask(VT.getVectorNumElements(), 1))
18763       V1 = Extend(DAG.getNode(Rev, DL, SVN->getValueType(0), Op0));
18764 
18765     if (V0.getNode() != N || V1.getNode() != N)
18766       return DAG.getMergeValues({V0, V1}, DL);
18767   }
18768 
18769   // MVEEXT(load) -> extload, extload
18770   if (N->getOperand(0)->getOpcode() == ISD::LOAD)
18771     if (SDValue L = PerformSplittingMVEEXTToWideningLoad(N, DAG))
18772       return L;
18773 
18774   if (!DCI.isAfterLegalizeDAG())
18775     return SDValue();
18776 
18777   // Lower to a stack store and reload:
18778   //  VSTRW.32 a, stack; VLDRH.32 stack; VLDRH.32 stack+8;
18779   SDValue StackPtr = DAG.CreateStackTemporary(TypeSize::getFixed(16), Align(4));
18780   int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
18781   int NumOuts = N->getNumValues();
18782   assert((NumOuts == 2 || NumOuts == 4) &&
18783          "Expected 2 or 4 outputs to an MVEEXT");
18784   EVT LoadVT = N->getOperand(0).getValueType().getHalfNumVectorElementsVT(
18785       *DAG.getContext());
18786   if (N->getNumOperands() == 4)
18787     LoadVT = LoadVT.getHalfNumVectorElementsVT(*DAG.getContext());
18788 
18789   MachinePointerInfo MPI =
18790       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI, 0);
18791   SDValue Chain = DAG.getStore(DAG.getEntryNode(), DL, N->getOperand(0),
18792                                StackPtr, MPI, Align(4));
18793 
18794   SmallVector<SDValue> Loads;
18795   for (int I = 0; I < NumOuts; I++) {
18796     SDValue Ptr = DAG.getNode(
18797         ISD::ADD, DL, StackPtr.getValueType(), StackPtr,
18798         DAG.getConstant(I * 16 / NumOuts, DL, StackPtr.getValueType()));
18799     MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(
18800         DAG.getMachineFunction(), SPFI, I * 16 / NumOuts);
18801     SDValue Load = DAG.getExtLoad(
18802         N->getOpcode() == ARMISD::MVESEXT ? ISD::SEXTLOAD : ISD::ZEXTLOAD, DL,
18803         VT, Chain, Ptr, MPI, LoadVT, Align(4));
18804     Loads.push_back(Load);
18805   }
18806 
18807   return DAG.getMergeValues(Loads, DL);
18808 }
18809 
18810 SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
18811                                              DAGCombinerInfo &DCI) const {
18812   switch (N->getOpcode()) {
18813   default: break;
18814   case ISD::SELECT_CC:
18815   case ISD::SELECT:     return PerformSELECTCombine(N, DCI, Subtarget);
18816   case ISD::VSELECT:    return PerformVSELECTCombine(N, DCI, Subtarget);
18817   case ISD::SETCC:      return PerformVSetCCToVCTPCombine(N, DCI, Subtarget);
18818   case ISD::ABS:        return PerformABSCombine(N, DCI, Subtarget);
18819   case ARMISD::ADDE:    return PerformADDECombine(N, DCI, Subtarget);
18820   case ARMISD::UMLAL:   return PerformUMLALCombine(N, DCI.DAG, Subtarget);
18821   case ISD::ADD:        return PerformADDCombine(N, DCI, Subtarget);
18822   case ISD::SUB:        return PerformSUBCombine(N, DCI, Subtarget);
18823   case ISD::MUL:        return PerformMULCombine(N, DCI, Subtarget);
18824   case ISD::OR:         return PerformORCombine(N, DCI, Subtarget);
18825   case ISD::XOR:        return PerformXORCombine(N, DCI, Subtarget);
18826   case ISD::AND:        return PerformANDCombine(N, DCI, Subtarget);
18827   case ISD::BRCOND:
18828   case ISD::BR_CC:      return PerformHWLoopCombine(N, DCI, Subtarget);
18829   case ARMISD::ADDC:
18830   case ARMISD::SUBC:    return PerformAddcSubcCombine(N, DCI, Subtarget);
18831   case ARMISD::SUBE:    return PerformAddeSubeCombine(N, DCI, Subtarget);
18832   case ARMISD::BFI:     return PerformBFICombine(N, DCI.DAG);
18833   case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget);
18834   case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
18835   case ARMISD::VMOVhr:  return PerformVMOVhrCombine(N, DCI);
18836   case ARMISD::VMOVrh:  return PerformVMOVrhCombine(N, DCI.DAG);
18837   case ISD::STORE:      return PerformSTORECombine(N, DCI, Subtarget);
18838   case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget);
18839   case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
18840   case ISD::EXTRACT_VECTOR_ELT:
18841     return PerformExtractEltCombine(N, DCI, Subtarget);
18842   case ISD::SIGN_EXTEND_INREG: return PerformSignExtendInregCombine(N, DCI.DAG);
18843   case ISD::INSERT_SUBVECTOR: return PerformInsertSubvectorCombine(N, DCI);
18844   case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
18845   case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI, Subtarget);
18846   case ARMISD::VDUP: return PerformVDUPCombine(N, DCI.DAG, Subtarget);
18847   case ISD::FP_TO_SINT:
18848   case ISD::FP_TO_UINT:
18849     return PerformVCVTCombine(N, DCI.DAG, Subtarget);
18850   case ISD::FADD:
18851     return PerformFADDCombine(N, DCI.DAG, Subtarget);
18852   case ISD::FDIV:
18853     return PerformVDIVCombine(N, DCI.DAG, Subtarget);
18854   case ISD::INTRINSIC_WO_CHAIN:
18855     return PerformIntrinsicCombine(N, DCI);
18856   case ISD::SHL:
18857   case ISD::SRA:
18858   case ISD::SRL:
18859     return PerformShiftCombine(N, DCI, Subtarget);
18860   case ISD::SIGN_EXTEND:
18861   case ISD::ZERO_EXTEND:
18862   case ISD::ANY_EXTEND:
18863     return PerformExtendCombine(N, DCI.DAG, Subtarget);
18864   case ISD::FP_EXTEND:
18865     return PerformFPExtendCombine(N, DCI.DAG, Subtarget);
18866   case ISD::SMIN:
18867   case ISD::UMIN:
18868   case ISD::SMAX:
18869   case ISD::UMAX:
18870     return PerformMinMaxCombine(N, DCI.DAG, Subtarget);
18871   case ARMISD::CMOV:
18872     return PerformCMOVCombine(N, DCI.DAG);
18873   case ARMISD::BRCOND:
18874     return PerformBRCONDCombine(N, DCI.DAG);
18875   case ARMISD::CMPZ:
18876     return PerformCMPZCombine(N, DCI.DAG);
18877   case ARMISD::CSINC:
18878   case ARMISD::CSINV:
18879   case ARMISD::CSNEG:
18880     return PerformCSETCombine(N, DCI.DAG);
18881   case ISD::LOAD:
18882     return PerformLOADCombine(N, DCI, Subtarget);
18883   case ARMISD::VLD1DUP:
18884   case ARMISD::VLD2DUP:
18885   case ARMISD::VLD3DUP:
18886   case ARMISD::VLD4DUP:
18887     return PerformVLDCombine(N, DCI);
18888   case ARMISD::BUILD_VECTOR:
18889     return PerformARMBUILD_VECTORCombine(N, DCI);
18890   case ISD::BITCAST:
18891     return PerformBITCASTCombine(N, DCI, Subtarget);
18892   case ARMISD::PREDICATE_CAST:
18893     return PerformPREDICATE_CASTCombine(N, DCI);
18894   case ARMISD::VECTOR_REG_CAST:
18895     return PerformVECTOR_REG_CASTCombine(N, DCI.DAG, Subtarget);
18896   case ARMISD::MVETRUNC:
18897     return PerformMVETruncCombine(N, DCI);
18898   case ARMISD::MVESEXT:
18899   case ARMISD::MVEZEXT:
18900     return PerformMVEExtCombine(N, DCI);
18901   case ARMISD::VCMP:
18902     return PerformVCMPCombine(N, DCI.DAG, Subtarget);
18903   case ISD::VECREDUCE_ADD:
18904     return PerformVECREDUCE_ADDCombine(N, DCI.DAG, Subtarget);
18905   case ARMISD::VADDVs:
18906   case ARMISD::VADDVu:
18907   case ARMISD::VADDLVs:
18908   case ARMISD::VADDLVu:
18909   case ARMISD::VADDLVAs:
18910   case ARMISD::VADDLVAu:
18911   case ARMISD::VMLAVs:
18912   case ARMISD::VMLAVu:
18913   case ARMISD::VMLALVs:
18914   case ARMISD::VMLALVu:
18915   case ARMISD::VMLALVAs:
18916   case ARMISD::VMLALVAu:
18917     return PerformReduceShuffleCombine(N, DCI.DAG);
18918   case ARMISD::VMOVN:
18919     return PerformVMOVNCombine(N, DCI);
18920   case ARMISD::VQMOVNs:
18921   case ARMISD::VQMOVNu:
18922     return PerformVQMOVNCombine(N, DCI);
18923   case ARMISD::VQDMULH:
18924     return PerformVQDMULHCombine(N, DCI);
18925   case ARMISD::ASRL:
18926   case ARMISD::LSRL:
18927   case ARMISD::LSLL:
18928     return PerformLongShiftCombine(N, DCI.DAG);
18929   case ARMISD::SMULWB: {
18930     unsigned BitWidth = N->getValueType(0).getSizeInBits();
18931     APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
18932     if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
18933       return SDValue();
18934     break;
18935   }
18936   case ARMISD::SMULWT: {
18937     unsigned BitWidth = N->getValueType(0).getSizeInBits();
18938     APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
18939     if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
18940       return SDValue();
18941     break;
18942   }
18943   case ARMISD::SMLALBB:
18944   case ARMISD::QADD16b:
18945   case ARMISD::QSUB16b:
18946   case ARMISD::UQADD16b:
18947   case ARMISD::UQSUB16b: {
18948     unsigned BitWidth = N->getValueType(0).getSizeInBits();
18949     APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
18950     if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
18951         (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
18952       return SDValue();
18953     break;
18954   }
18955   case ARMISD::SMLALBT: {
18956     unsigned LowWidth = N->getOperand(0).getValueType().getSizeInBits();
18957     APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
18958     unsigned HighWidth = N->getOperand(1).getValueType().getSizeInBits();
18959     APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
18960     if ((SimplifyDemandedBits(N->getOperand(0), LowMask, DCI)) ||
18961         (SimplifyDemandedBits(N->getOperand(1), HighMask, DCI)))
18962       return SDValue();
18963     break;
18964   }
18965   case ARMISD::SMLALTB: {
18966     unsigned HighWidth = N->getOperand(0).getValueType().getSizeInBits();
18967     APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
18968     unsigned LowWidth = N->getOperand(1).getValueType().getSizeInBits();
18969     APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
18970     if ((SimplifyDemandedBits(N->getOperand(0), HighMask, DCI)) ||
18971         (SimplifyDemandedBits(N->getOperand(1), LowMask, DCI)))
18972       return SDValue();
18973     break;
18974   }
18975   case ARMISD::SMLALTT: {
18976     unsigned BitWidth = N->getValueType(0).getSizeInBits();
18977     APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
18978     if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
18979         (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
18980       return SDValue();
18981     break;
18982   }
18983   case ARMISD::QADD8b:
18984   case ARMISD::QSUB8b:
18985   case ARMISD::UQADD8b:
18986   case ARMISD::UQSUB8b: {
18987     unsigned BitWidth = N->getValueType(0).getSizeInBits();
18988     APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
18989     if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
18990         (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
18991       return SDValue();
18992     break;
18993   }
18994   case ISD::INTRINSIC_VOID:
18995   case ISD::INTRINSIC_W_CHAIN:
18996     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
18997     case Intrinsic::arm_neon_vld1:
18998     case Intrinsic::arm_neon_vld1x2:
18999     case Intrinsic::arm_neon_vld1x3:
19000     case Intrinsic::arm_neon_vld1x4:
19001     case Intrinsic::arm_neon_vld2:
19002     case Intrinsic::arm_neon_vld3:
19003     case Intrinsic::arm_neon_vld4:
19004     case Intrinsic::arm_neon_vld2lane:
19005     case Intrinsic::arm_neon_vld3lane:
19006     case Intrinsic::arm_neon_vld4lane:
19007     case Intrinsic::arm_neon_vld2dup:
19008     case Intrinsic::arm_neon_vld3dup:
19009     case Intrinsic::arm_neon_vld4dup:
19010     case Intrinsic::arm_neon_vst1:
19011     case Intrinsic::arm_neon_vst1x2:
19012     case Intrinsic::arm_neon_vst1x3:
19013     case Intrinsic::arm_neon_vst1x4:
19014     case Intrinsic::arm_neon_vst2:
19015     case Intrinsic::arm_neon_vst3:
19016     case Intrinsic::arm_neon_vst4:
19017     case Intrinsic::arm_neon_vst2lane:
19018     case Intrinsic::arm_neon_vst3lane:
19019     case Intrinsic::arm_neon_vst4lane:
19020       return PerformVLDCombine(N, DCI);
19021     case Intrinsic::arm_mve_vld2q:
19022     case Intrinsic::arm_mve_vld4q:
19023     case Intrinsic::arm_mve_vst2q:
19024     case Intrinsic::arm_mve_vst4q:
19025       return PerformMVEVLDCombine(N, DCI);
19026     default: break;
19027     }
19028     break;
19029   }
19030   return SDValue();
19031 }
19032 
19033 bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
19034                                                           EVT VT) const {
19035   return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
19036 }
19037 
19038 bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
19039                                                        Align Alignment,
19040                                                        MachineMemOperand::Flags,
19041                                                        unsigned *Fast) const {
  // Depends on what it gets converted into if the type is weird.
19043   if (!VT.isSimple())
19044     return false;
19045 
  // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs.
19047   bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
19048   auto Ty = VT.getSimpleVT().SimpleTy;
19049 
19050   if (Ty == MVT::i8 || Ty == MVT::i16 || Ty == MVT::i32) {
    // Unaligned accesses can use (for example) LDRB, LDRH, LDR
19052     if (AllowsUnaligned) {
19053       if (Fast)
19054         *Fast = Subtarget->hasV7Ops();
19055       return true;
19056     }
19057   }
19058 
19059   if (Ty == MVT::f64 || Ty == MVT::v2f64) {
    // For any little-endian targets with NEON, we can support unaligned ld/st
    // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
    // A big-endian target may also explicitly support unaligned accesses.
19063     if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) {
19064       if (Fast)
19065         *Fast = 1;
19066       return true;
19067     }
19068   }
19069 
19070   if (!Subtarget->hasMVEIntegerOps())
19071     return false;
19072 
19073   // These are for predicates
19074   if ((Ty == MVT::v16i1 || Ty == MVT::v8i1 || Ty == MVT::v4i1 ||
19075        Ty == MVT::v2i1)) {
19076     if (Fast)
19077       *Fast = 1;
19078     return true;
19079   }
19080 
  // These are for truncating stores / narrowing loads. They are fine so long
  // as the alignment is at least the size of the item being loaded.
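  // (For example, a v4i16 access qualifies here as long as it is at least
  // 2-byte aligned.)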
19083   if ((Ty == MVT::v4i8 || Ty == MVT::v8i8 || Ty == MVT::v4i16) &&
19084       Alignment >= VT.getScalarSizeInBits() / 8) {
19085     if (Fast)
      *Fast = 1;
19087     return true;
19088   }
19089 
19090   // In little-endian MVE, the store instructions VSTRB.U8, VSTRH.U16 and
19091   // VSTRW.U32 all store the vector register in exactly the same format, and
19092   // differ only in the range of their immediate offset field and the required
19093   // alignment. So there is always a store that can be used, regardless of
19094   // actual type.
19095   //
  // For big-endian targets that is not the case, but we can still emit a
  // (VSTRB.U8; VREV64.8) pair and get the same effect. This will likely be
  // better than aligning the vector through the stack.
19099   if (Ty == MVT::v16i8 || Ty == MVT::v8i16 || Ty == MVT::v8f16 ||
19100       Ty == MVT::v4i32 || Ty == MVT::v4f32 || Ty == MVT::v2i64 ||
19101       Ty == MVT::v2f64) {
19102     if (Fast)
19103       *Fast = 1;
19104     return true;
19105   }
19106 
19107   return false;
19108 }
19109 
19110 
19111 EVT ARMTargetLowering::getOptimalMemOpType(
19112     const MemOp &Op, const AttributeList &FuncAttributes) const {
19113   // See if we can use NEON instructions for this...
19114   if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->hasNEON() &&
19115       !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat)) {
19116     unsigned Fast;
19117     if (Op.size() >= 16 &&
19118         (Op.isAligned(Align(16)) ||
19119          (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, Align(1),
19120                                          MachineMemOperand::MONone, &Fast) &&
19121           Fast))) {
19122       return MVT::v2f64;
19123     } else if (Op.size() >= 8 &&
19124                (Op.isAligned(Align(8)) ||
19125                 (allowsMisalignedMemoryAccesses(
19126                      MVT::f64, 0, Align(1), MachineMemOperand::MONone, &Fast) &&
19127                  Fast))) {
19128       return MVT::f64;
19129     }
19130   }
19131 
19132   // Let the target-independent logic figure it out.
19133   return MVT::Other;
19134 }
19135 
19136 // 64-bit integers are split into their high and low parts and held in two
19137 // different registers, so the trunc is free since the low register can just
19138 // be used.
19139 bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
19140   if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
19141     return false;
19142   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
19143   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
19144   return (SrcBits == 64 && DestBits == 32);
19145 }
19146 
19147 bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
19148   if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() ||
19149       !DstVT.isInteger())
19150     return false;
19151   unsigned SrcBits = SrcVT.getSizeInBits();
19152   unsigned DestBits = DstVT.getSizeInBits();
19153   return (SrcBits == 64 && DestBits == 32);
19154 }
19155 
19156 bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
19157   if (Val.getOpcode() != ISD::LOAD)
19158     return false;
19159 
19160   EVT VT1 = Val.getValueType();
19161   if (!VT1.isSimple() || !VT1.isInteger() ||
19162       !VT2.isSimple() || !VT2.isInteger())
19163     return false;
19164 
19165   switch (VT1.getSimpleVT().SimpleTy) {
19166   default: break;
19167   case MVT::i1:
19168   case MVT::i8:
19169   case MVT::i16:
19170     // 8-bit and 16-bit loads implicitly zero-extend to 32-bits.
19171     return true;
19172   }
19173 
19174   return false;
19175 }
19176 
19177 bool ARMTargetLowering::isFNegFree(EVT VT) const {
19178   if (!VT.isSimple())
19179     return false;
19180 
  // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that can
  // negate values directly (fneg is free), so we don't want the DAG combiner
  // to rewrite fneg into xors and other instructions. For f16 and FullFP16
  // argument passing, some bitcast nodes may be introduced that would trigger
  // that rewrite, which this hook avoids.
19186   switch (VT.getSimpleVT().SimpleTy) {
19187   default: break;
19188   case MVT::f16:
19189     return Subtarget->hasFullFP16();
19190   }
19191 
19192   return false;
19193 }
19194 
19195 /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth
19196 /// of the vector elements.
19197 static bool areExtractExts(Value *Ext1, Value *Ext2) {
19198   auto areExtDoubled = [](Instruction *Ext) {
19199     return Ext->getType()->getScalarSizeInBits() ==
19200            2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
19201   };
19202 
19203   if (!match(Ext1, m_ZExtOrSExt(m_Value())) ||
19204       !match(Ext2, m_ZExtOrSExt(m_Value())) ||
19205       !areExtDoubled(cast<Instruction>(Ext1)) ||
19206       !areExtDoubled(cast<Instruction>(Ext2)))
19207     return false;
19208 
19209   return true;
19210 }
19211 
19212 /// Check if sinking \p I's operands to I's basic block is profitable, because
19213 /// the operands can be folded into a target instruction, e.g.
19214 /// sext/zext can be folded into vsubl.
19215 bool ARMTargetLowering::shouldSinkOperands(Instruction *I,
19216                                            SmallVectorImpl<Use *> &Ops) const {
19217   if (!I->getType()->isVectorTy())
19218     return false;
19219 
19220   if (Subtarget->hasNEON()) {
19221     switch (I->getOpcode()) {
19222     case Instruction::Sub:
19223     case Instruction::Add: {
19224       if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
19225         return false;
19226       Ops.push_back(&I->getOperandUse(0));
19227       Ops.push_back(&I->getOperandUse(1));
19228       return true;
19229     }
19230     default:
19231       return false;
19232     }
19233   }
19234 
19235   if (!Subtarget->hasMVEIntegerOps())
19236     return false;
19237 
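  // IsFMSMul: this fmul's only user is an fsub which subtracts it, so the pair
  // will be selected as a fused multiply-subtract.
  // IsFMS: this fma already negates one of its multiplicands, so it too will
  // become a fused multiply-subtract.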
19238   auto IsFMSMul = [&](Instruction *I) {
19239     if (!I->hasOneUse())
19240       return false;
19241     auto *Sub = cast<Instruction>(*I->users().begin());
19242     return Sub->getOpcode() == Instruction::FSub && Sub->getOperand(1) == I;
19243   };
19244   auto IsFMS = [&](Instruction *I) {
19245     if (match(I->getOperand(0), m_FNeg(m_Value())) ||
19246         match(I->getOperand(1), m_FNeg(m_Value())))
19247       return true;
19248     return false;
19249   };
19250 
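  // Whether operand Operand of instruction I can directly consume a
  // duplicated (splatted) register, making it profitable to sink the splat
  // next to I.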
19251   auto IsSinker = [&](Instruction *I, int Operand) {
19252     switch (I->getOpcode()) {
19253     case Instruction::Add:
19254     case Instruction::Mul:
19255     case Instruction::FAdd:
19256     case Instruction::ICmp:
19257     case Instruction::FCmp:
19258       return true;
19259     case Instruction::FMul:
19260       return !IsFMSMul(I);
19261     case Instruction::Sub:
19262     case Instruction::FSub:
19263     case Instruction::Shl:
19264     case Instruction::LShr:
19265     case Instruction::AShr:
19266       return Operand == 1;
19267     case Instruction::Call:
19268       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
19269         switch (II->getIntrinsicID()) {
19270         case Intrinsic::fma:
19271           return !IsFMS(I);
19272         case Intrinsic::sadd_sat:
19273         case Intrinsic::uadd_sat:
19274         case Intrinsic::arm_mve_add_predicated:
19275         case Intrinsic::arm_mve_mul_predicated:
19276         case Intrinsic::arm_mve_qadd_predicated:
19277         case Intrinsic::arm_mve_vhadd:
19278         case Intrinsic::arm_mve_hadd_predicated:
19279         case Intrinsic::arm_mve_vqdmull:
19280         case Intrinsic::arm_mve_vqdmull_predicated:
19281         case Intrinsic::arm_mve_vqdmulh:
19282         case Intrinsic::arm_mve_qdmulh_predicated:
19283         case Intrinsic::arm_mve_vqrdmulh:
19284         case Intrinsic::arm_mve_qrdmulh_predicated:
19285         case Intrinsic::arm_mve_fma_predicated:
19286           return true;
19287         case Intrinsic::ssub_sat:
19288         case Intrinsic::usub_sat:
19289         case Intrinsic::arm_mve_sub_predicated:
19290         case Intrinsic::arm_mve_qsub_predicated:
19291         case Intrinsic::arm_mve_hsub_predicated:
19292         case Intrinsic::arm_mve_vhsub:
19293           return Operand == 1;
19294         default:
19295           return false;
19296         }
19297       }
19298       return false;
19299     default:
19300       return false;
19301     }
19302   };
19303 
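  // Walk I's operands looking for splats (possibly behind a bitcast) that
  // every user can fold; record the relevant uses so the splat is sunk.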
19304   for (auto OpIdx : enumerate(I->operands())) {
19305     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
19306     // Make sure we are not already sinking this operand
19307     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
19308       continue;
19309 
19310     Instruction *Shuffle = Op;
19311     if (Shuffle->getOpcode() == Instruction::BitCast)
19312       Shuffle = dyn_cast<Instruction>(Shuffle->getOperand(0));
19313     // We are looking for a splat that can be sunk.
19314     if (!Shuffle ||
19315         !match(Shuffle, m_Shuffle(
19316                             m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
19317                             m_Undef(), m_ZeroMask())))
19318       continue;
19319     if (!IsSinker(I, OpIdx.index()))
19320       continue;
19321 
    // All uses of the shuffle should be sunk to avoid duplicating it across
    // GPR and vector registers.
19324     for (Use &U : Op->uses()) {
19325       Instruction *Insn = cast<Instruction>(U.getUser());
19326       if (!IsSinker(Insn, U.getOperandNo()))
19327         return false;
19328     }
19329 
19330     Ops.push_back(&Shuffle->getOperandUse(0));
19331     if (Shuffle != Op)
19332       Ops.push_back(&Op->getOperandUse(0));
19333     Ops.push_back(&OpIdx.value());
19334   }
19335   return true;
19336 }
19337 
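// MVE VDUP takes its scalar from a GPR, so splats of f32/f16 values are
// better represented as splats of the equivalent-width integer type; report
// that type here so the caller can rewrite the shuffle.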
19338 Type *ARMTargetLowering::shouldConvertSplatType(ShuffleVectorInst *SVI) const {
19339   if (!Subtarget->hasMVEIntegerOps())
19340     return nullptr;
19341   Type *SVIType = SVI->getType();
19342   Type *ScalarType = SVIType->getScalarType();
19343 
19344   if (ScalarType->isFloatTy())
19345     return Type::getInt32Ty(SVIType->getContext());
19346   if (ScalarType->isHalfTy())
19347     return Type::getInt16Ty(SVIType->getContext());
19348   return nullptr;
19349 }
19350 
19351 bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
19352   EVT VT = ExtVal.getValueType();
19353 
19354   if (!isTypeLegal(VT))
19355     return false;
19356 
19357   if (auto *Ld = dyn_cast<MaskedLoadSDNode>(ExtVal.getOperand(0))) {
19358     if (Ld->isExpandingLoad())
19359       return false;
19360   }
19361 
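  // MVE has both sign- and zero-extending vector loads (e.g. VLDRB.S16/U16),
  // so forming an extending load here is fine.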
19362   if (Subtarget->hasMVEIntegerOps())
19363     return true;
19364 
19365   // Don't create a loadext if we can fold the extension into a wide/long
19366   // instruction.
19367   // If there's more than one user instruction, the loadext is desirable no
19368   // matter what.  There can be two uses by the same instruction.
19369   if (ExtVal->use_empty() ||
19370       !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode()))
19371     return true;
19372 
19373   SDNode *U = *ExtVal->use_begin();
19374   if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
19375        U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM))
19376     return false;
19377 
19378   return true;
19379 }
19380 
19381 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
19382   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
19383     return false;
19384 
19385   if (!isTypeLegal(EVT::getEVT(Ty1)))
19386     return false;
19387 
19388   assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
19389 
19390   // Assuming the caller doesn't have a zeroext or signext return parameter,
19391   // truncation all the way down to i1 is valid.
19392   return true;
19393 }
19394 
19395 /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
19396 /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
19397 /// expanded to FMAs when this method returns true, otherwise fmuladd is
19398 /// expanded to fmul + fadd.
19399 ///
19400 /// ARM supports both fused and unfused multiply-add operations; we already
19401 /// lower a pair of fmul and fadd to the latter so it's not clear that there
19402 /// would be a gain or that the gain would be worthwhile enough to risk
19403 /// correctness bugs.
19404 ///
/// For MVE, we set this to true as it removes the need for some extra
/// patterns (and we don't have a non-fused floating point multiply-add
/// instruction anyway).
19407 bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
19408                                                    EVT VT) const {
19409   if (!VT.isSimple())
19410     return false;
19411 
19412   switch (VT.getSimpleVT().SimpleTy) {
19413   case MVT::v4f32:
19414   case MVT::v8f16:
19415     return Subtarget->hasMVEFloatOps();
19416   case MVT::f16:
19417     return Subtarget->useFPVFMx16();
19418   case MVT::f32:
19419     return Subtarget->useFPVFMx();
19420   case MVT::f64:
19421     return Subtarget->useFPVFMx64();
19422   default:
19423     break;
19424   }
19425 
19426   return false;
19427 }
19428 
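// Thumb1 load/store immediates are unsigned 5-bit offsets scaled by the
// access size (1 for bytes, 2 for halfwords, 4 for words and larger types).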
19429 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
19430   if (V < 0)
19431     return false;
19432 
19433   unsigned Scale = 1;
19434   switch (VT.getSimpleVT().SimpleTy) {
19435   case MVT::i1:
19436   case MVT::i8:
19437     // Scale == 1;
19438     break;
19439   case MVT::i16:
19440     // Scale == 2;
19441     Scale = 2;
19442     break;
19443   default:
    // On Thumb1 we load most things (i32, i64, floats, etc.) with an LDR
19445     // Scale == 4;
19446     Scale = 4;
19447     break;
19448   }
19449 
19450   if ((V & (Scale - 1)) != 0)
19451     return false;
19452   return isUInt<5>(V / Scale);
19453 }
19454 
19455 static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
19456                                       const ARMSubtarget *Subtarget) {
19457   if (!VT.isInteger() && !VT.isFloatingPoint())
19458     return false;
19459   if (VT.isVector() && Subtarget->hasNEON())
19460     return false;
19461   if (VT.isVector() && VT.isFloatingPoint() && Subtarget->hasMVEIntegerOps() &&
19462       !Subtarget->hasMVEFloatOps())
19463     return false;
19464 
19465   bool IsNeg = false;
19466   if (V < 0) {
19467     IsNeg = true;
19468     V = -V;
19469   }
19470 
19471   unsigned NumBytes = std::max((unsigned)VT.getSizeInBits() / 8, 1U);
19472 
19473   // MVE: size * imm7
19474   if (VT.isVector() && Subtarget->hasMVEIntegerOps()) {
19475     switch (VT.getSimpleVT().getVectorElementType().SimpleTy) {
19476     case MVT::i32:
19477     case MVT::f32:
19478       return isShiftedUInt<7,2>(V);
19479     case MVT::i16:
19480     case MVT::f16:
19481       return isShiftedUInt<7,1>(V);
19482     case MVT::i8:
19483       return isUInt<7>(V);
19484     default:
19485       return false;
19486     }
19487   }
19488 
19489   // half VLDR: 2 * imm8
19490   if (VT.isFloatingPoint() && NumBytes == 2 && Subtarget->hasFPRegs16())
19491     return isShiftedUInt<8, 1>(V);
19492   // VLDR and LDRD: 4 * imm8
19493   if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8)
19494     return isShiftedUInt<8, 2>(V);
19495 
19496   if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) {
19497     // + imm12 or - imm8
19498     if (IsNeg)
19499       return isUInt<8>(V);
19500     return isUInt<12>(V);
19501   }
19502 
19503   return false;
19504 }
19505 
19506 /// isLegalAddressImmediate - Return true if the integer value can be used
19507 /// as the offset of the target addressing mode for load / store of the
19508 /// given type.
19509 static bool isLegalAddressImmediate(int64_t V, EVT VT,
19510                                     const ARMSubtarget *Subtarget) {
19511   if (V == 0)
19512     return true;
19513 
19514   if (!VT.isSimple())
19515     return false;
19516 
19517   if (Subtarget->isThumb1Only())
19518     return isLegalT1AddressImmediate(V, VT);
19519   else if (Subtarget->isThumb2())
19520     return isLegalT2AddressImmediate(V, VT, Subtarget);
19521 
19522   // ARM mode.
19523   if (V < 0)
19524     V = - V;
19525   switch (VT.getSimpleVT().SimpleTy) {
19526   default: return false;
19527   case MVT::i1:
19528   case MVT::i8:
19529   case MVT::i32:
19530     // +- imm12
19531     return isUInt<12>(V);
19532   case MVT::i16:
19533     // +- imm8
19534     return isUInt<8>(V);
19535   case MVT::f32:
19536   case MVT::f64:
19537     if (!Subtarget->hasVFP2Base()) // FIXME: NEON?
19538       return false;
19539     return isShiftedUInt<8, 2>(V);
19540   }
19541 }
19542 
19543 bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
19544                                                       EVT VT) const {
19545   int Scale = AM.Scale;
19546   if (Scale < 0)
19547     return false;
19548 
19549   switch (VT.getSimpleVT().SimpleTy) {
19550   default: return false;
19551   case MVT::i1:
19552   case MVT::i8:
19553   case MVT::i16:
19554   case MVT::i32:
19555     if (Scale == 1)
19556       return true;
19557     // r + r << imm
19558     Scale = Scale & ~1;
19559     return Scale == 2 || Scale == 4 || Scale == 8;
19560   case MVT::i64:
19561     // FIXME: What are we trying to model here? ldrd doesn't have an r + r
19562     // version in Thumb mode.
19563     // r + r
19564     if (Scale == 1)
19565       return true;
19566     // r * 2 (this can be lowered to r + r).
19567     if (!AM.HasBaseReg && Scale == 2)
19568       return true;
19569     return false;
19570   case MVT::isVoid:
    // Note, we allow "void" uses (basically, uses that aren't loads or
    // stores), because ARM allows folding a scale into many arithmetic
    // operations.  This should be made more precise and revisited later.
19574 
19575     // Allow r << imm, but the imm has to be a multiple of two.
19576     if (Scale & 1) return false;
19577     return isPowerOf2_32(Scale);
19578   }
19579 }
19580 
19581 bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM,
19582                                                       EVT VT) const {
19583   const int Scale = AM.Scale;
19584 
19585   // Negative scales are not supported in Thumb1.
19586   if (Scale < 0)
19587     return false;
19588 
  // Thumb1 addressing modes do not support register scaling, except in the
  // following cases:
19591   // 1. Scale == 1 means no scaling.
19592   // 2. Scale == 2 this can be lowered to r + r if there is no base register.
19593   return (Scale == 1) || (!AM.HasBaseReg && Scale == 2);
19594 }
19595 
19596 /// isLegalAddressingMode - Return true if the addressing mode represented
19597 /// by AM is legal for this target, for a load/store of the specified type.
19598 bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
19599                                               const AddrMode &AM, Type *Ty,
19600                                               unsigned AS, Instruction *I) const {
19601   EVT VT = getValueType(DL, Ty, true);
19602   if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
19603     return false;
19604 
19605   // Can never fold addr of global into load/store.
19606   if (AM.BaseGV)
19607     return false;
19608 
19609   switch (AM.Scale) {
19610   case 0:  // no scale reg, must be "r+i" or "r", or "i".
19611     break;
19612   default:
19613     // ARM doesn't support any R+R*scale+imm addr modes.
19614     if (AM.BaseOffs)
19615       return false;
19616 
19617     if (!VT.isSimple())
19618       return false;
19619 
19620     if (Subtarget->isThumb1Only())
19621       return isLegalT1ScaledAddressingMode(AM, VT);
19622 
19623     if (Subtarget->isThumb2())
19624       return isLegalT2ScaledAddressingMode(AM, VT);
19625 
19626     int Scale = AM.Scale;
19627     switch (VT.getSimpleVT().SimpleTy) {
19628     default: return false;
19629     case MVT::i1:
19630     case MVT::i8:
19631     case MVT::i32:
19632       if (Scale < 0) Scale = -Scale;
19633       if (Scale == 1)
19634         return true;
19635       // r + r << imm
19636       return isPowerOf2_32(Scale & ~1);
19637     case MVT::i16:
19638     case MVT::i64:
19639       // r +/- r
19640       if (Scale == 1 || (AM.HasBaseReg && Scale == -1))
19641         return true;
19642       // r * 2 (this can be lowered to r + r).
19643       if (!AM.HasBaseReg && Scale == 2)
19644         return true;
19645       return false;
19646 
19647     case MVT::isVoid:
      // Note, we allow "void" uses (basically, uses that aren't loads or
      // stores), because ARM allows folding a scale into many arithmetic
      // operations.  This should be made more precise and revisited later.
19651 
19652       // Allow r << imm, but the imm has to be a multiple of two.
19653       if (Scale & 1) return false;
19654       return isPowerOf2_32(Scale);
19655     }
19656   }
19657   return true;
19658 }
19659 
19660 /// isLegalICmpImmediate - Return true if the specified immediate is legal
19661 /// icmp immediate, that is the target has icmp instructions which can compare
19662 /// a register against the immediate without having to materialize the
19663 /// immediate into a register.
19664 bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
19665   // Thumb2 and ARM modes can use cmn for negative immediates.
19666   if (!Subtarget->isThumb())
19667     return ARM_AM::getSOImmVal((uint32_t)Imm) != -1 ||
19668            ARM_AM::getSOImmVal(-(uint32_t)Imm) != -1;
19669   if (Subtarget->isThumb2())
19670     return ARM_AM::getT2SOImmVal((uint32_t)Imm) != -1 ||
19671            ARM_AM::getT2SOImmVal(-(uint32_t)Imm) != -1;
  // Thumb1 doesn't have CMN and only supports 8-bit immediates.
19673   return Imm >= 0 && Imm <= 255;
19674 }
19675 
19676 /// isLegalAddImmediate - Return true if the specified immediate is a legal add
19677 /// *or sub* immediate, that is the target has add or sub instructions which can
19678 /// add a register with the immediate without having to materialize the
19679 /// immediate into a register.
19680 bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
19681   // Same encoding for add/sub, just flip the sign.
19682   int64_t AbsImm = std::abs(Imm);
19683   if (!Subtarget->isThumb())
19684     return ARM_AM::getSOImmVal(AbsImm) != -1;
19685   if (Subtarget->isThumb2())
19686     return ARM_AM::getT2SOImmVal(AbsImm) != -1;
  // Thumb1 only has an 8-bit unsigned immediate.
19688   return AbsImm >= 0 && AbsImm <= 255;
19689 }
19690 
19691 // Return false to prevent folding
19692 // (mul (add r, c0), c1) -> (add (mul r, c1), c0*c1) in DAGCombine,
19693 // if the folding leads to worse code.
19694 bool ARMTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
19695                                                     SDValue ConstNode) const {
19696   // Let the DAGCombiner decide for vector types and large types.
19697   const EVT VT = AddNode.getValueType();
19698   if (VT.isVector() || VT.getScalarSizeInBits() > 32)
19699     return true;
19700 
19701   // It is worse if c0 is legal add immediate, while c1*c0 is not
19702   // and has to be composed by at least two instructions.
19703   const ConstantSDNode *C0Node = cast<ConstantSDNode>(AddNode.getOperand(1));
19704   const ConstantSDNode *C1Node = cast<ConstantSDNode>(ConstNode);
19705   const int64_t C0 = C0Node->getSExtValue();
19706   APInt CA = C0Node->getAPIntValue() * C1Node->getAPIntValue();
19707   if (!isLegalAddImmediate(C0) || isLegalAddImmediate(CA.getSExtValue()))
19708     return true;
19709   if (ConstantMaterializationCost((unsigned)CA.getZExtValue(), Subtarget) > 1)
19710     return false;
19711 
19712   // Default to true and let the DAGCombiner decide.
19713   return true;
19714 }
19715 
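// Match Ptr = (add/sub base, offset) against an ARM-mode indexed addressing
// form: AddrMode3 for i16 and sign-extended i8/i1 accesses, AddrMode2 for
// i32/i8/i1 (which also allows a shifted register offset). On success the
// base, offset and add/sub direction are returned by reference.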
19716 static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
19717                                       bool isSEXTLoad, SDValue &Base,
19718                                       SDValue &Offset, bool &isInc,
19719                                       SelectionDAG &DAG) {
19720   if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
19721     return false;
19722 
19723   if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
19724     // AddressingMode 3
19725     Base = Ptr->getOperand(0);
19726     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
19727       int RHSC = (int)RHS->getZExtValue();
19728       if (RHSC < 0 && RHSC > -256) {
19729         assert(Ptr->getOpcode() == ISD::ADD);
19730         isInc = false;
19731         Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
19732         return true;
19733       }
19734     }
19735     isInc = (Ptr->getOpcode() == ISD::ADD);
19736     Offset = Ptr->getOperand(1);
19737     return true;
19738   } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
19739     // AddressingMode 2
19740     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
19741       int RHSC = (int)RHS->getZExtValue();
19742       if (RHSC < 0 && RHSC > -0x1000) {
19743         assert(Ptr->getOpcode() == ISD::ADD);
19744         isInc = false;
19745         Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
19746         Base = Ptr->getOperand(0);
19747         return true;
19748       }
19749     }
19750 
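    // If one side of the add is a shifted value, use it as the offset so the
    // shift can be folded into the addressing mode.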
19751     if (Ptr->getOpcode() == ISD::ADD) {
19752       isInc = true;
19753       ARM_AM::ShiftOpc ShOpcVal=
19754         ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
19755       if (ShOpcVal != ARM_AM::no_shift) {
19756         Base = Ptr->getOperand(1);
19757         Offset = Ptr->getOperand(0);
19758       } else {
19759         Base = Ptr->getOperand(0);
19760         Offset = Ptr->getOperand(1);
19761       }
19762       return true;
19763     }
19764 
19765     isInc = (Ptr->getOpcode() == ISD::ADD);
19766     Base = Ptr->getOperand(0);
19767     Offset = Ptr->getOperand(1);
19768     return true;
19769   }
19770 
19771   // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
19772   return false;
19773 }
19774 
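// Thumb2 pre/post-indexed loads and stores accept only an 8-bit immediate
// offset, added to or subtracted from the base.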
19775 static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
19776                                      bool isSEXTLoad, SDValue &Base,
19777                                      SDValue &Offset, bool &isInc,
19778                                      SelectionDAG &DAG) {
19779   if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
19780     return false;
19781 
19782   Base = Ptr->getOperand(0);
19783   if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
19784     int RHSC = (int)RHS->getZExtValue();
19785     if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
19786       assert(Ptr->getOpcode() == ISD::ADD);
19787       isInc = false;
19788       Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
19789       return true;
19790     } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
19791       isInc = Ptr->getOpcode() == ISD::ADD;
19792       Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
19793       return true;
19794     }
19795   }
19796 
19797   return false;
19798 }
19799 
19800 static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment,
19801                                       bool isSEXTLoad, bool IsMasked, bool isLE,
19802                                       SDValue &Base, SDValue &Offset,
19803                                       bool &isInc, SelectionDAG &DAG) {
19804   if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
19805     return false;
19806   if (!isa<ConstantSDNode>(Ptr->getOperand(1)))
19807     return false;
19808 
19809   // We allow LE non-masked loads to change the type (for example use a vldrb.8
19810   // as opposed to a vldrw.32). This can allow extra addressing modes or
19811   // alignments for what is otherwise an equivalent instruction.
19812   bool CanChangeType = isLE && !IsMasked;
19813 
19814   ConstantSDNode *RHS = cast<ConstantSDNode>(Ptr->getOperand(1));
19815   int RHSC = (int)RHS->getZExtValue();
19816 
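  // Check that RHSC is a multiple of Scale and within +/- Limit * Scale,
  // recording the offset and whether this is an increment or a decrement.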
19817   auto IsInRange = [&](int RHSC, int Limit, int Scale) {
19818     if (RHSC < 0 && RHSC > -Limit * Scale && RHSC % Scale == 0) {
19819       assert(Ptr->getOpcode() == ISD::ADD);
19820       isInc = false;
19821       Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
19822       return true;
19823     } else if (RHSC > 0 && RHSC < Limit * Scale && RHSC % Scale == 0) {
19824       isInc = Ptr->getOpcode() == ISD::ADD;
19825       Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
19826       return true;
19827     }
19828     return false;
19829   };
19830 
19831   // Try to find a matching instruction based on s/zext, Alignment, Offset and
19832   // (in BE/masked) type.
19833   Base = Ptr->getOperand(0);
19834   if (VT == MVT::v4i16) {
19835     if (Alignment >= 2 && IsInRange(RHSC, 0x80, 2))
19836       return true;
19837   } else if (VT == MVT::v4i8 || VT == MVT::v8i8) {
19838     if (IsInRange(RHSC, 0x80, 1))
19839       return true;
19840   } else if (Alignment >= 4 &&
19841              (CanChangeType || VT == MVT::v4i32 || VT == MVT::v4f32) &&
19842              IsInRange(RHSC, 0x80, 4))
19843     return true;
19844   else if (Alignment >= 2 &&
19845            (CanChangeType || VT == MVT::v8i16 || VT == MVT::v8f16) &&
19846            IsInRange(RHSC, 0x80, 2))
19847     return true;
19848   else if ((CanChangeType || VT == MVT::v16i8) && IsInRange(RHSC, 0x80, 1))
19849     return true;
19850   return false;
19851 }
19852 
19853 /// getPreIndexedAddressParts - returns true by value, base pointer and
19854 /// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as a pre-indexed load / store address.
19856 bool
19857 ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
19858                                              SDValue &Offset,
19859                                              ISD::MemIndexedMode &AM,
19860                                              SelectionDAG &DAG) const {
19861   if (Subtarget->isThumb1Only())
19862     return false;
19863 
19864   EVT VT;
19865   SDValue Ptr;
19866   Align Alignment;
19867   bool isSEXTLoad = false;
19868   bool IsMasked = false;
19869   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
19870     Ptr = LD->getBasePtr();
19871     VT = LD->getMemoryVT();
19872     Alignment = LD->getAlign();
19873     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
19874   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
19875     Ptr = ST->getBasePtr();
19876     VT = ST->getMemoryVT();
19877     Alignment = ST->getAlign();
19878   } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
19879     Ptr = LD->getBasePtr();
19880     VT = LD->getMemoryVT();
19881     Alignment = LD->getAlign();
19882     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
19883     IsMasked = true;
19884   } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
19885     Ptr = ST->getBasePtr();
19886     VT = ST->getMemoryVT();
19887     Alignment = ST->getAlign();
19888     IsMasked = true;
19889   } else
19890     return false;
19891 
19892   bool isInc;
19893   bool isLegal = false;
19894   if (VT.isVector())
19895     isLegal = Subtarget->hasMVEIntegerOps() &&
19896               getMVEIndexedAddressParts(
19897                   Ptr.getNode(), VT, Alignment, isSEXTLoad, IsMasked,
19898                   Subtarget->isLittle(), Base, Offset, isInc, DAG);
19899   else {
19900     if (Subtarget->isThumb2())
19901       isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
19902                                          Offset, isInc, DAG);
19903     else
19904       isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
19905                                           Offset, isInc, DAG);
19906   }
19907   if (!isLegal)
19908     return false;
19909 
19910   AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
19911   return true;
19912 }
19913 
19914 /// getPostIndexedAddressParts - returns true by value, base pointer and
19915 /// offset pointer and addressing mode by reference if this node can be
19916 /// combined with a load / store to form a post-indexed load / store.
19917 bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
19918                                                    SDValue &Base,
19919                                                    SDValue &Offset,
19920                                                    ISD::MemIndexedMode &AM,
19921                                                    SelectionDAG &DAG) const {
19922   EVT VT;
19923   SDValue Ptr;
19924   Align Alignment;
19925   bool isSEXTLoad = false, isNonExt;
19926   bool IsMasked = false;
19927   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
19928     VT = LD->getMemoryVT();
19929     Ptr = LD->getBasePtr();
19930     Alignment = LD->getAlign();
19931     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
19932     isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
19933   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
19934     VT = ST->getMemoryVT();
19935     Ptr = ST->getBasePtr();
19936     Alignment = ST->getAlign();
19937     isNonExt = !ST->isTruncatingStore();
19938   } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
19939     VT = LD->getMemoryVT();
19940     Ptr = LD->getBasePtr();
19941     Alignment = LD->getAlign();
19942     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
19943     isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
19944     IsMasked = true;
19945   } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
19946     VT = ST->getMemoryVT();
19947     Ptr = ST->getBasePtr();
19948     Alignment = ST->getAlign();
19949     isNonExt = !ST->isTruncatingStore();
19950     IsMasked = true;
19951   } else
19952     return false;
19953 
19954   if (Subtarget->isThumb1Only()) {
    // Thumb-1 can do a limited post-inc load or store as an updating LDM. It
    // must be a non-extending load or non-truncating store of i32, with an
    // offset of 4.
19957     assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
19958     if (Op->getOpcode() != ISD::ADD || !isNonExt)
19959       return false;
19960     auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
19961     if (!RHS || RHS->getZExtValue() != 4)
19962       return false;
19963     if (Alignment < Align(4))
19964       return false;
19965 
19966     Offset = Op->getOperand(1);
19967     Base = Op->getOperand(0);
19968     AM = ISD::POST_INC;
19969     return true;
19970   }
19971 
19972   bool isInc;
19973   bool isLegal = false;
19974   if (VT.isVector())
19975     isLegal = Subtarget->hasMVEIntegerOps() &&
19976               getMVEIndexedAddressParts(Op, VT, Alignment, isSEXTLoad, IsMasked,
19977                                         Subtarget->isLittle(), Base, Offset,
19978                                         isInc, DAG);
19979   else {
19980     if (Subtarget->isThumb2())
19981       isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
19982                                          isInc, DAG);
19983     else
19984       isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
19985                                           isInc, DAG);
19986   }
19987   if (!isLegal)
19988     return false;
19989 
19990   if (Ptr != Base) {
    // Swap the base pointer and offset to catch more post-indexed load /
    // store cases when it's legal. In Thumb2 mode, the offset must be an
    // immediate.
19993     if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
19994         !Subtarget->isThumb2())
19995       std::swap(Base, Offset);
19996 
19997     // Post-indexed load / store update the base pointer.
19998     if (Ptr != Base)
19999       return false;
20000   }
20001 
20002   AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
20003   return true;
20004 }
20005 
20006 void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
20007                                                       KnownBits &Known,
20008                                                       const APInt &DemandedElts,
20009                                                       const SelectionDAG &DAG,
20010                                                       unsigned Depth) const {
20011   unsigned BitWidth = Known.getBitWidth();
20012   Known.resetAll();
20013   switch (Op.getOpcode()) {
20014   default: break;
20015   case ARMISD::ADDC:
20016   case ARMISD::ADDE:
20017   case ARMISD::SUBC:
20018   case ARMISD::SUBE:
20019     // Special cases when we convert a carry to a boolean.
20020     if (Op.getResNo() == 0) {
20021       SDValue LHS = Op.getOperand(0);
20022       SDValue RHS = Op.getOperand(1);
20023       // (ADDE 0, 0, C) will give us a single bit.
20024       if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(LHS) &&
20025           isNullConstant(RHS)) {
20026         Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
20027         return;
20028       }
20029     }
20030     break;
20031   case ARMISD::CMOV: {
20032     // Bits are known zero/one if known on the LHS and RHS.
20033     Known = DAG.computeKnownBits(Op.getOperand(0), Depth+1);
20034     if (Known.isUnknown())
20035       return;
20036 
20037     KnownBits KnownRHS = DAG.computeKnownBits(Op.getOperand(1), Depth+1);
20038     Known = Known.intersectWith(KnownRHS);
20039     return;
20040   }
20041   case ISD::INTRINSIC_W_CHAIN: {
20042     ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
20043     Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
20044     switch (IntID) {
20045     default: return;
20046     case Intrinsic::arm_ldaex:
20047     case Intrinsic::arm_ldrex: {
20048       EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
20049       unsigned MemBits = VT.getScalarSizeInBits();
20050       Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
20051       return;
20052     }
20053     }
20054   }
20055   case ARMISD::BFI: {
20056     // Conservatively, we can recurse down the first operand
20057     // and just mask out all affected bits.
20058     Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
20059 
20060     // The operand to BFI is already a mask suitable for removing the bits it
20061     // sets.
20062     ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
20063     const APInt &Mask = CI->getAPIntValue();
20064     Known.Zero &= Mask;
20065     Known.One &= Mask;
20066     return;
20067   }
20068   case ARMISD::VGETLANEs:
20069   case ARMISD::VGETLANEu: {
20070     const SDValue &SrcSV = Op.getOperand(0);
20071     EVT VecVT = SrcSV.getValueType();
20072     assert(VecVT.isVector() && "VGETLANE expected a vector type");
20073     const unsigned NumSrcElts = VecVT.getVectorNumElements();
20074     ConstantSDNode *Pos = cast<ConstantSDNode>(Op.getOperand(1).getNode());
20075     assert(Pos->getAPIntValue().ult(NumSrcElts) &&
20076            "VGETLANE index out of bounds");
20077     unsigned Idx = Pos->getZExtValue();
20078     APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
20079     Known = DAG.computeKnownBits(SrcSV, DemandedElt, Depth + 1);
20080 
20081     EVT VT = Op.getValueType();
20082     const unsigned DstSz = VT.getScalarSizeInBits();
20083     const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits();
20084     (void)SrcSz;
20085     assert(SrcSz == Known.getBitWidth());
20086     assert(DstSz > SrcSz);
20087     if (Op.getOpcode() == ARMISD::VGETLANEs)
20088       Known = Known.sext(DstSz);
20089     else {
20090       Known = Known.zext(DstSz);
20091     }
20092     assert(DstSz == Known.getBitWidth());
20093     break;
20094   }
20095   case ARMISD::VMOVrh: {
20096     KnownBits KnownOp = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
20097     assert(KnownOp.getBitWidth() == 16);
20098     Known = KnownOp.zext(32);
20099     break;
20100   }
20101   case ARMISD::CSINC:
20102   case ARMISD::CSINV:
20103   case ARMISD::CSNEG: {
20104     KnownBits KnownOp0 = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
20105     KnownBits KnownOp1 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
20106 
20107     // The result is either:
20108     // CSINC: KnownOp0 or KnownOp1 + 1
20109     // CSINV: KnownOp0 or ~KnownOp1
20110     // CSNEG: KnownOp0 or KnownOp1 * -1
20111     if (Op.getOpcode() == ARMISD::CSINC)
20112       KnownOp1 = KnownBits::computeForAddSub(
20113           true, false, KnownOp1, KnownBits::makeConstant(APInt(32, 1)));
20114     else if (Op.getOpcode() == ARMISD::CSINV)
20115       std::swap(KnownOp1.Zero, KnownOp1.One);
20116     else if (Op.getOpcode() == ARMISD::CSNEG)
20117       KnownOp1 = KnownBits::mul(
20118           KnownOp1, KnownBits::makeConstant(APInt(32, -1)));
20119 
20120     Known = KnownOp0.intersectWith(KnownOp1);
20121     break;
20122   }
20123   }
20124 }
20125 
20126 bool ARMTargetLowering::targetShrinkDemandedConstant(
20127     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
20128     TargetLoweringOpt &TLO) const {
  // Delay this optimization so we don't have to deal with illegal types, or
  // block other optimizations.
20131   if (!TLO.LegalOps)
20132     return false;
20133 
20134   // Only optimize AND for now.
20135   if (Op.getOpcode() != ISD::AND)
20136     return false;
20137 
20138   EVT VT = Op.getValueType();
20139 
20140   // Ignore vectors.
20141   if (VT.isVector())
20142     return false;
20143 
20144   assert(VT == MVT::i32 && "Unexpected integer type");
20145 
20146   // Make sure the RHS really is a constant.
20147   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
20148   if (!C)
20149     return false;
20150 
20151   unsigned Mask = C->getZExtValue();
20152 
20153   unsigned Demanded = DemandedBits.getZExtValue();
20154   unsigned ShrunkMask = Mask & Demanded;
20155   unsigned ExpandedMask = Mask | ~Demanded;
20156 
20157   // If the mask is all zeros, let the target-independent code replace the
20158   // result with zero.
20159   if (ShrunkMask == 0)
20160     return false;
20161 
20162   // If the mask is all ones, erase the AND. (Currently, the target-independent
20163   // code won't do this, so we have to do it explicitly to avoid an infinite
20164   // loop in obscure cases.)
20165   if (ExpandedMask == ~0U)
20166     return TLO.CombineTo(Op, Op.getOperand(0));
20167 
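  // A replacement mask is legal if it keeps every bit we must keep (all of
  // ShrunkMask) and sets no bits outside ExpandedMask, so every demanded bit
  // the original mask cleared is still cleared.  UseMask rewrites the AND
  // with the new constant, or is a no-op if it is unchanged.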
20168   auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool {
20169     return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0;
20170   };
20171   auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool {
20172     if (NewMask == Mask)
20173       return true;
20174     SDLoc DL(Op);
20175     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
20176     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
20177     return TLO.CombineTo(Op, NewOp);
20178   };
20179 
20180   // Prefer uxtb mask.
20181   if (IsLegalMask(0xFF))
20182     return UseMask(0xFF);
20183 
20184   // Prefer uxth mask.
20185   if (IsLegalMask(0xFFFF))
20186     return UseMask(0xFFFF);
20187 
20188   // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2.
20189   // FIXME: Prefer a contiguous sequence of bits for other optimizations.
20190   if (ShrunkMask < 256)
20191     return UseMask(ShrunkMask);
20192 
20193   // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2.
20194   // FIXME: Prefer a contiguous sequence of bits for other optimizations.
20195   if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256)
20196     return UseMask(ExpandedMask);
20197 
20198   // Potential improvements:
20199   //
20200   // We could try to recognize lsls+lsrs or lsrs+lsls pairs here.
20201   // We could try to prefer Thumb1 immediates which can be lowered to a
20202   // two-instruction sequence.
20203   // We could try to recognize more legal ARM/Thumb2 immediates here.
20204 
20205   return false;
20206 }
20207 
20208 bool ARMTargetLowering::SimplifyDemandedBitsForTargetNode(
20209     SDValue Op, const APInt &OriginalDemandedBits,
20210     const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
20211     unsigned Depth) const {
20212   unsigned Opc = Op.getOpcode();
20213 
20214   switch (Opc) {
20215   case ARMISD::ASRL:
20216   case ARMISD::LSRL: {
20217     // If this is result 0 and the other result is unused, see if the demand
20218     // bits allow us to shrink this long shift into a standard small shift in
20219     // the opposite direction.
20220     if (Op.getResNo() == 0 && !Op->hasAnyUseOfValue(1) &&
20221         isa<ConstantSDNode>(Op->getOperand(2))) {
20222       unsigned ShAmt = Op->getConstantOperandVal(2);
20223       if (ShAmt < 32 && OriginalDemandedBits.isSubsetOf(APInt::getAllOnes(32)
20224                                                         << (32 - ShAmt)))
20225         return TLO.CombineTo(
20226             Op, TLO.DAG.getNode(
20227                     ISD::SHL, SDLoc(Op), MVT::i32, Op.getOperand(1),
20228                     TLO.DAG.getConstant(32 - ShAmt, SDLoc(Op), MVT::i32)));
20229     }
20230     break;
20231   }
20232   case ARMISD::VBICIMM: {
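    // VBICIMM clears the bits selected by its immediate operand; if none of
    // those bits are demanded, the operation has no visible effect and the
    // input can be used directly.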
20233     SDValue Op0 = Op.getOperand(0);
20234     unsigned ModImm = Op.getConstantOperandVal(1);
20235     unsigned EltBits = 0;
20236     uint64_t Mask = ARM_AM::decodeVMOVModImm(ModImm, EltBits);
20237     if ((OriginalDemandedBits & Mask) == 0)
20238       return TLO.CombineTo(Op, Op0);
20239   }
20240   }
20241 
20242   return TargetLowering::SimplifyDemandedBitsForTargetNode(
20243       Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
20244 }
20245 
20246 //===----------------------------------------------------------------------===//
20247 //                           ARM Inline Assembly Support
20248 //===----------------------------------------------------------------------===//
20249 
20250 bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
20251   // Looking for "rev" which is V6+.
20252   if (!Subtarget->hasV6Ops())
20253     return false;
20254 
20255   InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
20256   StringRef AsmStr = IA->getAsmString();
20257   SmallVector<StringRef, 4> AsmPieces;
20258   SplitString(AsmStr, AsmPieces, ";\n");
20259 
20260   switch (AsmPieces.size()) {
20261   default: return false;
20262   case 1:
20263     AsmStr = AsmPieces[0];
20264     AsmPieces.clear();
20265     SplitString(AsmStr, AsmPieces, " \t,");
20266 
20267     // rev $0, $1
20268     if (AsmPieces.size() == 3 &&
20269         AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
20270         IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
20271       IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
20272       if (Ty && Ty->getBitWidth() == 32)
20273         return IntrinsicLowering::LowerToByteSwap(CI);
20274     }
20275     break;
20276   }
20277 
20278   return false;
20279 }
20280 
20281 const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const {
20282   // At this point, we have to lower this constraint to something else, so we
20283   // lower it to an "r" or "w". However, by doing this we will force the result
20284   // to be in register, while the X constraint is much more permissive.
20285   //
20286   // Although we are correct (we are free to emit anything, without
20287   // constraints), we might break use cases that would expect us to be more
20288   // efficient and emit something else.
20289   if (!Subtarget->hasVFP2Base())
20290     return "r";
20291   if (ConstraintVT.isFloatingPoint())
20292     return "w";
20293   if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
20294      (ConstraintVT.getSizeInBits() == 64 ||
20295       ConstraintVT.getSizeInBits() == 128))
20296     return "w";
20297 
20298   return "r";
20299 }
20300 
20301 /// getConstraintType - Given a constraint letter, return the type of
20302 /// constraint it is for this target.
20303 ARMTargetLowering::ConstraintType
20304 ARMTargetLowering::getConstraintType(StringRef Constraint) const {
20305   unsigned S = Constraint.size();
20306   if (S == 1) {
20307     switch (Constraint[0]) {
20308     default:  break;
20309     case 'l': return C_RegisterClass;
20310     case 'w': return C_RegisterClass;
20311     case 'h': return C_RegisterClass;
20312     case 'x': return C_RegisterClass;
20313     case 't': return C_RegisterClass;
20314     case 'j': return C_Immediate; // Constant for movw.
    // An address with a single base register. Due to the way we
    // currently handle addresses, it is the same as an 'r' memory constraint.
20317     case 'Q': return C_Memory;
20318     }
20319   } else if (S == 2) {
20320     switch (Constraint[0]) {
20321     default: break;
20322     case 'T': return C_RegisterClass;
20323     // All 'U+' constraints are addresses.
20324     case 'U': return C_Memory;
20325     }
20326   }
20327   return TargetLowering::getConstraintType(Constraint);
20328 }
20329 
20330 /// Examine constraint type and operand type and determine a weight value.
20331 /// This object must already have been set up with the operand type
20332 /// and the current alternative constraint selected.
20333 TargetLowering::ConstraintWeight
20334 ARMTargetLowering::getSingleConstraintMatchWeight(
20335     AsmOperandInfo &info, const char *constraint) const {
20336   ConstraintWeight weight = CW_Invalid;
20337   Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
20340   if (!CallOperandVal)
20341     return CW_Default;
20342   Type *type = CallOperandVal->getType();
20343   // Look at the constraint type.
20344   switch (*constraint) {
20345   default:
20346     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
20347     break;
20348   case 'l':
20349     if (type->isIntegerTy()) {
20350       if (Subtarget->isThumb())
20351         weight = CW_SpecificReg;
20352       else
20353         weight = CW_Register;
20354     }
20355     break;
20356   case 'w':
20357     if (type->isFloatingPointTy())
20358       weight = CW_Register;
20359     break;
20360   }
20361   return weight;
20362 }
20363 
20364 using RCPair = std::pair<unsigned, const TargetRegisterClass *>;
20365 
20366 RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
20367     const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
20368   switch (Constraint.size()) {
20369   case 1:
20370     // GCC ARM Constraint Letters
20371     switch (Constraint[0]) {
20372     case 'l': // Low regs or general regs.
20373       if (Subtarget->isThumb())
20374         return RCPair(0U, &ARM::tGPRRegClass);
20375       return RCPair(0U, &ARM::GPRRegClass);
20376     case 'h': // High regs or no regs.
20377       if (Subtarget->isThumb())
20378         return RCPair(0U, &ARM::hGPRRegClass);
20379       break;
20380     case 'r':
20381       if (Subtarget->isThumb1Only())
20382         return RCPair(0U, &ARM::tGPRRegClass);
20383       return RCPair(0U, &ARM::GPRRegClass);
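    // 'w': a VFP/NEON register, selected by the size of the value.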
20384     case 'w':
20385       if (VT == MVT::Other)
20386         break;
20387       if (VT == MVT::f32 || VT == MVT::f16 || VT == MVT::bf16)
20388         return RCPair(0U, &ARM::SPRRegClass);
20389       if (VT.getSizeInBits() == 64)
20390         return RCPair(0U, &ARM::DPRRegClass);
20391       if (VT.getSizeInBits() == 128)
20392         return RCPair(0U, &ARM::QPRRegClass);
20393       break;
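    // 'x': like 'w', but restricted to the low registers (the *_8 classes).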
20394     case 'x':
20395       if (VT == MVT::Other)
20396         break;
20397       if (VT == MVT::f32 || VT == MVT::f16 || VT == MVT::bf16)
20398         return RCPair(0U, &ARM::SPR_8RegClass);
20399       if (VT.getSizeInBits() == 64)
20400         return RCPair(0U, &ARM::DPR_8RegClass);
20401       if (VT.getSizeInBits() == 128)
20402         return RCPair(0U, &ARM::QPR_8RegClass);
20403       break;
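    // 't': SPR for 32-bit values, otherwise the D/Q registers accessible to
    // VFP2-class instructions.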
20404     case 't':
20405       if (VT == MVT::Other)
20406         break;
20407       if (VT == MVT::f32 || VT == MVT::i32 || VT == MVT::f16 || VT == MVT::bf16)
20408         return RCPair(0U, &ARM::SPRRegClass);
20409       if (VT.getSizeInBits() == 64)
20410         return RCPair(0U, &ARM::DPR_VFP2RegClass);
20411       if (VT.getSizeInBits() == 128)
20412         return RCPair(0U, &ARM::QPR_VFP2RegClass);
20413       break;
20414     }
20415     break;
20416 
20417   case 2:
20418     if (Constraint[0] == 'T') {
20419       switch (Constraint[1]) {
20420       default:
20421         break;
20422       case 'e':
20423         return RCPair(0U, &ARM::tGPREvenRegClass);
20424       case 'o':
20425         return RCPair(0U, &ARM::tGPROddRegClass);
20426       }
20427     }
20428     break;
20429 
20430   default:
20431     break;
20432   }
20433 
20434   if (StringRef("{cc}").equals_insensitive(Constraint))
20435     return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);
20436 
20437   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
20438 }
20439 
20440 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
20441 /// vector.  If it is invalid, don't add anything to Ops.
20442 void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
20443                                                      StringRef Constraint,
20444                                                      std::vector<SDValue> &Ops,
20445                                                      SelectionDAG &DAG) const {
20446   SDValue Result;
20447 
  // Currently we only support length-1 constraints.
20449   if (Constraint.size() != 1)
20450     return;
20451 
20452   char ConstraintLetter = Constraint[0];
20453   switch (ConstraintLetter) {
20454   default: break;
20455   case 'j':
20456   case 'I': case 'J': case 'K': case 'L':
20457   case 'M': case 'N': case 'O':
20458     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
20459     if (!C)
20460       return;
20461 
20462     int64_t CVal64 = C->getSExtValue();
20463     int CVal = (int) CVal64;
20464     // None of these constraints allow values larger than 32 bits.  Check
20465     // that the value fits in an int.
20466     if (CVal != CVal64)
20467       return;
20468 
20469     switch (ConstraintLetter) {
20470       case 'j':
20471         // Constant suitable for movw, must be between 0 and
20472         // 65535.
20473         if (Subtarget->hasV6T2Ops() || (Subtarget->hasV8MBaselineOps()))
20474           if (CVal >= 0 && CVal <= 65535)
20475             break;
20476         return;
20477       case 'I':
20478         if (Subtarget->isThumb1Only()) {
20479           // This must be a constant between 0 and 255, for ADD
20480           // immediates.
20481           if (CVal >= 0 && CVal <= 255)
20482             break;
20483         } else if (Subtarget->isThumb2()) {
20484           // A constant that can be used as an immediate value in a
20485           // data-processing instruction.
20486           if (ARM_AM::getT2SOImmVal(CVal) != -1)
20487             break;
20488         } else {
20489           // A constant that can be used as an immediate value in a
20490           // data-processing instruction.
20491           if (ARM_AM::getSOImmVal(CVal) != -1)
20492             break;
20493         }
20494         return;
20495 
20496       case 'J':
20497         if (Subtarget->isThumb1Only()) {
20498           // This must be a constant between -255 and -1, for negated ADD
20499           // immediates. This can be used in GCC with an "n" modifier that
20500           // prints the negated value, for use with SUB instructions. It is
20501           // not useful otherwise but is implemented for compatibility.
20502           if (CVal >= -255 && CVal <= -1)
20503             break;
20504         } else {
20505           // This must be a constant between -4095 and 4095. It is not clear
20506           // what this constraint is intended for. Implemented for
20507           // compatibility with GCC.
20508           if (CVal >= -4095 && CVal <= 4095)
20509             break;
20510         }
20511         return;
20512 
20513       case 'K':
20514         if (Subtarget->isThumb1Only()) {
20515           // A 32-bit value where only one byte has a nonzero value. Exclude
20516           // zero to match GCC. This constraint is used by GCC internally for
20517           // constants that can be loaded with a move/shift combination.
20518           // It is not useful otherwise but is implemented for compatibility.
20519           if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
20520             break;
20521         } else if (Subtarget->isThumb2()) {
20522           // A constant whose bitwise inverse can be used as an immediate
20523           // value in a data-processing instruction. This can be used in GCC
20524           // with a "B" modifier that prints the inverted value, for use with
20525           // BIC and MVN instructions. It is not useful otherwise but is
20526           // implemented for compatibility.
20527           if (ARM_AM::getT2SOImmVal(~CVal) != -1)
20528             break;
20529         } else {
20530           // A constant whose bitwise inverse can be used as an immediate
20531           // value in a data-processing instruction. This can be used in GCC
20532           // with a "B" modifier that prints the inverted value, for use with
20533           // BIC and MVN instructions. It is not useful otherwise but is
20534           // implemented for compatibility.
20535           if (ARM_AM::getSOImmVal(~CVal) != -1)
20536             break;
20537         }
20538         return;
20539 
20540       case 'L':
20541         if (Subtarget->isThumb1Only()) {
20542           // This must be a constant between -7 and 7,
20543           // for 3-operand ADD/SUB immediate instructions.
          if (CVal >= -7 && CVal <= 7)
20545             break;
20546         } else if (Subtarget->isThumb2()) {
20547           // A constant whose negation can be used as an immediate value in a
20548           // data-processing instruction. This can be used in GCC with an "n"
20549           // modifier that prints the negated value, for use with SUB
20550           // instructions. It is not useful otherwise but is implemented for
20551           // compatibility.
20552           if (ARM_AM::getT2SOImmVal(-CVal) != -1)
20553             break;
20554         } else {
20555           // A constant whose negation can be used as an immediate value in a
20556           // data-processing instruction. This can be used in GCC with an "n"
20557           // modifier that prints the negated value, for use with SUB
20558           // instructions. It is not useful otherwise but is implemented for
20559           // compatibility.
20560           if (ARM_AM::getSOImmVal(-CVal) != -1)
20561             break;
20562         }
20563         return;
20564 
20565       case 'M':
20566         if (Subtarget->isThumb1Only()) {
20567           // This must be a multiple of 4 between 0 and 1020, for
20568           // ADD sp + immediate.
20569           if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
20570             break;
20571         } else {
20572           // A power of two or a constant between 0 and 32.  This is used in
20573           // GCC for the shift amount on shifted register operands, but it is
20574           // useful in general for any shift amounts.
20575           if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
20576             break;
20577         }
20578         return;
20579 
20580       case 'N':
20581         if (Subtarget->isThumb1Only()) {
20582           // This must be a constant between 0 and 31, for shift amounts.
20583           if (CVal >= 0 && CVal <= 31)
20584             break;
20585         }
20586         return;
20587 
20588       case 'O':
20589         if (Subtarget->isThumb1Only()) {
20590           // This must be a multiple of 4 between -508 and 508, for
20591           // ADD/SUB sp = sp + immediate.
20592           if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
20593             break;
20594         }
20595         return;
20596     }
20597     Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType());
20598     break;
20599   }
20600 
20601   if (Result.getNode()) {
20602     Ops.push_back(Result);
20603     return;
20604   }
20605   return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
20606 }
20607 
20608 static RTLIB::Libcall getDivRemLibcall(
20609     const SDNode *N, MVT::SimpleValueType SVT) {
20610   assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
20611           N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
20612          "Unhandled Opcode in getDivRemLibcall");
20613   bool isSigned = N->getOpcode() == ISD::SDIVREM ||
20614                   N->getOpcode() == ISD::SREM;
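  // On AEABI-style targets these libcalls resolve to the __aeabi_{u,}idivmod
  // family (and __aeabi_{u,}ldivmod for i64), whose names are registered in
  // the ARMTargetLowering constructor.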
20615   RTLIB::Libcall LC;
20616   switch (SVT) {
20617   default: llvm_unreachable("Unexpected request for libcall!");
20618   case MVT::i8:  LC = isSigned ? RTLIB::SDIVREM_I8  : RTLIB::UDIVREM_I8;  break;
20619   case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
20620   case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
20621   case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
20622   }
20623   return LC;
20624 }
20625 
20626 static TargetLowering::ArgListTy getDivRemArgList(
20627     const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) {
20628   assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
20629           N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
20630          "Unhandled Opcode in getDivRemArgList");
20631   bool isSigned = N->getOpcode() == ISD::SDIVREM ||
20632                   N->getOpcode() == ISD::SREM;
20633   TargetLowering::ArgListTy Args;
20634   TargetLowering::ArgListEntry Entry;
20635   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
20636     EVT ArgVT = N->getOperand(i).getValueType();
20637     Type *ArgTy = ArgVT.getTypeForEVT(*Context);
20638     Entry.Node = N->getOperand(i);
20639     Entry.Ty = ArgTy;
20640     Entry.IsSExt = isSigned;
20641     Entry.IsZExt = !isSigned;
20642     Args.push_back(Entry);
20643   }
20644   if (Subtarget->isTargetWindows() && Args.size() >= 2)
20645     std::swap(Args[0], Args[1]);
20646   return Args;
20647 }
20648 
20649 SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
20650   assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
20651           Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
20652           Subtarget->isTargetWindows()) &&
20653          "Register-based DivRem lowering only");
20654   unsigned Opcode = Op->getOpcode();
20655   assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
20656          "Invalid opcode for Div/Rem lowering");
20657   bool isSigned = (Opcode == ISD::SDIVREM);
20658   EVT VT = Op->getValueType(0);
20659   SDLoc dl(Op);
20660 
20661   if (VT == MVT::i64 && isa<ConstantSDNode>(Op.getOperand(1))) {
20662     SmallVector<SDValue> Result;
20663     if (expandDIVREMByConstant(Op.getNode(), Result, MVT::i32, DAG)) {
      SDValue Res0 =
          DAG.getNode(ISD::BUILD_PAIR, dl, VT, Result[0], Result[1]);
      SDValue Res1 =
          DAG.getNode(ISD::BUILD_PAIR, dl, VT, Result[2], Result[3]);
      return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
                         {Res0, Res1});
20670     }
20671   }
20672 
20673   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
20674 
20675   // If the target has hardware divide, use divide + multiply + subtract:
20676   //     div = a / b
20677   //     rem = a - b * div
20678   //     return {div, rem}
20679   // This should be lowered into UDIV/SDIV + MLS later on.
20680   bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
20681                                         : Subtarget->hasDivideInARMMode();
20682   if (hasDivide && Op->getValueType(0).isSimple() &&
20683       Op->getSimpleValueType(0) == MVT::i32) {
20684     unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
20685     const SDValue Dividend = Op->getOperand(0);
20686     const SDValue Divisor = Op->getOperand(1);
20687     SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor);
20688     SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor);
20689     SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);
20690 
20691     SDValue Values[2] = {Div, Rem};
20692     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values);
20693   }
20694 
20695   RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(),
20696                                        VT.getSimpleVT().SimpleTy);
20697   SDValue InChain = DAG.getEntryNode();
20698 
20699   TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(),
20700                                                     DAG.getContext(),
20701                                                     Subtarget);
20702 
20703   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
20704                                          getPointerTy(DAG.getDataLayout()));
20705 
20706   Type *RetTy = StructType::get(Ty, Ty);
20707 
20708   if (Subtarget->isTargetWindows())
20709     InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain);
20710 
20711   TargetLowering::CallLoweringInfo CLI(DAG);
20712   CLI.setDebugLoc(dl).setChain(InChain)
20713     .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
20714     .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
20715 
20716   std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
20717   return CallInfo.first;
20718 }
20719 
20720 // Lowers REM using divmod helpers
20721 // see RTABI section 4.2/4.3
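// For example, an i32 srem becomes a call to __aeabi_idivmod, which returns
// {quotient, remainder}; only the remainder element of the returned aggregate
// is used below.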
20722 SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const {
20723   EVT VT = N->getValueType(0);
20724 
20725   if (VT == MVT::i64 && isa<ConstantSDNode>(N->getOperand(1))) {
20726     SmallVector<SDValue> Result;
20727     if (expandDIVREMByConstant(N, Result, MVT::i32, DAG))
      return DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), N->getValueType(0),
                         Result[0], Result[1]);
20730   }
20731 
20732   // Build return types (div and rem)
20733   std::vector<Type*> RetTyParams;
20734   Type *RetTyElement;
20735 
20736   switch (VT.getSimpleVT().SimpleTy) {
20737   default: llvm_unreachable("Unexpected request for libcall!");
20738   case MVT::i8:   RetTyElement = Type::getInt8Ty(*DAG.getContext());  break;
20739   case MVT::i16:  RetTyElement = Type::getInt16Ty(*DAG.getContext()); break;
20740   case MVT::i32:  RetTyElement = Type::getInt32Ty(*DAG.getContext()); break;
20741   case MVT::i64:  RetTyElement = Type::getInt64Ty(*DAG.getContext()); break;
20742   }
20743 
20744   RetTyParams.push_back(RetTyElement);
20745   RetTyParams.push_back(RetTyElement);
  Type *RetTy = StructType::get(*DAG.getContext(), RetTyParams);
20748 
20749   RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT().
20750                                                              SimpleTy);
20751   SDValue InChain = DAG.getEntryNode();
20752   TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(),
20753                                                     Subtarget);
20754   bool isSigned = N->getOpcode() == ISD::SREM;
20755   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
20756                                          getPointerTy(DAG.getDataLayout()));
20757 
20758   if (Subtarget->isTargetWindows())
20759     InChain = WinDBZCheckDenominator(DAG, N, InChain);
20760 
20761   // Lower call
20762   CallLoweringInfo CLI(DAG);
20763   CLI.setChain(InChain)
20764      .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args))
20765      .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N));
20766   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
20767 
20768   // Return second (rem) result operand (first contains div)
20769   SDNode *ResNode = CallResult.first.getNode();
20770   assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
20771   return ResNode->getOperand(1);
20772 }
20773 
20774 SDValue
20775 ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
20776   assert(Subtarget->isTargetWindows() && "unsupported target platform");
20777   SDLoc DL(Op);
20778 
20779   // Get the inputs.
20780   SDValue Chain = Op.getOperand(0);
20781   SDValue Size  = Op.getOperand(1);
20782 
20783   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
20784           "no-stack-arg-probe")) {
20785     MaybeAlign Align =
20786         cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
20787     SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
20788     Chain = SP.getValue(1);
20789     SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size);
20790     if (Align)
20791       SP =
20792           DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0),
20793                       DAG.getConstant(-(uint64_t)Align->value(), DL, MVT::i32));
20794     Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP);
20795     SDValue Ops[2] = { SP, Chain };
20796     return DAG.getMergeValues(Ops, DL);
20797   }
20798 
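  // Windows stack probing (__chkstk) expects the allocation size in r4 in
  // 4-byte words, hence the shift below; the WIN__CHKSTK pseudo is expanded
  // later into the actual probe sequence, which adjusts SP, and the new SP is
  // read back afterwards.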
20799   SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
20800                               DAG.getConstant(2, DL, MVT::i32));
20801 
20802   SDValue Glue;
20803   Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Glue);
20804   Glue = Chain.getValue(1);
20805 
20806   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
20807   Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Glue);
20808 
20809   SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
20810   Chain = NewSP.getValue(1);
20811 
20812   SDValue Ops[2] = { NewSP, Chain };
20813   return DAG.getMergeValues(Ops, DL);
20814 }
20815 
20816 SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
20817   bool IsStrict = Op->isStrictFPOpcode();
20818   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
20819   const unsigned DstSz = Op.getValueType().getSizeInBits();
20820   const unsigned SrcSz = SrcVal.getValueType().getSizeInBits();
20821   assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 &&
20822          "Unexpected type for custom-lowering FP_EXTEND");
20823 
20824   assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
20825          "With both FP DP and 16, any FP conversion is legal!");
20826 
20827   assert(!(DstSz == 32 && Subtarget->hasFP16()) &&
20828          "With FP16, 16 to 32 conversion is legal!");
20829 
20830   // Converting from 32 -> 64 is valid if we have FP64.
20831   if (SrcSz == 32 && DstSz == 64 && Subtarget->hasFP64()) {
20832     // FIXME: Remove this when we have strict fp instruction selection patterns
20833     if (IsStrict) {
20834       SDLoc Loc(Op);
20835       SDValue Result = DAG.getNode(ISD::FP_EXTEND,
20836                                    Loc, Op.getValueType(), SrcVal);
20837       return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc);
20838     }
20839     return Op;
20840   }
20841 
  // Otherwise we are either converting from 16 -> 64 (without FP16 and/or
  // double precision, or without Armv8 FP), which must be done in two steps
  // via f32, or we are converting from 32 -> 64 without double precision or
  // from 16 -> 32 without FP16, in which case we must emit a libcall.
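  // For example (assuming a target with FP16 but no FP64), an f16 -> f64
  // extend becomes a single f16 -> f32 FP_EXTEND followed by an f32 -> f64
  // libcall such as __aeabi_f2d.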
20847   SDLoc Loc(Op);
20848   RTLIB::Libcall LC;
20849   MakeLibCallOptions CallOptions;
20850   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
20851   for (unsigned Sz = SrcSz; Sz <= 32 && Sz < DstSz; Sz *= 2) {
20852     bool Supported = (Sz == 16 ? Subtarget->hasFP16() : Subtarget->hasFP64());
20853     MVT SrcVT = (Sz == 16 ? MVT::f16 : MVT::f32);
20854     MVT DstVT = (Sz == 16 ? MVT::f32 : MVT::f64);
20855     if (Supported) {
20856       if (IsStrict) {
20857         SrcVal = DAG.getNode(ISD::STRICT_FP_EXTEND, Loc,
20858                              {DstVT, MVT::Other}, {Chain, SrcVal});
20859         Chain = SrcVal.getValue(1);
20860       } else {
20861         SrcVal = DAG.getNode(ISD::FP_EXTEND, Loc, DstVT, SrcVal);
20862       }
20863     } else {
20864       LC = RTLIB::getFPEXT(SrcVT, DstVT);
20865       assert(LC != RTLIB::UNKNOWN_LIBCALL &&
20866              "Unexpected type for custom-lowering FP_EXTEND");
20867       std::tie(SrcVal, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions,
20868                                             Loc, Chain);
20869     }
20870   }
20871 
20872   return IsStrict ? DAG.getMergeValues({SrcVal, Chain}, Loc) : SrcVal;
20873 }
20874 
20875 SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
20876   bool IsStrict = Op->isStrictFPOpcode();
20877 
20878   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
20879   EVT SrcVT = SrcVal.getValueType();
20880   EVT DstVT = Op.getValueType();
20881   const unsigned DstSz = Op.getValueType().getSizeInBits();
20882   const unsigned SrcSz = SrcVT.getSizeInBits();
20883   (void)DstSz;
20884   assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 &&
20885          "Unexpected type for custom-lowering FP_ROUND");
20886 
20887   assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
20888          "With both FP DP and 16, any FP conversion is legal!");
20889 
20890   SDLoc Loc(Op);
20891 
  // A 32 -> 16 round is a single instruction if we have FP16.
20893   if (SrcSz == 32 && Subtarget->hasFP16())
20894     return Op;
20895 
  // Otherwise use a libcall: 32 -> 16, or 64 -> 32 / 16.
20897   RTLIB::Libcall LC = RTLIB::getFPROUND(SrcVT, DstVT);
20898   assert(LC != RTLIB::UNKNOWN_LIBCALL &&
20899          "Unexpected type for custom-lowering FP_ROUND");
20900   MakeLibCallOptions CallOptions;
20901   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
20902   SDValue Result;
20903   std::tie(Result, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions,
20904                                         Loc, Chain);
20905   return IsStrict ? DAG.getMergeValues({Result, Chain}, Loc) : Result;
20906 }
20907 
20908 bool
20909 ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
20910   // The ARM target isn't yet aware of offsets.
20911   return false;
20912 }
20913 
20914 bool ARM::isBitFieldInvertedMask(unsigned v) {
20915   if (v == 0xffffffff)
20916     return false;
20917 
  // There can be 1's on either or both "outsides"; all the "inside" bits must
  // be 0's.
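  // For example, v = 0xf000000f: ~v = 0x0ffffff0 is a contiguous (shifted)
  // mask, so v is usable as a BFC/BFI-style bitfield mask.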
20920   return isShiftedMask_32(~v);
20921 }
20922 
20923 /// isFPImmLegal - Returns true if the target can instruction select the
20924 /// specified FP immediate natively. If false, the legalizer will
20925 /// materialize the FP immediate as a load from a constant pool.
20926 bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
20927                                      bool ForCodeSize) const {
20928   if (!Subtarget->hasVFP3Base())
20929     return false;
20930   if (VT == MVT::f16 && Subtarget->hasFullFP16())
20931     return ARM_AM::getFP16Imm(Imm) != -1;
20932   if (VT == MVT::f32 && Subtarget->hasFullFP16() &&
20933       ARM_AM::getFP32FP16Imm(Imm) != -1)
20934     return true;
20935   if (VT == MVT::f32)
20936     return ARM_AM::getFP32Imm(Imm) != -1;
20937   if (VT == MVT::f64 && Subtarget->hasFP64())
20938     return ARM_AM::getFP64Imm(Imm) != -1;
20939   return false;
20940 }
20941 
20942 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
20943 /// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
20944 /// specified in the intrinsic calls.
20945 bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
20946                                            const CallInst &I,
20947                                            MachineFunction &MF,
20948                                            unsigned Intrinsic) const {
20949   switch (Intrinsic) {
20950   case Intrinsic::arm_neon_vld1:
20951   case Intrinsic::arm_neon_vld2:
20952   case Intrinsic::arm_neon_vld3:
20953   case Intrinsic::arm_neon_vld4:
20954   case Intrinsic::arm_neon_vld2lane:
20955   case Intrinsic::arm_neon_vld3lane:
20956   case Intrinsic::arm_neon_vld4lane:
20957   case Intrinsic::arm_neon_vld2dup:
20958   case Intrinsic::arm_neon_vld3dup:
20959   case Intrinsic::arm_neon_vld4dup: {
20960     Info.opc = ISD::INTRINSIC_W_CHAIN;
20961     // Conservatively set memVT to the entire set of vectors loaded.
20962     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
20963     uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
20964     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
20965     Info.ptrVal = I.getArgOperand(0);
20966     Info.offset = 0;
20967     Value *AlignArg = I.getArgOperand(I.arg_size() - 1);
20968     Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue();
20969     // volatile loads with NEON intrinsics not supported
20970     Info.flags = MachineMemOperand::MOLoad;
20971     return true;
20972   }
20973   case Intrinsic::arm_neon_vld1x2:
20974   case Intrinsic::arm_neon_vld1x3:
20975   case Intrinsic::arm_neon_vld1x4: {
20976     Info.opc = ISD::INTRINSIC_W_CHAIN;
20977     // Conservatively set memVT to the entire set of vectors loaded.
20978     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
20979     uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
20980     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
20981     Info.ptrVal = I.getArgOperand(I.arg_size() - 1);
20982     Info.offset = 0;
20983     Info.align.reset();
20984     // volatile loads with NEON intrinsics not supported
20985     Info.flags = MachineMemOperand::MOLoad;
20986     return true;
20987   }
20988   case Intrinsic::arm_neon_vst1:
20989   case Intrinsic::arm_neon_vst2:
20990   case Intrinsic::arm_neon_vst3:
20991   case Intrinsic::arm_neon_vst4:
20992   case Intrinsic::arm_neon_vst2lane:
20993   case Intrinsic::arm_neon_vst3lane:
20994   case Intrinsic::arm_neon_vst4lane: {
20995     Info.opc = ISD::INTRINSIC_VOID;
20996     // Conservatively set memVT to the entire set of vectors stored.
20997     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
20998     unsigned NumElts = 0;
20999     for (unsigned ArgI = 1, ArgE = I.arg_size(); ArgI < ArgE; ++ArgI) {
21000       Type *ArgTy = I.getArgOperand(ArgI)->getType();
21001       if (!ArgTy->isVectorTy())
21002         break;
21003       NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
21004     }
21005     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
21006     Info.ptrVal = I.getArgOperand(0);
21007     Info.offset = 0;
21008     Value *AlignArg = I.getArgOperand(I.arg_size() - 1);
21009     Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue();
21010     // volatile stores with NEON intrinsics not supported
21011     Info.flags = MachineMemOperand::MOStore;
21012     return true;
21013   }
21014   case Intrinsic::arm_neon_vst1x2:
21015   case Intrinsic::arm_neon_vst1x3:
21016   case Intrinsic::arm_neon_vst1x4: {
21017     Info.opc = ISD::INTRINSIC_VOID;
21018     // Conservatively set memVT to the entire set of vectors stored.
21019     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
21020     unsigned NumElts = 0;
21021     for (unsigned ArgI = 1, ArgE = I.arg_size(); ArgI < ArgE; ++ArgI) {
21022       Type *ArgTy = I.getArgOperand(ArgI)->getType();
21023       if (!ArgTy->isVectorTy())
21024         break;
21025       NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
21026     }
21027     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
21028     Info.ptrVal = I.getArgOperand(0);
21029     Info.offset = 0;
21030     Info.align.reset();
21031     // volatile stores with NEON intrinsics not supported
21032     Info.flags = MachineMemOperand::MOStore;
21033     return true;
21034   }
21035   case Intrinsic::arm_mve_vld2q:
21036   case Intrinsic::arm_mve_vld4q: {
21037     Info.opc = ISD::INTRINSIC_W_CHAIN;
21038     // Conservatively set memVT to the entire set of vectors loaded.
21039     Type *VecTy = cast<StructType>(I.getType())->getElementType(1);
21040     unsigned Factor = Intrinsic == Intrinsic::arm_mve_vld2q ? 2 : 4;
21041     Info.memVT = EVT::getVectorVT(VecTy->getContext(), MVT::i64, Factor * 2);
21042     Info.ptrVal = I.getArgOperand(0);
21043     Info.offset = 0;
21044     Info.align = Align(VecTy->getScalarSizeInBits() / 8);
21045     // volatile loads with MVE intrinsics not supported
21046     Info.flags = MachineMemOperand::MOLoad;
21047     return true;
21048   }
21049   case Intrinsic::arm_mve_vst2q:
21050   case Intrinsic::arm_mve_vst4q: {
21051     Info.opc = ISD::INTRINSIC_VOID;
21052     // Conservatively set memVT to the entire set of vectors stored.
21053     Type *VecTy = I.getArgOperand(1)->getType();
21054     unsigned Factor = Intrinsic == Intrinsic::arm_mve_vst2q ? 2 : 4;
21055     Info.memVT = EVT::getVectorVT(VecTy->getContext(), MVT::i64, Factor * 2);
21056     Info.ptrVal = I.getArgOperand(0);
21057     Info.offset = 0;
21058     Info.align = Align(VecTy->getScalarSizeInBits() / 8);
21059     // volatile stores with MVE intrinsics not supported
21060     Info.flags = MachineMemOperand::MOStore;
21061     return true;
21062   }
21063   case Intrinsic::arm_mve_vldr_gather_base:
21064   case Intrinsic::arm_mve_vldr_gather_base_predicated: {
21065     Info.opc = ISD::INTRINSIC_W_CHAIN;
21066     Info.ptrVal = nullptr;
21067     Info.memVT = MVT::getVT(I.getType());
21068     Info.align = Align(1);
21069     Info.flags |= MachineMemOperand::MOLoad;
21070     return true;
21071   }
21072   case Intrinsic::arm_mve_vldr_gather_base_wb:
21073   case Intrinsic::arm_mve_vldr_gather_base_wb_predicated: {
21074     Info.opc = ISD::INTRINSIC_W_CHAIN;
21075     Info.ptrVal = nullptr;
21076     Info.memVT = MVT::getVT(I.getType()->getContainedType(0));
21077     Info.align = Align(1);
21078     Info.flags |= MachineMemOperand::MOLoad;
21079     return true;
21080   }
21081   case Intrinsic::arm_mve_vldr_gather_offset:
21082   case Intrinsic::arm_mve_vldr_gather_offset_predicated: {
21083     Info.opc = ISD::INTRINSIC_W_CHAIN;
21084     Info.ptrVal = nullptr;
21085     MVT DataVT = MVT::getVT(I.getType());
21086     unsigned MemSize = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
21087     Info.memVT = MVT::getVectorVT(MVT::getIntegerVT(MemSize),
21088                                   DataVT.getVectorNumElements());
21089     Info.align = Align(1);
21090     Info.flags |= MachineMemOperand::MOLoad;
21091     return true;
21092   }
21093   case Intrinsic::arm_mve_vstr_scatter_base:
21094   case Intrinsic::arm_mve_vstr_scatter_base_predicated: {
21095     Info.opc = ISD::INTRINSIC_VOID;
21096     Info.ptrVal = nullptr;
21097     Info.memVT = MVT::getVT(I.getArgOperand(2)->getType());
21098     Info.align = Align(1);
21099     Info.flags |= MachineMemOperand::MOStore;
21100     return true;
21101   }
21102   case Intrinsic::arm_mve_vstr_scatter_base_wb:
21103   case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated: {
21104     Info.opc = ISD::INTRINSIC_W_CHAIN;
21105     Info.ptrVal = nullptr;
21106     Info.memVT = MVT::getVT(I.getArgOperand(2)->getType());
21107     Info.align = Align(1);
21108     Info.flags |= MachineMemOperand::MOStore;
21109     return true;
21110   }
21111   case Intrinsic::arm_mve_vstr_scatter_offset:
21112   case Intrinsic::arm_mve_vstr_scatter_offset_predicated: {
21113     Info.opc = ISD::INTRINSIC_VOID;
21114     Info.ptrVal = nullptr;
21115     MVT DataVT = MVT::getVT(I.getArgOperand(2)->getType());
21116     unsigned MemSize = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
21117     Info.memVT = MVT::getVectorVT(MVT::getIntegerVT(MemSize),
21118                                   DataVT.getVectorNumElements());
21119     Info.align = Align(1);
21120     Info.flags |= MachineMemOperand::MOStore;
21121     return true;
21122   }
21123   case Intrinsic::arm_ldaex:
21124   case Intrinsic::arm_ldrex: {
21125     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
21126     Type *ValTy = I.getParamElementType(0);
21127     Info.opc = ISD::INTRINSIC_W_CHAIN;
21128     Info.memVT = MVT::getVT(ValTy);
21129     Info.ptrVal = I.getArgOperand(0);
21130     Info.offset = 0;
21131     Info.align = DL.getABITypeAlign(ValTy);
21132     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
21133     return true;
21134   }
21135   case Intrinsic::arm_stlex:
21136   case Intrinsic::arm_strex: {
21137     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
21138     Type *ValTy = I.getParamElementType(1);
21139     Info.opc = ISD::INTRINSIC_W_CHAIN;
21140     Info.memVT = MVT::getVT(ValTy);
21141     Info.ptrVal = I.getArgOperand(1);
21142     Info.offset = 0;
21143     Info.align = DL.getABITypeAlign(ValTy);
21144     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
21145     return true;
21146   }
21147   case Intrinsic::arm_stlexd:
21148   case Intrinsic::arm_strexd:
21149     Info.opc = ISD::INTRINSIC_W_CHAIN;
21150     Info.memVT = MVT::i64;
21151     Info.ptrVal = I.getArgOperand(2);
21152     Info.offset = 0;
21153     Info.align = Align(8);
21154     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
21155     return true;
21156 
21157   case Intrinsic::arm_ldaexd:
21158   case Intrinsic::arm_ldrexd:
21159     Info.opc = ISD::INTRINSIC_W_CHAIN;
21160     Info.memVT = MVT::i64;
21161     Info.ptrVal = I.getArgOperand(0);
21162     Info.offset = 0;
21163     Info.align = Align(8);
21164     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
21165     return true;
21166 
21167   default:
21168     break;
21169   }
21170 
21171   return false;
21172 }
21173 
21174 /// Returns true if it is beneficial to convert a load of a constant
21175 /// to just the constant itself.
21176 bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
21177                                                           Type *Ty) const {
21178   assert(Ty->isIntegerTy());
21179 
21180   unsigned Bits = Ty->getPrimitiveSizeInBits();
21181   if (Bits == 0 || Bits > 32)
21182     return false;
21183   return true;
21184 }
21185 
21186 bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
21187                                                 unsigned Index) const {
21188   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
21189     return false;
21190 
21191   return (Index == 0 || Index == ResVT.getVectorNumElements());
21192 }
21193 
21194 Instruction *ARMTargetLowering::makeDMB(IRBuilderBase &Builder,
21195                                         ARM_MB::MemBOpt Domain) const {
21196   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
21197 
21198   // First, if the target has no DMB, see what fallback we can use.
21199   if (!Subtarget->hasDataBarrier()) {
21200     // Some ARMv6 cpus can support data barriers with an mcr instruction.
21201     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
21202     // here.
21203     if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) {
21204       Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr);
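      // This emits the equivalent of "mcr p15, 0, <Rd>, c7, c10, 5", the CP15
      // data memory barrier operation defined by ARMv6.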
21205       Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0),
21206                         Builder.getInt32(0), Builder.getInt32(7),
21207                         Builder.getInt32(10), Builder.getInt32(5)};
21208       return Builder.CreateCall(MCR, args);
21209     } else {
21210       // Instead of using barriers, atomic accesses on these subtargets use
21211       // libcalls.
21212       llvm_unreachable("makeDMB on a target so old that it has no barriers");
21213     }
21214   } else {
21215     Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb);
21216     // Only a full system barrier exists in the M-class architectures.
21217     Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain;
21218     Constant *CDomain = Builder.getInt32(Domain);
21219     return Builder.CreateCall(DMB, CDomain);
21220   }
21221 }
21222 
21223 // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
21224 Instruction *ARMTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
21225                                                  Instruction *Inst,
21226                                                  AtomicOrdering Ord) const {
21227   switch (Ord) {
21228   case AtomicOrdering::NotAtomic:
21229   case AtomicOrdering::Unordered:
21230     llvm_unreachable("Invalid fence: unordered/non-atomic");
21231   case AtomicOrdering::Monotonic:
21232   case AtomicOrdering::Acquire:
21233     return nullptr; // Nothing to do
21234   case AtomicOrdering::SequentiallyConsistent:
21235     if (!Inst->hasAtomicStore())
21236       return nullptr; // Nothing to do
21237     [[fallthrough]];
21238   case AtomicOrdering::Release:
21239   case AtomicOrdering::AcquireRelease:
21240     if (Subtarget->preferISHSTBarriers())
21241       return makeDMB(Builder, ARM_MB::ISHST);
21242     // FIXME: add a comment with a link to documentation justifying this.
21243     else
21244       return makeDMB(Builder, ARM_MB::ISH);
21245   }
21246   llvm_unreachable("Unknown fence ordering in emitLeadingFence");
21247 }
21248 
21249 Instruction *ARMTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
21250                                                   Instruction *Inst,
21251                                                   AtomicOrdering Ord) const {
21252   switch (Ord) {
21253   case AtomicOrdering::NotAtomic:
21254   case AtomicOrdering::Unordered:
21255     llvm_unreachable("Invalid fence: unordered/not-atomic");
21256   case AtomicOrdering::Monotonic:
21257   case AtomicOrdering::Release:
21258     return nullptr; // Nothing to do
21259   case AtomicOrdering::Acquire:
21260   case AtomicOrdering::AcquireRelease:
21261   case AtomicOrdering::SequentiallyConsistent:
21262     return makeDMB(Builder, ARM_MB::ISH);
21263   }
21264   llvm_unreachable("Unknown fence ordering in emitTrailingFence");
21265 }
21266 
// Loads and stores of less than 64 bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex-M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
21271 TargetLoweringBase::AtomicExpansionKind
21272 ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
21273   bool has64BitAtomicStore;
21274   if (Subtarget->isMClass())
21275     has64BitAtomicStore = false;
21276   else if (Subtarget->isThumb())
21277     has64BitAtomicStore = Subtarget->hasV7Ops();
21278   else
21279     has64BitAtomicStore = Subtarget->hasV6Ops();
21280 
21281   unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
21282   return Size == 64 && has64BitAtomicStore ? AtomicExpansionKind::Expand
21283                                            : AtomicExpansionKind::None;
21284 }
21285 
// Loads and stores of less than 64 bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex-M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
21290 // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that
21291 // guarantee, see DDI0406C ARM architecture reference manual,
21292 // sections A8.8.72-74 LDRD)
21293 TargetLowering::AtomicExpansionKind
21294 ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
21295   bool has64BitAtomicLoad;
21296   if (Subtarget->isMClass())
21297     has64BitAtomicLoad = false;
21298   else if (Subtarget->isThumb())
21299     has64BitAtomicLoad = Subtarget->hasV7Ops();
21300   else
21301     has64BitAtomicLoad = Subtarget->hasV6Ops();
21302 
21303   unsigned Size = LI->getType()->getPrimitiveSizeInBits();
21304   return (Size == 64 && has64BitAtomicLoad) ? AtomicExpansionKind::LLOnly
21305                                             : AtomicExpansionKind::None;
21306 }
21307 
21308 // For the real atomic operations, we have ldrex/strex up to 32 bits,
21309 // and up to 64 bits on the non-M profiles
21310 TargetLowering::AtomicExpansionKind
21311 ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
21312   if (AI->isFloatingPointOperation())
21313     return AtomicExpansionKind::CmpXChg;
21314 
21315   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
21316   bool hasAtomicRMW;
21317   if (Subtarget->isMClass())
21318     hasAtomicRMW = Subtarget->hasV8MBaselineOps();
21319   else if (Subtarget->isThumb())
21320     hasAtomicRMW = Subtarget->hasV7Ops();
21321   else
21322     hasAtomicRMW = Subtarget->hasV6Ops();
21323   if (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW) {
21324     // At -O0, fast-regalloc cannot cope with the live vregs necessary to
21325     // implement atomicrmw without spilling. If the target address is also on
21326     // the stack and close enough to the spill slot, this can lead to a
21327     // situation where the monitor always gets cleared and the atomic operation
21328     // can never succeed. So at -O0 lower this operation to a CAS loop.
21329     if (getTargetMachine().getOptLevel() == CodeGenOptLevel::None)
21330       return AtomicExpansionKind::CmpXChg;
21331     return AtomicExpansionKind::LLSC;
21332   }
21333   return AtomicExpansionKind::None;
21334 }
21335 
// Similarly to shouldExpandAtomicRMWInIR, ldrex/strex can be used up to 32
// bits, and up to 64 bits on the non-M profiles.
21338 TargetLowering::AtomicExpansionKind
21339 ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
21340   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
21341   // implement cmpxchg without spilling. If the address being exchanged is also
21342   // on the stack and close enough to the spill slot, this can lead to a
21343   // situation where the monitor always gets cleared and the atomic operation
21344   // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
21345   unsigned Size = AI->getOperand(1)->getType()->getPrimitiveSizeInBits();
21346   bool HasAtomicCmpXchg;
21347   if (Subtarget->isMClass())
21348     HasAtomicCmpXchg = Subtarget->hasV8MBaselineOps();
21349   else if (Subtarget->isThumb())
21350     HasAtomicCmpXchg = Subtarget->hasV7Ops();
21351   else
21352     HasAtomicCmpXchg = Subtarget->hasV6Ops();
21353   if (getTargetMachine().getOptLevel() != CodeGenOptLevel::None &&
21354       HasAtomicCmpXchg && Size <= (Subtarget->isMClass() ? 32U : 64U))
21355     return AtomicExpansionKind::LLSC;
21356   return AtomicExpansionKind::None;
21357 }
21358 
21359 bool ARMTargetLowering::shouldInsertFencesForAtomic(
21360     const Instruction *I) const {
21361   return InsertFencesForAtomic;
21362 }
21363 
21364 bool ARMTargetLowering::useLoadStackGuardNode() const {
21365   // ROPI/RWPI are not supported currently.
21366   return !Subtarget->isROPI() && !Subtarget->isRWPI();
21367 }
21368 
21369 void ARMTargetLowering::insertSSPDeclarations(Module &M) const {
21370   if (!Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
21371     return TargetLowering::insertSSPDeclarations(M);
21372 
21373   // MSVC CRT has a global variable holding security cookie.
21374   M.getOrInsertGlobal("__security_cookie",
21375                       PointerType::getUnqual(M.getContext()));
21376 
21377   // MSVC CRT has a function to validate security cookie.
21378   FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
21379       "__security_check_cookie", Type::getVoidTy(M.getContext()),
21380       PointerType::getUnqual(M.getContext()));
21381   if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee()))
21382     F->addParamAttr(0, Attribute::AttrKind::InReg);
21383 }
21384 
21385 Value *ARMTargetLowering::getSDagStackGuard(const Module &M) const {
21386   // MSVC CRT has a global variable holding security cookie.
21387   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
21388     return M.getGlobalVariable("__security_cookie");
21389   return TargetLowering::getSDagStackGuard(M);
21390 }
21391 
21392 Function *ARMTargetLowering::getSSPStackGuardCheck(const Module &M) const {
21393   // MSVC CRT has a function to validate security cookie.
21394   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
21395     return M.getFunction("__security_check_cookie");
21396   return TargetLowering::getSSPStackGuardCheck(M);
21397 }
21398 
21399 bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
21400                                                   unsigned &Cost) const {
21401   // If we do not have NEON, vector types are not natively supported.
21402   if (!Subtarget->hasNEON())
21403     return false;
21404 
  // Floating point values and vector values map to the same register file.
  // Therefore, although we could do a store+extract on a vector type, it is
  // better to leave these as floats, since we have more freedom in the
  // addressing modes for them.
21409   if (VectorTy->isFPOrFPVectorTy())
21410     return false;
21411 
21412   // If the index is unknown at compile time, this is very expensive to lower
21413   // and it is not possible to combine the store with the extract.
21414   if (!isa<ConstantInt>(Idx))
21415     return false;
21416 
21417   assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
21418   unsigned BitWidth = VectorTy->getPrimitiveSizeInBits().getFixedValue();
21419   // We can do a store + vector extract on any vector that fits perfectly in a D
21420   // or Q register.
21421   if (BitWidth == 64 || BitWidth == 128) {
21422     Cost = 0;
21423     return true;
21424   }
21425   return false;
21426 }
21427 
21428 bool ARMTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
21429   return Subtarget->hasV6T2Ops();
21430 }
21431 
21432 bool ARMTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
21433   return Subtarget->hasV6T2Ops();
21434 }
21435 
21436 bool ARMTargetLowering::isMaskAndCmp0FoldingBeneficial(
21437     const Instruction &AndI) const {
21438   if (!Subtarget->hasV7Ops())
21439     return false;
21440 
21441   // Sink the `and` instruction only if the mask would fit into a modified
21442   // immediate operand.
21443   ConstantInt *Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
21444   if (!Mask || Mask->getValue().getBitWidth() > 32u)
21445     return false;
21446   auto MaskVal = unsigned(Mask->getValue().getZExtValue());
21447   return (Subtarget->isThumb2() ? ARM_AM::getT2SOImmVal(MaskVal)
21448                                 : ARM_AM::getSOImmVal(MaskVal)) != -1;
21449 }
21450 
21451 TargetLowering::ShiftLegalizationStrategy
21452 ARMTargetLowering::preferredShiftLegalizationStrategy(
21453     SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const {
21454   if (Subtarget->hasMinSize() && !Subtarget->isTargetWindows())
21455     return ShiftLegalizationStrategy::LowerToLibcall;
21456   return TargetLowering::preferredShiftLegalizationStrategy(DAG, N,
21457                                                             ExpansionFactor);
21458 }
21459 
21460 Value *ARMTargetLowering::emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
21461                                          Value *Addr,
21462                                          AtomicOrdering Ord) const {
21463   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
21464   bool IsAcquire = isAcquireOrStronger(Ord);
21465 
21466   // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
21467   // intrinsic must return {i32, i32} and we have to recombine them into a
21468   // single i64 here.
21469   if (ValueTy->getPrimitiveSizeInBits() == 64) {
21470     Intrinsic::ID Int =
21471         IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
21472     Function *Ldrex = Intrinsic::getDeclaration(M, Int);
21473 
21474     Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");
21475 
21476     Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
21477     Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
21478     if (!Subtarget->isLittle())
      std::swap(Lo, Hi);
21480     Lo = Builder.CreateZExt(Lo, ValueTy, "lo64");
21481     Hi = Builder.CreateZExt(Hi, ValueTy, "hi64");
21482     return Builder.CreateOr(
21483         Lo, Builder.CreateShl(Hi, ConstantInt::get(ValueTy, 32)), "val64");
21484   }
21485 
21486   Type *Tys[] = { Addr->getType() };
21487   Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
21488   Function *Ldrex = Intrinsic::getDeclaration(M, Int, Tys);
21489   CallInst *CI = Builder.CreateCall(Ldrex, Addr);
21490 
21491   CI->addParamAttr(
21492       0, Attribute::get(M->getContext(), Attribute::ElementType, ValueTy));
21493   return Builder.CreateTruncOrBitCast(CI, ValueTy);
21494 }
21495 
21496 void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
21497     IRBuilderBase &Builder) const {
21498   if (!Subtarget->hasV7Ops())
21499     return;
21500   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
21501   Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::arm_clrex));
21502 }
21503 
21504 Value *ARMTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
21505                                                Value *Val, Value *Addr,
21506                                                AtomicOrdering Ord) const {
21507   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
21508   bool IsRelease = isReleaseOrStronger(Ord);
21509 
21510   // Since the intrinsics must have legal type, the i64 intrinsics take two
21511   // parameters: "i32, i32". We must marshal Val into the appropriate form
21512   // before the call.
21513   if (Val->getType()->getPrimitiveSizeInBits() == 64) {
21514     Intrinsic::ID Int =
21515         IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
21516     Function *Strex = Intrinsic::getDeclaration(M, Int);
21517     Type *Int32Ty = Type::getInt32Ty(M->getContext());
21518 
21519     Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
21520     Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
21521     if (!Subtarget->isLittle())
21522       std::swap(Lo, Hi);
21523     return Builder.CreateCall(Strex, {Lo, Hi, Addr});
21524   }
21525 
21526   Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
21527   Type *Tys[] = { Addr->getType() };
21528   Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);
21529 
21530   CallInst *CI = Builder.CreateCall(
21531       Strex, {Builder.CreateZExtOrBitCast(
21532                   Val, Strex->getFunctionType()->getParamType(0)),
21533               Addr});
21534   CI->addParamAttr(1, Attribute::get(M->getContext(), Attribute::ElementType,
21535                                      Val->getType()));
21536   return CI;
21537 }
21538 
21539 
21540 bool ARMTargetLowering::alignLoopsWithOptSize() const {
21541   return Subtarget->isMClass();
21542 }
21543 
21544 /// A helper function for determining the number of interleaved accesses we
21545 /// will generate when lowering accesses of the given type.
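/// For example, a 128-bit <4 x i32> needs a single access, while a 512-bit
/// <16 x i32> is split into four.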
21546 unsigned
21547 ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy,
21548                                              const DataLayout &DL) const {
21549   return (DL.getTypeSizeInBits(VecTy) + 127) / 128;
21550 }
21551 
21552 bool ARMTargetLowering::isLegalInterleavedAccessType(
21553     unsigned Factor, FixedVectorType *VecTy, Align Alignment,
21554     const DataLayout &DL) const {
21555 
21556   unsigned VecSize = DL.getTypeSizeInBits(VecTy);
21557   unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());
21558 
21559   if (!Subtarget->hasNEON() && !Subtarget->hasMVEIntegerOps())
21560     return false;
21561 
21562   // Ensure the vector doesn't have f16 elements. Even though we could do an
21563   // i16 vldN, we can't hold the f16 vectors and will end up converting via
21564   // f32.
21565   if (Subtarget->hasNEON() && VecTy->getElementType()->isHalfTy())
21566     return false;
21567   if (Subtarget->hasMVEIntegerOps() && Factor == 3)
21568     return false;
21569 
21570   // Ensure the number of vector elements is greater than 1.
21571   if (VecTy->getNumElements() < 2)
21572     return false;
21573 
21574   // Ensure the element type is legal.
21575   if (ElSize != 8 && ElSize != 16 && ElSize != 32)
21576     return false;
  // And ensure the alignment is high enough under MVE.
21578   if (Subtarget->hasMVEIntegerOps() && Alignment < ElSize / 8)
21579     return false;
21580 
21581   // Ensure the total vector size is 64 or a multiple of 128. Types larger than
21582   // 128 will be split into multiple interleaved accesses.
21583   if (Subtarget->hasNEON() && VecSize == 64)
21584     return true;
21585   return VecSize % 128 == 0;
21586 }
21587 
21588 unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const {
21589   if (Subtarget->hasNEON())
21590     return 4;
21591   if (Subtarget->hasMVEIntegerOps())
21592     return MVEMaxSupportedInterleaveFactor;
21593   return TargetLoweringBase::getMaxSupportedInterleaveFactor();
21594 }
21595 
21596 /// Lower an interleaved load into a vldN intrinsic.
21597 ///
21598 /// E.g. Lower an interleaved load (Factor = 2):
21599 ///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4
21600 ///        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
21601 ///        %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
21602 ///
21603 ///      Into:
21604 ///        %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4)
21605 ///        %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0
21606 ///        %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1
21607 bool ARMTargetLowering::lowerInterleavedLoad(
21608     LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
21609     ArrayRef<unsigned> Indices, unsigned Factor) const {
21610   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
21611          "Invalid interleave factor");
21612   assert(!Shuffles.empty() && "Empty shufflevector input");
21613   assert(Shuffles.size() == Indices.size() &&
21614          "Unmatched number of shufflevectors and indices");
21615 
21616   auto *VecTy = cast<FixedVectorType>(Shuffles[0]->getType());
21617   Type *EltTy = VecTy->getElementType();
21618 
21619   const DataLayout &DL = LI->getModule()->getDataLayout();
21620   Align Alignment = LI->getAlign();
21621 
21622   // Skip if we do not have NEON and skip illegal vector types. We can
21623   // "legalize" wide vector types into multiple interleaved accesses as long as
21624   // the vector types are divisible by 128.
21625   if (!isLegalInterleavedAccessType(Factor, VecTy, Alignment, DL))
21626     return false;
21627 
21628   unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL);
21629 
  // A pointer vector cannot be the return type of the ldN intrinsics. We need
  // to load integer vectors first and then convert to pointer vectors.
21632   if (EltTy->isPointerTy())
21633     VecTy = FixedVectorType::get(DL.getIntPtrType(EltTy), VecTy);
21634 
21635   IRBuilder<> Builder(LI);
21636 
21637   // The base address of the load.
21638   Value *BaseAddr = LI->getPointerOperand();
21639 
21640   if (NumLoads > 1) {
21641     // If we're going to generate more than one load, reset the sub-vector type
21642     // to something legal.
21643     VecTy = FixedVectorType::get(VecTy->getElementType(),
21644                                  VecTy->getNumElements() / NumLoads);
21645   }
21646 
21647   assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!");
21648 
21649   auto createLoadIntrinsic = [&](Value *BaseAddr) {
21650     if (Subtarget->hasNEON()) {
21651       Type *PtrTy = Builder.getPtrTy(LI->getPointerAddressSpace());
21652       Type *Tys[] = {VecTy, PtrTy};
21653       static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
21654                                                 Intrinsic::arm_neon_vld3,
21655                                                 Intrinsic::arm_neon_vld4};
21656       Function *VldnFunc =
21657           Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);
21658 
21659       SmallVector<Value *, 2> Ops;
21660       Ops.push_back(BaseAddr);
21661       Ops.push_back(Builder.getInt32(LI->getAlign().value()));
21662 
21663       return Builder.CreateCall(VldnFunc, Ops, "vldN");
21664     } else {
21665       assert((Factor == 2 || Factor == 4) &&
21666              "expected interleave factor of 2 or 4 for MVE");
21667       Intrinsic::ID LoadInts =
21668           Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q;
21669       Type *PtrTy = Builder.getPtrTy(LI->getPointerAddressSpace());
21670       Type *Tys[] = {VecTy, PtrTy};
21671       Function *VldnFunc =
21672           Intrinsic::getDeclaration(LI->getModule(), LoadInts, Tys);
21673 
21674       SmallVector<Value *, 2> Ops;
21675       Ops.push_back(BaseAddr);
21676       return Builder.CreateCall(VldnFunc, Ops, "vldN");
21677     }
21678   };
21679 
21680   // Holds sub-vectors extracted from the load intrinsic return values. The
21681   // sub-vectors are associated with the shufflevector instructions they will
21682   // replace.
21683   DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;
21684 
21685   for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
21686     // If we're generating more than one load, compute the base address of
21687     // subsequent loads as an offset from the previous.
21688     if (LoadCount > 0)
21689       BaseAddr = Builder.CreateConstGEP1_32(VecTy->getElementType(), BaseAddr,
21690                                             VecTy->getNumElements() * Factor);
21691 
21692     CallInst *VldN = createLoadIntrinsic(BaseAddr);
21693 
21694     // Replace uses of each shufflevector with the corresponding vector loaded
21695     // by ldN.
21696     for (unsigned i = 0; i < Shuffles.size(); i++) {
21697       ShuffleVectorInst *SV = Shuffles[i];
21698       unsigned Index = Indices[i];
21699 
21700       Value *SubVec = Builder.CreateExtractValue(VldN, Index);
21701 
21702       // Convert the integer vector to pointer vector if the element is pointer.
21703       if (EltTy->isPointerTy())
21704         SubVec = Builder.CreateIntToPtr(
21705             SubVec,
21706             FixedVectorType::get(SV->getType()->getElementType(), VecTy));
21707 
21708       SubVecs[SV].push_back(SubVec);
21709     }
21710   }
21711 
21712   // Replace uses of the shufflevector instructions with the sub-vectors
21713   // returned by the load intrinsic. If a shufflevector instruction is
21714   // associated with more than one sub-vector, those sub-vectors will be
21715   // concatenated into a single wide vector.
21716   for (ShuffleVectorInst *SVI : Shuffles) {
21717     auto &SubVec = SubVecs[SVI];
21718     auto *WideVec =
21719         SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
21720     SVI->replaceAllUsesWith(WideVec);
21721   }
21722 
21723   return true;
21724 }
21725 
21726 /// Lower an interleaved store into a vstN intrinsic.
21727 ///
21728 /// E.g. Lower an interleaved store (Factor = 3):
21729 ///        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
21730 ///                                  <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
21731 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4
21732 ///
21733 ///      Into:
21734 ///        %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
21735 ///        %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
21736 ///        %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
21737 ///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
21738 ///
21739 /// Note that the new shufflevectors will be removed and we'll only generate one
21740 /// vst3 instruction in CodeGen.
21741 ///
21742 /// Example for a more general valid mask (Factor 3). Lower:
21743 ///        %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
21744 ///                 <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
21745 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr
21746 ///
21747 ///      Into:
21748 ///        %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7>
21749 ///        %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
21750 ///        %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
21751 ///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
21752 bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
21753                                               ShuffleVectorInst *SVI,
21754                                               unsigned Factor) const {
21755   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
21756          "Invalid interleave factor");
21757 
21758   auto *VecTy = cast<FixedVectorType>(SVI->getType());
21759   assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
21760 
21761   unsigned LaneLen = VecTy->getNumElements() / Factor;
21762   Type *EltTy = VecTy->getElementType();
21763   auto *SubVecTy = FixedVectorType::get(EltTy, LaneLen);
21764 
21765   const DataLayout &DL = SI->getModule()->getDataLayout();
21766   Align Alignment = SI->getAlign();
21767 
  // Skip if we do not have NEON/MVE support for this operation, and skip
  // illegal vector types. We can "legalize" wide vector types into multiple
  // interleaved accesses as long as the vector size is divisible by 128 bits.
21771   if (!isLegalInterleavedAccessType(Factor, SubVecTy, Alignment, DL))
21772     return false;
21773 
21774   unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL);
21775 
21776   Value *Op0 = SVI->getOperand(0);
21777   Value *Op1 = SVI->getOperand(1);
21778   IRBuilder<> Builder(SI);
21779 
21780   // StN intrinsics don't support pointer vectors as arguments. Convert pointer
21781   // vectors to integer vectors.
21782   if (EltTy->isPointerTy()) {
21783     Type *IntTy = DL.getIntPtrType(EltTy);
21784 
21785     // Convert to the corresponding integer vector.
21786     auto *IntVecTy =
21787         FixedVectorType::get(IntTy, cast<FixedVectorType>(Op0->getType()));
21788     Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
21789     Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);
21790 
21791     SubVecTy = FixedVectorType::get(IntTy, LaneLen);
21792   }
21793 
21794   // The base address of the store.
21795   Value *BaseAddr = SI->getPointerOperand();
21796 
21797   if (NumStores > 1) {
21798     // If we're going to generate more than one store, reset the lane length
21799     // and sub-vector type to something legal.
21800     LaneLen /= NumStores;
21801     SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen);
21802   }
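  // For example, storing a <16 x i32> interleave with Factor = 2 gives
  // LaneLen = 8 and an illegal <8 x i32> sub-vector type; it is split here
  // into NumStores = 2, with LaneLen reset to 4 and SubVecTy to the legal
  // <4 x i32>, so two vstN operations are emitted below.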
21803 
21804   assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!");
21805 
21806   auto Mask = SVI->getShuffleMask();
21807 
21808   auto createStoreIntrinsic = [&](Value *BaseAddr,
21809                                   SmallVectorImpl<Value *> &Shuffles) {
21810     if (Subtarget->hasNEON()) {
21811       static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
21812                                                  Intrinsic::arm_neon_vst3,
21813                                                  Intrinsic::arm_neon_vst4};
21814       Type *PtrTy = Builder.getPtrTy(SI->getPointerAddressSpace());
21815       Type *Tys[] = {PtrTy, SubVecTy};
21816 
21817       Function *VstNFunc = Intrinsic::getDeclaration(
21818           SI->getModule(), StoreInts[Factor - 2], Tys);
21819 
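      // The NEON vstN intrinsics take the base pointer, then the Factor
      // sub-vectors, then the alignment as the final operand.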
21820       SmallVector<Value *, 6> Ops;
21821       Ops.push_back(BaseAddr);
21822       append_range(Ops, Shuffles);
21823       Ops.push_back(Builder.getInt32(SI->getAlign().value()));
21824       Builder.CreateCall(VstNFunc, Ops);
21825     } else {
21826       assert((Factor == 2 || Factor == 4) &&
21827              "expected interleave factor of 2 or 4 for MVE");
21828       Intrinsic::ID StoreInts =
21829           Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q;
21830       Type *PtrTy = Builder.getPtrTy(SI->getPointerAddressSpace());
21831       Type *Tys[] = {PtrTy, SubVecTy};
21832       Function *VstNFunc =
21833           Intrinsic::getDeclaration(SI->getModule(), StoreInts, Tys);
21834 
21835       SmallVector<Value *, 6> Ops;
21836       Ops.push_back(BaseAddr);
21837       append_range(Ops, Shuffles);
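      // The MVE vst2q/vst4q intrinsics write a single interleaved "stage" per
      // call, so emit one call per stage with the stage index appended as the
      // last operand.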
21838       for (unsigned F = 0; F < Factor; F++) {
21839         Ops.push_back(Builder.getInt32(F));
21840         Builder.CreateCall(VstNFunc, Ops);
21841         Ops.pop_back();
21842       }
21843     }
21844   };
21845 
21846   for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
    // If we're generating more than one store, compute the base address of
    // subsequent stores as an offset from the previous one.
21849     if (StoreCount > 0)
21850       BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(),
21851                                             BaseAddr, LaneLen * Factor);
21852 
21853     SmallVector<Value *, 4> Shuffles;
21854 
    // Split the shufflevector operands into sub-vectors for the new vstN call.
21856     for (unsigned i = 0; i < Factor; i++) {
21857       unsigned IdxI = StoreCount * LaneLen * Factor + i;
21858       if (Mask[IdxI] >= 0) {
21859         Shuffles.push_back(Builder.CreateShuffleVector(
21860             Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0)));
21861       } else {
21862         unsigned StartMask = 0;
21863         for (unsigned j = 1; j < LaneLen; j++) {
21864           unsigned IdxJ = StoreCount * LaneLen * Factor + j;
21865           if (Mask[IdxJ * Factor + IdxI] >= 0) {
21866             StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
21867             break;
21868           }
21869         }
        // Note: If all elements in a chunk are undef, StartMask stays 0, so we
        // default to using elements starting at index 0. Filling undef gaps
        // with arbitrary elements is fine, since those elements were being
        // written anyway (as undef). StartMask cannot be negative; that is
        // checked in isReInterleaveMask.
21876         Shuffles.push_back(Builder.CreateShuffleVector(
21877             Op0, Op1, createSequentialMask(StartMask, LaneLen, 0)));
21878       }
21879     }
21880 
21881     createStoreIntrinsic(BaseAddr, Shuffles);
21882   }
21883   return true;
21884 }
21885 
21886 enum HABaseType {
21887   HA_UNKNOWN = 0,
21888   HA_FLOAT,
21889   HA_DOUBLE,
21890   HA_VECT64,
21891   HA_VECT128
21892 };
21893 
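// For example, under AAPCS-VFP a struct of four floats is a homogeneous
// aggregate with Base = HA_FLOAT and Members = 4, whereas a struct mixing
// float and double members has no single base type and is rejected below.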
21894 static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
21895                                    uint64_t &Members) {
21896   if (auto *ST = dyn_cast<StructType>(Ty)) {
21897     for (unsigned i = 0; i < ST->getNumElements(); ++i) {
21898       uint64_t SubMembers = 0;
21899       if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
21900         return false;
21901       Members += SubMembers;
21902     }
21903   } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
21904     uint64_t SubMembers = 0;
21905     if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
21906       return false;
21907     Members += SubMembers * AT->getNumElements();
21908   } else if (Ty->isFloatTy()) {
21909     if (Base != HA_UNKNOWN && Base != HA_FLOAT)
21910       return false;
21911     Members = 1;
21912     Base = HA_FLOAT;
21913   } else if (Ty->isDoubleTy()) {
21914     if (Base != HA_UNKNOWN && Base != HA_DOUBLE)
21915       return false;
21916     Members = 1;
21917     Base = HA_DOUBLE;
21918   } else if (auto *VT = dyn_cast<VectorType>(Ty)) {
21919     Members = 1;
21920     switch (Base) {
21921     case HA_FLOAT:
21922     case HA_DOUBLE:
21923       return false;
21924     case HA_VECT64:
21925       return VT->getPrimitiveSizeInBits().getFixedValue() == 64;
21926     case HA_VECT128:
21927       return VT->getPrimitiveSizeInBits().getFixedValue() == 128;
21928     case HA_UNKNOWN:
21929       switch (VT->getPrimitiveSizeInBits().getFixedValue()) {
21930       case 64:
21931         Base = HA_VECT64;
21932         return true;
21933       case 128:
21934         Base = HA_VECT128;
21935         return true;
21936       default:
21937         return false;
21938       }
21939     }
21940   }
21941 
21942   return (Members > 0 && Members <= 4);
21943 }
21944 
21945 /// Return the correct alignment for the current calling convention.
21946 Align ARMTargetLowering::getABIAlignmentForCallingConv(
21947     Type *ArgTy, const DataLayout &DL) const {
21948   const Align ABITypeAlign = DL.getABITypeAlign(ArgTy);
21949   if (!ArgTy->isVectorTy())
21950     return ABITypeAlign;
21951 
21952   // Avoid over-aligning vector parameters. It would require realigning the
21953   // stack and waste space for no real benefit.
21954   return std::min(ABITypeAlign, DL.getStackAlignment());
21955 }
21956 
21957 /// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
21958 /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
21959 /// passing according to AAPCS rules.
21960 bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
21961     Type *Ty, CallingConv::ID CallConv, bool isVarArg,
21962     const DataLayout &DL) const {
21963   if (getEffectiveCallingConv(CallConv, isVarArg) !=
21964       CallingConv::ARM_AAPCS_VFP)
21965     return false;
21966 
21967   HABaseType Base = HA_UNKNOWN;
21968   uint64_t Members = 0;
21969   bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
21970   LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());
21971 
21972   bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
21973   return IsHA || IsIntArray;
21974 }
21975 
21976 Register ARMTargetLowering::getExceptionPointerRegister(
21977     const Constant *PersonalityFn) const {
21978   // Platforms which do not use SjLj EH may return values in these registers
21979   // via the personality function.
21980   return Subtarget->useSjLjEH() ? Register() : ARM::R0;
21981 }
21982 
21983 Register ARMTargetLowering::getExceptionSelectorRegister(
21984     const Constant *PersonalityFn) const {
21985   // Platforms which do not use SjLj EH may return values in these registers
21986   // via the personality function.
21987   return Subtarget->useSjLjEH() ? Register() : ARM::R1;
21988 }
21989 
21990 void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
21991   // Update IsSplitCSR in ARMFunctionInfo.
21992   ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>();
21993   AFI->setIsSplitCSR(true);
21994 }
21995 
21996 void ARMTargetLowering::insertCopiesSplitCSR(
21997     MachineBasicBlock *Entry,
21998     const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
21999   const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
22000   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
22001   if (!IStart)
22002     return;
22003 
22004   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
22005   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
22006   MachineBasicBlock::iterator MBBI = Entry->begin();
22007   for (const MCPhysReg *I = IStart; *I; ++I) {
22008     const TargetRegisterClass *RC = nullptr;
22009     if (ARM::GPRRegClass.contains(*I))
22010       RC = &ARM::GPRRegClass;
22011     else if (ARM::DPRRegClass.contains(*I))
22012       RC = &ARM::DPRRegClass;
22013     else
22014       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
22015 
22016     Register NewVR = MRI->createVirtualRegister(RC);
22017     // Create copy from CSR to a virtual register.
22018     // FIXME: this currently does not emit CFI pseudo-instructions, it works
22019     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
22020     // nounwind. If we want to generalize this later, we may need to emit
22021     // CFI pseudo-instructions.
22022     assert(Entry->getParent()->getFunction().hasFnAttribute(
22023                Attribute::NoUnwind) &&
22024            "Function should be nounwind in insertCopiesSplitCSR!");
22025     Entry->addLiveIn(*I);
22026     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
22027         .addReg(*I);
22028 
22029     // Insert the copy-back instructions right before the terminator.
22030     for (auto *Exit : Exits)
22031       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
22032               TII->get(TargetOpcode::COPY), *I)
22033           .addReg(NewVR);
22034   }
22035 }
22036 
22037 void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
22038   MF.getFrameInfo().computeMaxCallFrameSize(MF);
22039   TargetLoweringBase::finalizeLowering(MF);
22040 }
22041 
22042 bool ARMTargetLowering::isComplexDeinterleavingSupported() const {
22043   return Subtarget->hasMVEIntegerOps();
22044 }
22045 
22046 bool ARMTargetLowering::isComplexDeinterleavingOperationSupported(
22047     ComplexDeinterleavingOperation Operation, Type *Ty) const {
22048   auto *VTy = dyn_cast<FixedVectorType>(Ty);
22049   if (!VTy)
22050     return false;
22051 
22052   auto *ScalarTy = VTy->getScalarType();
22053   unsigned NumElements = VTy->getNumElements();
22054 
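  // MVE vectors are 128 bits wide; wider power-of-two widths are accepted here
  // and later split into 128-bit halves in createComplexDeinterleavingIR.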
22055   unsigned VTyWidth = VTy->getScalarSizeInBits() * NumElements;
22056   if (VTyWidth < 128 || !llvm::isPowerOf2_32(VTyWidth))
22057     return false;
22058 
  // Both VCADD and VCMUL/VCMLA support the same types: F16 and F32.
22060   if (ScalarTy->isHalfTy() || ScalarTy->isFloatTy())
22061     return Subtarget->hasMVEFloatOps();
22062 
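  // Integer element types are only supported for partial complex additions
  // (VCADD); VCMUL/VCMLA have no integer forms.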
22063   if (Operation != ComplexDeinterleavingOperation::CAdd)
22064     return false;
22065 
22066   return Subtarget->hasMVEIntegerOps() &&
22067          (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
22068           ScalarTy->isIntegerTy(32));
22069 }
22070 
22071 Value *ARMTargetLowering::createComplexDeinterleavingIR(
22072     IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
22073     ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
22074     Value *Accumulator) const {
22075 
22076   FixedVectorType *Ty = cast<FixedVectorType>(InputA->getType());
22077 
22078   unsigned TyWidth = Ty->getScalarSizeInBits() * Ty->getNumElements();
22079 
22080   assert(TyWidth >= 128 && "Width of vector type must be at least 128 bits");
22081 
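  // Vectors wider than 128 bits are handled recursively: split each operand
  // (and the accumulator, if present) into two halves, emit the operation on
  // each half, and concatenate the two results.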
22082   if (TyWidth > 128) {
22083     int Stride = Ty->getNumElements() / 2;
22084     auto SplitSeq = llvm::seq<int>(0, Ty->getNumElements());
22085     auto SplitSeqVec = llvm::to_vector(SplitSeq);
22086     ArrayRef<int> LowerSplitMask(&SplitSeqVec[0], Stride);
22087     ArrayRef<int> UpperSplitMask(&SplitSeqVec[Stride], Stride);
22088 
22089     auto *LowerSplitA = B.CreateShuffleVector(InputA, LowerSplitMask);
22090     auto *LowerSplitB = B.CreateShuffleVector(InputB, LowerSplitMask);
22091     auto *UpperSplitA = B.CreateShuffleVector(InputA, UpperSplitMask);
22092     auto *UpperSplitB = B.CreateShuffleVector(InputB, UpperSplitMask);
22093     Value *LowerSplitAcc = nullptr;
22094     Value *UpperSplitAcc = nullptr;
22095 
22096     if (Accumulator) {
22097       LowerSplitAcc = B.CreateShuffleVector(Accumulator, LowerSplitMask);
22098       UpperSplitAcc = B.CreateShuffleVector(Accumulator, UpperSplitMask);
22099     }
22100 
22101     auto *LowerSplitInt = createComplexDeinterleavingIR(
22102         B, OperationType, Rotation, LowerSplitA, LowerSplitB, LowerSplitAcc);
22103     auto *UpperSplitInt = createComplexDeinterleavingIR(
22104         B, OperationType, Rotation, UpperSplitA, UpperSplitB, UpperSplitAcc);
22105 
22106     ArrayRef<int> JoinMask(&SplitSeqVec[0], Ty->getNumElements());
22107     return B.CreateShuffleVector(LowerSplitInt, UpperSplitInt, JoinMask);
22108   }
22109 
22110   auto *IntTy = Type::getInt32Ty(B.getContext());
22111 
22112   ConstantInt *ConstRotation = nullptr;
22113   if (OperationType == ComplexDeinterleavingOperation::CMulPartial) {
22114     ConstRotation = ConstantInt::get(IntTy, (int)Rotation);
22115 
22116     if (Accumulator)
22117       return B.CreateIntrinsic(Intrinsic::arm_mve_vcmlaq, Ty,
22118                                {ConstRotation, Accumulator, InputB, InputA});
22119     return B.CreateIntrinsic(Intrinsic::arm_mve_vcmulq, Ty,
22120                              {ConstRotation, InputB, InputA});
22121   }
22122 
22123   if (OperationType == ComplexDeinterleavingOperation::CAdd) {
22124     // 1 means the value is not halved.
22125     auto *ConstHalving = ConstantInt::get(IntTy, 1);
22126 
22127     if (Rotation == ComplexDeinterleavingRotation::Rotation_90)
22128       ConstRotation = ConstantInt::get(IntTy, 0);
22129     else if (Rotation == ComplexDeinterleavingRotation::Rotation_270)
22130       ConstRotation = ConstantInt::get(IntTy, 1);
22131 
22132     if (!ConstRotation)
22133       return nullptr; // Invalid rotation for arm_mve_vcaddq
22134 
22135     return B.CreateIntrinsic(Intrinsic::arm_mve_vcaddq, Ty,
22136                              {ConstHalving, ConstRotation, InputA, InputB});
22137   }
22138 
22139   return nullptr;
22140 }
22141