1 //===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that ARM uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "ARMISelLowering.h"
15 #include "ARMBaseInstrInfo.h"
16 #include "ARMBaseRegisterInfo.h"
17 #include "ARMCallingConv.h"
18 #include "ARMConstantPoolValue.h"
19 #include "ARMMachineFunctionInfo.h"
20 #include "ARMPerfectShuffle.h"
21 #include "ARMRegisterInfo.h"
22 #include "ARMSelectionDAGInfo.h"
23 #include "ARMSubtarget.h"
24 #include "MCTargetDesc/ARMAddressingModes.h"
25 #include "MCTargetDesc/ARMBaseInfo.h"
26 #include "Utils/ARMBaseInfo.h"
27 #include "llvm/ADT/APFloat.h"
28 #include "llvm/ADT/APInt.h"
29 #include "llvm/ADT/ArrayRef.h"
30 #include "llvm/ADT/BitVector.h"
31 #include "llvm/ADT/DenseMap.h"
32 #include "llvm/ADT/STLExtras.h"
33 #include "llvm/ADT/SmallPtrSet.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/StringExtras.h"
37 #include "llvm/ADT/StringRef.h"
38 #include "llvm/ADT/StringSwitch.h"
39 #include "llvm/ADT/Triple.h"
40 #include "llvm/ADT/Twine.h"
41 #include "llvm/Analysis/VectorUtils.h"
42 #include "llvm/CodeGen/CallingConvLower.h"
43 #include "llvm/CodeGen/ISDOpcodes.h"
44 #include "llvm/CodeGen/IntrinsicLowering.h"
45 #include "llvm/CodeGen/MachineBasicBlock.h"
46 #include "llvm/CodeGen/MachineConstantPool.h"
47 #include "llvm/CodeGen/MachineFrameInfo.h"
48 #include "llvm/CodeGen/MachineFunction.h"
49 #include "llvm/CodeGen/MachineInstr.h"
50 #include "llvm/CodeGen/MachineInstrBuilder.h"
51 #include "llvm/CodeGen/MachineJumpTableInfo.h"
52 #include "llvm/CodeGen/MachineMemOperand.h"
53 #include "llvm/CodeGen/MachineOperand.h"
54 #include "llvm/CodeGen/MachineRegisterInfo.h"
55 #include "llvm/CodeGen/RuntimeLibcalls.h"
56 #include "llvm/CodeGen/SelectionDAG.h"
57 #include "llvm/CodeGen/SelectionDAGNodes.h"
58 #include "llvm/CodeGen/TargetInstrInfo.h"
59 #include "llvm/CodeGen/TargetLowering.h"
60 #include "llvm/CodeGen/TargetOpcodes.h"
61 #include "llvm/CodeGen/TargetRegisterInfo.h"
62 #include "llvm/CodeGen/TargetSubtargetInfo.h"
63 #include "llvm/CodeGen/ValueTypes.h"
64 #include "llvm/IR/Attributes.h"
65 #include "llvm/IR/CallingConv.h"
66 #include "llvm/IR/Constant.h"
67 #include "llvm/IR/Constants.h"
68 #include "llvm/IR/DataLayout.h"
69 #include "llvm/IR/DebugLoc.h"
70 #include "llvm/IR/DerivedTypes.h"
71 #include "llvm/IR/Function.h"
72 #include "llvm/IR/GlobalAlias.h"
73 #include "llvm/IR/GlobalValue.h"
74 #include "llvm/IR/GlobalVariable.h"
75 #include "llvm/IR/IRBuilder.h"
76 #include "llvm/IR/InlineAsm.h"
77 #include "llvm/IR/Instruction.h"
78 #include "llvm/IR/Instructions.h"
79 #include "llvm/IR/IntrinsicInst.h"
80 #include "llvm/IR/Intrinsics.h"
81 #include "llvm/IR/IntrinsicsARM.h"
82 #include "llvm/IR/Module.h"
83 #include "llvm/IR/PatternMatch.h"
84 #include "llvm/IR/Type.h"
85 #include "llvm/IR/User.h"
86 #include "llvm/IR/Value.h"
87 #include "llvm/MC/MCInstrDesc.h"
88 #include "llvm/MC/MCInstrItineraries.h"
89 #include "llvm/MC/MCRegisterInfo.h"
90 #include "llvm/MC/MCSchedule.h"
91 #include "llvm/Support/AtomicOrdering.h"
92 #include "llvm/Support/BranchProbability.h"
93 #include "llvm/Support/Casting.h"
94 #include "llvm/Support/CodeGen.h"
95 #include "llvm/Support/CommandLine.h"
96 #include "llvm/Support/Compiler.h"
97 #include "llvm/Support/Debug.h"
98 #include "llvm/Support/ErrorHandling.h"
99 #include "llvm/Support/KnownBits.h"
100 #include "llvm/Support/MachineValueType.h"
101 #include "llvm/Support/MathExtras.h"
102 #include "llvm/Support/raw_ostream.h"
103 #include "llvm/Target/TargetMachine.h"
104 #include "llvm/Target/TargetOptions.h"
105 #include <algorithm>
106 #include <cassert>
107 #include <cstdint>
108 #include <cstdlib>
109 #include <iterator>
110 #include <limits>
111 #include <string>
112 #include <tuple>
113 #include <utility>
114 #include <vector>
115 
116 using namespace llvm;
117 using namespace llvm::PatternMatch;
118 
119 #define DEBUG_TYPE "arm-isel"
120 
121 STATISTIC(NumTailCalls, "Number of tail calls");
122 STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
123 STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
124 STATISTIC(NumConstpoolPromoted,
125   "Number of constants with their storage promoted into constant pools");
126 
127 static cl::opt<bool>
128 ARMInterworking("arm-interworking", cl::Hidden,
129   cl::desc("Enable / disable ARM interworking (for debugging only)"),
130   cl::init(true));
131 
132 static cl::opt<bool> EnableConstpoolPromotion(
133     "arm-promote-constant", cl::Hidden,
134     cl::desc("Enable / disable promotion of unnamed_addr constants into "
135              "constant pools"),
136     cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
137 static cl::opt<unsigned> ConstpoolPromotionMaxSize(
138     "arm-promote-constant-max-size", cl::Hidden,
139     cl::desc("Maximum size of constant to promote into a constant pool"),
140     cl::init(64));
141 static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
142     "arm-promote-constant-max-total", cl::Hidden,
143     cl::desc("Maximum size of ALL constants to promote into a constant pool"),
144     cl::init(128));
145 
146 static cl::opt<unsigned>
147 MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
148   cl::desc("Maximum interleave factor for MVE VLDn to generate."),
149   cl::init(2));
150 
151 // The APCS parameter registers.
152 static const MCPhysReg GPRArgRegs[] = {
153   ARM::R0, ARM::R1, ARM::R2, ARM::R3
154 };
155 
156 void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
157                                        MVT PromotedBitwiseVT) {
158   if (VT != PromotedLdStVT) {
159     setOperationAction(ISD::LOAD, VT, Promote);
160     AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);
161 
162     setOperationAction(ISD::STORE, VT, Promote);
163     AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
164   }
165 
166   MVT ElemTy = VT.getVectorElementType();
167   if (ElemTy != MVT::f64)
168     setOperationAction(ISD::SETCC, VT, Custom);
169   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
170   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
171   if (ElemTy == MVT::i32) {
172     setOperationAction(ISD::SINT_TO_FP, VT, Custom);
173     setOperationAction(ISD::UINT_TO_FP, VT, Custom);
174     setOperationAction(ISD::FP_TO_SINT, VT, Custom);
175     setOperationAction(ISD::FP_TO_UINT, VT, Custom);
176   } else {
177     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
178     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
179     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
180     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
181   }
182   setOperationAction(ISD::BUILD_VECTOR,      VT, Custom);
183   setOperationAction(ISD::VECTOR_SHUFFLE,    VT, Custom);
184   setOperationAction(ISD::CONCAT_VECTORS,    VT, Legal);
185   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
186   setOperationAction(ISD::SELECT,            VT, Expand);
187   setOperationAction(ISD::SELECT_CC,         VT, Expand);
188   setOperationAction(ISD::VSELECT,           VT, Expand);
189   setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
190   if (VT.isInteger()) {
191     setOperationAction(ISD::SHL, VT, Custom);
192     setOperationAction(ISD::SRA, VT, Custom);
193     setOperationAction(ISD::SRL, VT, Custom);
194   }
195 
196   // Promote all bit-wise operations.
197   if (VT.isInteger() && VT != PromotedBitwiseVT) {
198     setOperationAction(ISD::AND, VT, Promote);
199     AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
200     setOperationAction(ISD::OR,  VT, Promote);
201     AddPromotedToType (ISD::OR,  VT, PromotedBitwiseVT);
202     setOperationAction(ISD::XOR, VT, Promote);
203     AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
204   }
205 
206   // Neon does not support vector divide/remainder operations.
207   setOperationAction(ISD::SDIV, VT, Expand);
208   setOperationAction(ISD::UDIV, VT, Expand);
209   setOperationAction(ISD::FDIV, VT, Expand);
210   setOperationAction(ISD::SREM, VT, Expand);
211   setOperationAction(ISD::UREM, VT, Expand);
212   setOperationAction(ISD::FREM, VT, Expand);
213   setOperationAction(ISD::SDIVREM, VT, Expand);
214   setOperationAction(ISD::UDIVREM, VT, Expand);
215 
216   if (!VT.isFloatingPoint() &&
217       VT != MVT::v2i64 && VT != MVT::v1i64)
218     for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
219       setOperationAction(Opcode, VT, Legal);
220   if (!VT.isFloatingPoint())
221     for (auto Opcode : {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT})
222       setOperationAction(Opcode, VT, Legal);
223 }
224 
225 void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
226   addRegisterClass(VT, &ARM::DPRRegClass);
227   addTypeForNEON(VT, MVT::f64, MVT::v2i32);
228 }
229 
230 void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
231   addRegisterClass(VT, &ARM::DPairRegClass);
232   addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
233 }
234 
235 void ARMTargetLowering::setAllExpand(MVT VT) {
236   for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
237     setOperationAction(Opc, VT, Expand);
238 
239   // We support these really simple operations even on types where all
240   // the actual arithmetic has to be broken down into simpler
241   // operations or turned into library calls.
242   setOperationAction(ISD::BITCAST, VT, Legal);
243   setOperationAction(ISD::LOAD, VT, Legal);
244   setOperationAction(ISD::STORE, VT, Legal);
245   setOperationAction(ISD::UNDEF, VT, Legal);
246 }
247 
248 void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
249                                        LegalizeAction Action) {
250   setLoadExtAction(ISD::EXTLOAD,  From, To, Action);
251   setLoadExtAction(ISD::ZEXTLOAD, From, To, Action);
252   setLoadExtAction(ISD::SEXTLOAD, From, To, Action);
253 }
254 
255 void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
256   const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 };
257 
258   for (auto VT : IntTypes) {
259     addRegisterClass(VT, &ARM::MQPRRegClass);
260     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
261     setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
262     setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
263     setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
264     setOperationAction(ISD::SHL, VT, Custom);
265     setOperationAction(ISD::SRA, VT, Custom);
266     setOperationAction(ISD::SRL, VT, Custom);
267     setOperationAction(ISD::SMIN, VT, Legal);
268     setOperationAction(ISD::SMAX, VT, Legal);
269     setOperationAction(ISD::UMIN, VT, Legal);
270     setOperationAction(ISD::UMAX, VT, Legal);
271     setOperationAction(ISD::ABS, VT, Legal);
272     setOperationAction(ISD::SETCC, VT, Custom);
273     setOperationAction(ISD::MLOAD, VT, Custom);
274     setOperationAction(ISD::MSTORE, VT, Legal);
275     setOperationAction(ISD::CTLZ, VT, Legal);
276     setOperationAction(ISD::CTTZ, VT, Custom);
277     setOperationAction(ISD::BITREVERSE, VT, Legal);
278     setOperationAction(ISD::BSWAP, VT, Legal);
279     setOperationAction(ISD::SADDSAT, VT, Legal);
280     setOperationAction(ISD::UADDSAT, VT, Legal);
281     setOperationAction(ISD::SSUBSAT, VT, Legal);
282     setOperationAction(ISD::USUBSAT, VT, Legal);
283 
284     // No native support for these.
285     setOperationAction(ISD::UDIV, VT, Expand);
286     setOperationAction(ISD::SDIV, VT, Expand);
287     setOperationAction(ISD::UREM, VT, Expand);
288     setOperationAction(ISD::SREM, VT, Expand);
289     setOperationAction(ISD::UDIVREM, VT, Expand);
290     setOperationAction(ISD::SDIVREM, VT, Expand);
291     setOperationAction(ISD::CTPOP, VT, Expand);
292 
293     // Vector reductions
294     setOperationAction(ISD::VECREDUCE_ADD, VT, Legal);
295     setOperationAction(ISD::VECREDUCE_SMAX, VT, Legal);
296     setOperationAction(ISD::VECREDUCE_UMAX, VT, Legal);
297     setOperationAction(ISD::VECREDUCE_SMIN, VT, Legal);
298     setOperationAction(ISD::VECREDUCE_UMIN, VT, Legal);
299     setOperationAction(ISD::VECREDUCE_MUL, VT, Custom);
300     setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
301     setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
302     setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
303 
304     if (!HasMVEFP) {
305       setOperationAction(ISD::SINT_TO_FP, VT, Expand);
306       setOperationAction(ISD::UINT_TO_FP, VT, Expand);
307       setOperationAction(ISD::FP_TO_SINT, VT, Expand);
308       setOperationAction(ISD::FP_TO_UINT, VT, Expand);
309     }
310 
311     // Pre and Post inc are supported on loads and stores
312     for (unsigned im = (unsigned)ISD::PRE_INC;
313          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
314       setIndexedLoadAction(im, VT, Legal);
315       setIndexedStoreAction(im, VT, Legal);
316       setIndexedMaskedLoadAction(im, VT, Legal);
317       setIndexedMaskedStoreAction(im, VT, Legal);
318     }
319   }
320 
321   const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 };
322   for (auto VT : FloatTypes) {
323     addRegisterClass(VT, &ARM::MQPRRegClass);
324     if (!HasMVEFP)
325       setAllExpand(VT);
326 
327     // These are legal or custom whether we have MVE.fp or not
328     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
329     setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
330     setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getVectorElementType(), Custom);
331     setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
332     setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
333     setOperationAction(ISD::BUILD_VECTOR, VT.getVectorElementType(), Custom);
334     setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
335     setOperationAction(ISD::SETCC, VT, Custom);
336     setOperationAction(ISD::MLOAD, VT, Custom);
337     setOperationAction(ISD::MSTORE, VT, Legal);
338 
339     // Pre and Post inc are supported on loads and stores
340     for (unsigned im = (unsigned)ISD::PRE_INC;
341          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
342       setIndexedLoadAction(im, VT, Legal);
343       setIndexedStoreAction(im, VT, Legal);
344       setIndexedMaskedLoadAction(im, VT, Legal);
345       setIndexedMaskedStoreAction(im, VT, Legal);
346     }
347 
348     if (HasMVEFP) {
349       setOperationAction(ISD::FMINNUM, VT, Legal);
350       setOperationAction(ISD::FMAXNUM, VT, Legal);
351       setOperationAction(ISD::FROUND, VT, Legal);
352       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
353       setOperationAction(ISD::VECREDUCE_FMUL, VT, Custom);
354       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
355       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
356 
357       // No native support for these.
358       setOperationAction(ISD::FDIV, VT, Expand);
359       setOperationAction(ISD::FREM, VT, Expand);
360       setOperationAction(ISD::FSQRT, VT, Expand);
361       setOperationAction(ISD::FSIN, VT, Expand);
362       setOperationAction(ISD::FCOS, VT, Expand);
363       setOperationAction(ISD::FPOW, VT, Expand);
364       setOperationAction(ISD::FLOG, VT, Expand);
365       setOperationAction(ISD::FLOG2, VT, Expand);
366       setOperationAction(ISD::FLOG10, VT, Expand);
367       setOperationAction(ISD::FEXP, VT, Expand);
368       setOperationAction(ISD::FEXP2, VT, Expand);
369       setOperationAction(ISD::FNEARBYINT, VT, Expand);
370     }
371   }
372 
373   // Custom Expand smaller than legal vector reductions to prevent false zero
374   // items being added.
375   setOperationAction(ISD::VECREDUCE_FADD, MVT::v4f16, Custom);
376   setOperationAction(ISD::VECREDUCE_FMUL, MVT::v4f16, Custom);
377   setOperationAction(ISD::VECREDUCE_FMIN, MVT::v4f16, Custom);
378   setOperationAction(ISD::VECREDUCE_FMAX, MVT::v4f16, Custom);
379   setOperationAction(ISD::VECREDUCE_FADD, MVT::v2f16, Custom);
380   setOperationAction(ISD::VECREDUCE_FMUL, MVT::v2f16, Custom);
381   setOperationAction(ISD::VECREDUCE_FMIN, MVT::v2f16, Custom);
382   setOperationAction(ISD::VECREDUCE_FMAX, MVT::v2f16, Custom);
383 
384   // We 'support' these types up to bitcast/load/store level, regardless of
385   // MVE integer-only / float support. Only doing FP data processing on the FP
386   // vector types is inhibited at integer-only level.
387   const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 };
388   for (auto VT : LongTypes) {
389     addRegisterClass(VT, &ARM::MQPRRegClass);
390     setAllExpand(VT);
391     setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
392     setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
393     setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
394   }
395   // We can do bitwise operations on v2i64 vectors
396   setOperationAction(ISD::AND, MVT::v2i64, Legal);
397   setOperationAction(ISD::OR, MVT::v2i64, Legal);
398   setOperationAction(ISD::XOR, MVT::v2i64, Legal);
399 
400   // It is legal to extload from v4i8 to v4i16 or v4i32.
401   addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal);
402   addAllExtLoads(MVT::v4i32, MVT::v4i16, Legal);
403   addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);
404 
405   // It is legal to sign extend from v4i8/v4i16 to v4i32 or v8i8 to v8i16.
406   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8,  Legal);
407   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
408   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
409   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i8,  Legal);
410   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i16, Legal);
411 
412   // Some truncating stores are legal too.
413   setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
414   setTruncStoreAction(MVT::v4i32, MVT::v4i8,  Legal);
415   setTruncStoreAction(MVT::v8i16, MVT::v8i8,  Legal);
416 
417   // Pre and Post inc on these are legal, given the correct extends
418   for (unsigned im = (unsigned)ISD::PRE_INC;
419        im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
420     for (auto VT : {MVT::v8i8, MVT::v4i8, MVT::v4i16}) {
421       setIndexedLoadAction(im, VT, Legal);
422       setIndexedStoreAction(im, VT, Legal);
423       setIndexedMaskedLoadAction(im, VT, Legal);
424       setIndexedMaskedStoreAction(im, VT, Legal);
425     }
426   }
427 
428   // Predicate types
429   const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1};
430   for (auto VT : pTypes) {
431     addRegisterClass(VT, &ARM::VCCRRegClass);
432     setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
433     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
434     setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
435     setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
436     setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
437     setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
438     setOperationAction(ISD::SETCC, VT, Custom);
439     setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
440     setOperationAction(ISD::LOAD, VT, Custom);
441     setOperationAction(ISD::STORE, VT, Custom);
442   }
443 }
444 
445 ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
446                                      const ARMSubtarget &STI)
447     : TargetLowering(TM), Subtarget(&STI) {
448   RegInfo = Subtarget->getRegisterInfo();
449   Itins = Subtarget->getInstrItineraryData();
450 
451   setBooleanContents(ZeroOrOneBooleanContent);
452   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
453 
454   if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() &&
455       !Subtarget->isTargetWatchOS()) {
456     bool IsHFTarget = TM.Options.FloatABIType == FloatABI::Hard;
457     for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
458       setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID),
459                             IsHFTarget ? CallingConv::ARM_AAPCS_VFP
460                                        : CallingConv::ARM_AAPCS);
461   }
462 
463   if (Subtarget->isTargetMachO()) {
464     // Uses VFP for Thumb libfuncs if available.
465     if (Subtarget->isThumb() && Subtarget->hasVFP2Base() &&
466         Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
467       static const struct {
468         const RTLIB::Libcall Op;
469         const char * const Name;
470         const ISD::CondCode Cond;
471       } LibraryCalls[] = {
472         // Single-precision floating-point arithmetic.
473         { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
474         { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
475         { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
476         { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },
477 
478         // Double-precision floating-point arithmetic.
479         { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
480         { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
481         { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
482         { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },
483 
484         // Single-precision comparisons.
485         { RTLIB::OEQ_F32, "__eqsf2vfp",    ISD::SETNE },
486         { RTLIB::UNE_F32, "__nesf2vfp",    ISD::SETNE },
487         { RTLIB::OLT_F32, "__ltsf2vfp",    ISD::SETNE },
488         { RTLIB::OLE_F32, "__lesf2vfp",    ISD::SETNE },
489         { RTLIB::OGE_F32, "__gesf2vfp",    ISD::SETNE },
490         { RTLIB::OGT_F32, "__gtsf2vfp",    ISD::SETNE },
491         { RTLIB::UO_F32,  "__unordsf2vfp", ISD::SETNE },
492 
493         // Double-precision comparisons.
494         { RTLIB::OEQ_F64, "__eqdf2vfp",    ISD::SETNE },
495         { RTLIB::UNE_F64, "__nedf2vfp",    ISD::SETNE },
496         { RTLIB::OLT_F64, "__ltdf2vfp",    ISD::SETNE },
497         { RTLIB::OLE_F64, "__ledf2vfp",    ISD::SETNE },
498         { RTLIB::OGE_F64, "__gedf2vfp",    ISD::SETNE },
499         { RTLIB::OGT_F64, "__gtdf2vfp",    ISD::SETNE },
500         { RTLIB::UO_F64,  "__unorddf2vfp", ISD::SETNE },
501 
502         // Floating-point to integer conversions.
503         // i64 conversions are done via library routines even when generating VFP
504         // instructions, so use the same ones.
505         { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp",    ISD::SETCC_INVALID },
506         { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
507         { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp",    ISD::SETCC_INVALID },
508         { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },
509 
510         // Conversions between floating types.
511         { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp",  ISD::SETCC_INVALID },
512         { RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp", ISD::SETCC_INVALID },
513 
514         // Integer to floating-point conversions.
515         // i64 conversions are done via library routines even when generating VFP
516         // instructions, so use the same ones.
517         // FIXME: There appears to be some naming inconsistency in ARM libgcc:
518         // e.g., __floatunsidf vs. __floatunssidfvfp.
519         { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp",    ISD::SETCC_INVALID },
520         { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
521         { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp",    ISD::SETCC_INVALID },
522         { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
523       };
524 
525       for (const auto &LC : LibraryCalls) {
526         setLibcallName(LC.Op, LC.Name);
527         if (LC.Cond != ISD::SETCC_INVALID)
528           setCmpLibcallCC(LC.Op, LC.Cond);
529       }
530     }
531   }
532 
533   // These libcalls are not available in 32-bit.
534   setLibcallName(RTLIB::SHL_I128, nullptr);
535   setLibcallName(RTLIB::SRL_I128, nullptr);
536   setLibcallName(RTLIB::SRA_I128, nullptr);
537 
538   // RTLIB
539   if (Subtarget->isAAPCS_ABI() &&
540       (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
541        Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
542     static const struct {
543       const RTLIB::Libcall Op;
544       const char * const Name;
545       const CallingConv::ID CC;
546       const ISD::CondCode Cond;
547     } LibraryCalls[] = {
548       // Double-precision floating-point arithmetic helper functions
549       // RTABI chapter 4.1.2, Table 2
550       { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
551       { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
552       { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
553       { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
554 
555       // Double-precision floating-point comparison helper functions
556       // RTABI chapter 4.1.2, Table 3
557       { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
558       { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
559       { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
560       { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
561       { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
562       { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
563       { RTLIB::UO_F64,  "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
564 
565       // Single-precision floating-point arithmetic helper functions
566       // RTABI chapter 4.1.2, Table 4
567       { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
568       { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
569       { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
570       { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
571 
572       // Single-precision floating-point comparison helper functions
573       // RTABI chapter 4.1.2, Table 5
574       { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
575       { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
576       { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
577       { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
578       { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
579       { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
580       { RTLIB::UO_F32,  "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
581 
582       // Floating-point to integer conversions.
583       // RTABI chapter 4.1.2, Table 6
584       { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
585       { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
586       { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
587       { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
588       { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
589       { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
590       { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
591       { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
592 
593       // Conversions between floating types.
594       // RTABI chapter 4.1.2, Table 7
595       { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
596       { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
597       { RTLIB::FPEXT_F32_F64,   "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
598 
599       // Integer to floating-point conversions.
600       // RTABI chapter 4.1.2, Table 8
601       { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
602       { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
603       { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
604       { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
605       { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
606       { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
607       { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
608       { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
609 
610       // Long long helper functions
611       // RTABI chapter 4.2, Table 9
612       { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
613       { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
614       { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
615       { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
616 
617       // Integer division functions
618       // RTABI chapter 4.3.1
619       { RTLIB::SDIV_I8,  "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
620       { RTLIB::SDIV_I16, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
621       { RTLIB::SDIV_I32, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
622       { RTLIB::SDIV_I64, "__aeabi_ldivmod",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
623       { RTLIB::UDIV_I8,  "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
624       { RTLIB::UDIV_I16, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
625       { RTLIB::UDIV_I32, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
626       { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
627     };
628 
629     for (const auto &LC : LibraryCalls) {
630       setLibcallName(LC.Op, LC.Name);
631       setLibcallCallingConv(LC.Op, LC.CC);
632       if (LC.Cond != ISD::SETCC_INVALID)
633         setCmpLibcallCC(LC.Op, LC.Cond);
634     }
635 
636     // EABI dependent RTLIB
637     if (TM.Options.EABIVersion == EABI::EABI4 ||
638         TM.Options.EABIVersion == EABI::EABI5) {
639       static const struct {
640         const RTLIB::Libcall Op;
641         const char *const Name;
642         const CallingConv::ID CC;
643         const ISD::CondCode Cond;
644       } MemOpsLibraryCalls[] = {
645         // Memory operations
646         // RTABI chapter 4.3.4
647         { RTLIB::MEMCPY,  "__aeabi_memcpy",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
648         { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
649         { RTLIB::MEMSET,  "__aeabi_memset",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
650       };
651 
652       for (const auto &LC : MemOpsLibraryCalls) {
653         setLibcallName(LC.Op, LC.Name);
654         setLibcallCallingConv(LC.Op, LC.CC);
655         if (LC.Cond != ISD::SETCC_INVALID)
656           setCmpLibcallCC(LC.Op, LC.Cond);
657       }
658     }
659   }
660 
661   if (Subtarget->isTargetWindows()) {
662     static const struct {
663       const RTLIB::Libcall Op;
664       const char * const Name;
665       const CallingConv::ID CC;
666     } LibraryCalls[] = {
667       { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
668       { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
669       { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
670       { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
671       { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
672       { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
673       { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
674       { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
675     };
676 
677     for (const auto &LC : LibraryCalls) {
678       setLibcallName(LC.Op, LC.Name);
679       setLibcallCallingConv(LC.Op, LC.CC);
680     }
681   }
682 
683   // Use divmod compiler-rt calls for iOS 5.0 and later.
684   if (Subtarget->isTargetMachO() &&
685       !(Subtarget->isTargetIOS() &&
686         Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
687     setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
688     setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
689   }
690 
691   // The half <-> float conversion functions are always soft-float on
692   // non-watchos platforms, but are needed for some targets which use a
693   // hard-float calling convention by default.
694   if (!Subtarget->isTargetWatchABI()) {
695     if (Subtarget->isAAPCS_ABI()) {
696       setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
697       setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
698       setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
699     } else {
700       setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
701       setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
702       setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
703     }
704   }
705 
706   // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
707   // a __gnu_ prefix (which is the default).
708   if (Subtarget->isTargetAEABI()) {
709     static const struct {
710       const RTLIB::Libcall Op;
711       const char * const Name;
712       const CallingConv::ID CC;
713     } LibraryCalls[] = {
714       { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS },
715       { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS },
716       { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS },
717     };
718 
719     for (const auto &LC : LibraryCalls) {
720       setLibcallName(LC.Op, LC.Name);
721       setLibcallCallingConv(LC.Op, LC.CC);
722     }
723   }
724 
725   if (Subtarget->isThumb1Only())
726     addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
727   else
728     addRegisterClass(MVT::i32, &ARM::GPRRegClass);
729 
730   if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() &&
731       Subtarget->hasFPRegs()) {
732     addRegisterClass(MVT::f32, &ARM::SPRRegClass);
733     addRegisterClass(MVT::f64, &ARM::DPRRegClass);
734     if (!Subtarget->hasVFP2Base())
735       setAllExpand(MVT::f32);
736     if (!Subtarget->hasFP64())
737       setAllExpand(MVT::f64);
738   }
739 
740   if (Subtarget->hasFullFP16()) {
741     addRegisterClass(MVT::f16, &ARM::HPRRegClass);
742     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
743     setOperationAction(ISD::BITCAST, MVT::f16, Custom);
744 
745     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
746     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
747   }
748 
749   if (Subtarget->hasBF16()) {
750     addRegisterClass(MVT::bf16, &ARM::HPRRegClass);
751     setAllExpand(MVT::bf16);
752     if (!Subtarget->hasFullFP16())
753       setOperationAction(ISD::BITCAST, MVT::bf16, Custom);
754   }
755 
756   for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
757     for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
758       setTruncStoreAction(VT, InnerVT, Expand);
759       addAllExtLoads(VT, InnerVT, Expand);
760     }
761 
762     setOperationAction(ISD::MULHS, VT, Expand);
763     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
764     setOperationAction(ISD::MULHU, VT, Expand);
765     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
766 
767     setOperationAction(ISD::BSWAP, VT, Expand);
768   }
769 
770   setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
771   setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
772 
773   setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
774   setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);
775 
776   if (Subtarget->hasMVEIntegerOps())
777     addMVEVectorTypes(Subtarget->hasMVEFloatOps());
778 
779   // Combine low-overhead loop intrinsics so that we can lower i1 types.
780   if (Subtarget->hasLOB()) {
781     setTargetDAGCombine(ISD::BRCOND);
782     setTargetDAGCombine(ISD::BR_CC);
783   }
784 
785   if (Subtarget->hasNEON()) {
786     addDRTypeForNEON(MVT::v2f32);
787     addDRTypeForNEON(MVT::v8i8);
788     addDRTypeForNEON(MVT::v4i16);
789     addDRTypeForNEON(MVT::v2i32);
790     addDRTypeForNEON(MVT::v1i64);
791 
792     addQRTypeForNEON(MVT::v4f32);
793     addQRTypeForNEON(MVT::v2f64);
794     addQRTypeForNEON(MVT::v16i8);
795     addQRTypeForNEON(MVT::v8i16);
796     addQRTypeForNEON(MVT::v4i32);
797     addQRTypeForNEON(MVT::v2i64);
798 
799     if (Subtarget->hasFullFP16()) {
800       addQRTypeForNEON(MVT::v8f16);
801       addDRTypeForNEON(MVT::v4f16);
802     }
803 
804     if (Subtarget->hasBF16()) {
805       addQRTypeForNEON(MVT::v8bf16);
806       addDRTypeForNEON(MVT::v4bf16);
807     }
808   }
809 
810   if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) {
811     // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
812     // none of Neon, MVE or VFP supports any arithmetic operations on it.
813     setOperationAction(ISD::FADD, MVT::v2f64, Expand);
814     setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
815     setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
816     // FIXME: Code duplication: FDIV and FREM are expanded always, see
817     // ARMTargetLowering::addTypeForNEON method for details.
818     setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
819     setOperationAction(ISD::FREM, MVT::v2f64, Expand);
820     // FIXME: Create unittest.
821     // In another words, find a way when "copysign" appears in DAG with vector
822     // operands.
823     setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
824     // FIXME: Code duplication: SETCC has custom operation action, see
825     // ARMTargetLowering::addTypeForNEON method for details.
826     setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
827     // FIXME: Create unittest for FNEG and for FABS.
828     setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
829     setOperationAction(ISD::FABS, MVT::v2f64, Expand);
830     setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
831     setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
832     setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
833     setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
834     setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
835     setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
836     setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
837     setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
838     setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
839     // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
840     setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
841     setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
842     setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
843     setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
844     setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
845     setOperationAction(ISD::FMA, MVT::v2f64, Expand);
846   }
847 
848   if (Subtarget->hasNEON()) {
849     // The same with v4f32. But keep in mind that vadd, vsub, vmul are natively
850     // supported for v4f32.
851     setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
852     setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
853     setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
854     setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
855     setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
856     setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
857     setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
858     setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
859     setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
860     setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
861     setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
862     setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
863     setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
864     setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);
865 
866     // Mark v2f32 intrinsics.
867     setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
868     setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
869     setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
870     setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
871     setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
872     setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
873     setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
874     setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
875     setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
876     setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
877     setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
878     setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
879     setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
880     setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);
881 
882     // Neon does not support some operations on v1i64 and v2i64 types.
883     setOperationAction(ISD::MUL, MVT::v1i64, Expand);
884     // Custom handling for some quad-vector types to detect VMULL.
885     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
886     setOperationAction(ISD::MUL, MVT::v4i32, Custom);
887     setOperationAction(ISD::MUL, MVT::v2i64, Custom);
888     // Custom handling for some vector types to avoid expensive expansions
889     setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
890     setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
891     setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
892     setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
893     // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
894     // a destination type that is wider than the source, and nor does
895     // it have a FP_TO_[SU]INT instruction with a narrower destination than
896     // source.
897     setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
898     setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
899     setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
900     setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
901     setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
902     setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
903     setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
904     setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
905 
906     setOperationAction(ISD::FP_ROUND,   MVT::v2f32, Expand);
907     setOperationAction(ISD::FP_EXTEND,  MVT::v2f64, Expand);
908 
909     // NEON does not have single instruction CTPOP for vectors with element
910     // types wider than 8-bits.  However, custom lowering can leverage the
911     // v8i8/v16i8 vcnt instruction.
912     setOperationAction(ISD::CTPOP,      MVT::v2i32, Custom);
913     setOperationAction(ISD::CTPOP,      MVT::v4i32, Custom);
914     setOperationAction(ISD::CTPOP,      MVT::v4i16, Custom);
915     setOperationAction(ISD::CTPOP,      MVT::v8i16, Custom);
916     setOperationAction(ISD::CTPOP,      MVT::v1i64, Custom);
917     setOperationAction(ISD::CTPOP,      MVT::v2i64, Custom);
918 
919     setOperationAction(ISD::CTLZ,       MVT::v1i64, Expand);
920     setOperationAction(ISD::CTLZ,       MVT::v2i64, Expand);
921 
922     // NEON does not have single instruction CTTZ for vectors.
923     setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
924     setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
925     setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
926     setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);
927 
928     setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
929     setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
930     setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
931     setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);
932 
933     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
934     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
935     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
936     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);
937 
938     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
939     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
940     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
941     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
942 
943     // NEON only has FMA instructions as of VFP4.
944     if (!Subtarget->hasVFP4Base()) {
945       setOperationAction(ISD::FMA, MVT::v2f32, Expand);
946       setOperationAction(ISD::FMA, MVT::v4f32, Expand);
947     }
948 
949     setTargetDAGCombine(ISD::SHL);
950     setTargetDAGCombine(ISD::SRL);
951     setTargetDAGCombine(ISD::SRA);
952     setTargetDAGCombine(ISD::FP_TO_SINT);
953     setTargetDAGCombine(ISD::FP_TO_UINT);
954     setTargetDAGCombine(ISD::FDIV);
955     setTargetDAGCombine(ISD::LOAD);
956 
957     // It is legal to extload from v4i8 to v4i16 or v4i32.
958     for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
959                    MVT::v2i32}) {
960       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
961         setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
962         setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
963         setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
964       }
965     }
966   }
967 
968   if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
969     setTargetDAGCombine(ISD::BUILD_VECTOR);
970     setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
971     setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
972     setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
973     setTargetDAGCombine(ISD::STORE);
974     setTargetDAGCombine(ISD::SIGN_EXTEND);
975     setTargetDAGCombine(ISD::ZERO_EXTEND);
976     setTargetDAGCombine(ISD::ANY_EXTEND);
977     setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
978     setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
979     setTargetDAGCombine(ISD::INTRINSIC_VOID);
980     setTargetDAGCombine(ISD::VECREDUCE_ADD);
981     setTargetDAGCombine(ISD::ADD);
982     setTargetDAGCombine(ISD::BITCAST);
983   }
984   if (Subtarget->hasMVEIntegerOps()) {
985     setTargetDAGCombine(ISD::SMIN);
986     setTargetDAGCombine(ISD::UMIN);
987     setTargetDAGCombine(ISD::SMAX);
988     setTargetDAGCombine(ISD::UMAX);
989     setTargetDAGCombine(ISD::FP_EXTEND);
990   }
991 
992   if (!Subtarget->hasFP64()) {
993     // When targeting a floating-point unit with only single-precision
994     // operations, f64 is legal for the few double-precision instructions which
995     // are present However, no double-precision operations other than moves,
996     // loads and stores are provided by the hardware.
997     setOperationAction(ISD::FADD,       MVT::f64, Expand);
998     setOperationAction(ISD::FSUB,       MVT::f64, Expand);
999     setOperationAction(ISD::FMUL,       MVT::f64, Expand);
1000     setOperationAction(ISD::FMA,        MVT::f64, Expand);
1001     setOperationAction(ISD::FDIV,       MVT::f64, Expand);
1002     setOperationAction(ISD::FREM,       MVT::f64, Expand);
1003     setOperationAction(ISD::FCOPYSIGN,  MVT::f64, Expand);
1004     setOperationAction(ISD::FGETSIGN,   MVT::f64, Expand);
1005     setOperationAction(ISD::FNEG,       MVT::f64, Expand);
1006     setOperationAction(ISD::FABS,       MVT::f64, Expand);
1007     setOperationAction(ISD::FSQRT,      MVT::f64, Expand);
1008     setOperationAction(ISD::FSIN,       MVT::f64, Expand);
1009     setOperationAction(ISD::FCOS,       MVT::f64, Expand);
1010     setOperationAction(ISD::FPOW,       MVT::f64, Expand);
1011     setOperationAction(ISD::FLOG,       MVT::f64, Expand);
1012     setOperationAction(ISD::FLOG2,      MVT::f64, Expand);
1013     setOperationAction(ISD::FLOG10,     MVT::f64, Expand);
1014     setOperationAction(ISD::FEXP,       MVT::f64, Expand);
1015     setOperationAction(ISD::FEXP2,      MVT::f64, Expand);
1016     setOperationAction(ISD::FCEIL,      MVT::f64, Expand);
1017     setOperationAction(ISD::FTRUNC,     MVT::f64, Expand);
1018     setOperationAction(ISD::FRINT,      MVT::f64, Expand);
1019     setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
1020     setOperationAction(ISD::FFLOOR,     MVT::f64, Expand);
1021     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
1022     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
1023     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
1024     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
1025     setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
1026     setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
1027     setOperationAction(ISD::FP_ROUND,   MVT::f32, Custom);
1028     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
1029     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
1030     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::f64, Custom);
1031     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::f64, Custom);
1032     setOperationAction(ISD::STRICT_FP_ROUND,   MVT::f32, Custom);
1033   }
1034 
1035   if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) {
1036     setOperationAction(ISD::FP_EXTEND,  MVT::f64, Custom);
1037     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
1038     if (Subtarget->hasFullFP16()) {
1039       setOperationAction(ISD::FP_ROUND,  MVT::f16, Custom);
1040       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
1041     }
1042   }
1043 
1044   if (!Subtarget->hasFP16()) {
1045     setOperationAction(ISD::FP_EXTEND,  MVT::f32, Custom);
1046     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
1047   }
1048 
1049   computeRegisterProperties(Subtarget->getRegisterInfo());
1050 
1051   // ARM does not have floating-point extending loads.
1052   for (MVT VT : MVT::fp_valuetypes()) {
1053     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1054     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
1055   }
1056 
1057   // ... or truncating stores
1058   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1059   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
1060   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
1061 
1062   // ARM does not have i1 sign extending load.
1063   for (MVT VT : MVT::integer_valuetypes())
1064     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
1065 
1066   // ARM supports all 4 flavors of integer indexed load / store.
1067   if (!Subtarget->isThumb1Only()) {
1068     for (unsigned im = (unsigned)ISD::PRE_INC;
1069          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
1070       setIndexedLoadAction(im,  MVT::i1,  Legal);
1071       setIndexedLoadAction(im,  MVT::i8,  Legal);
1072       setIndexedLoadAction(im,  MVT::i16, Legal);
1073       setIndexedLoadAction(im,  MVT::i32, Legal);
1074       setIndexedStoreAction(im, MVT::i1,  Legal);
1075       setIndexedStoreAction(im, MVT::i8,  Legal);
1076       setIndexedStoreAction(im, MVT::i16, Legal);
1077       setIndexedStoreAction(im, MVT::i32, Legal);
1078     }
1079   } else {
1080     // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
1081     setIndexedLoadAction(ISD::POST_INC, MVT::i32,  Legal);
1082     setIndexedStoreAction(ISD::POST_INC, MVT::i32,  Legal);
1083   }
1084 
1085   setOperationAction(ISD::SADDO, MVT::i32, Custom);
1086   setOperationAction(ISD::UADDO, MVT::i32, Custom);
1087   setOperationAction(ISD::SSUBO, MVT::i32, Custom);
1088   setOperationAction(ISD::USUBO, MVT::i32, Custom);
1089 
1090   setOperationAction(ISD::ADDCARRY, MVT::i32, Custom);
1091   setOperationAction(ISD::SUBCARRY, MVT::i32, Custom);
1092   if (Subtarget->hasDSP()) {
1093     setOperationAction(ISD::SADDSAT, MVT::i8, Custom);
1094     setOperationAction(ISD::SSUBSAT, MVT::i8, Custom);
1095     setOperationAction(ISD::SADDSAT, MVT::i16, Custom);
1096     setOperationAction(ISD::SSUBSAT, MVT::i16, Custom);
1097   }
1098   if (Subtarget->hasBaseDSP()) {
1099     setOperationAction(ISD::SADDSAT, MVT::i32, Legal);
1100     setOperationAction(ISD::SSUBSAT, MVT::i32, Legal);
1101   }
1102 
1103   // i64 operation support.
1104   setOperationAction(ISD::MUL,     MVT::i64, Expand);
1105   setOperationAction(ISD::MULHU,   MVT::i32, Expand);
1106   if (Subtarget->isThumb1Only()) {
1107     setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
1108     setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
1109   }
1110   if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
1111       || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
1112     setOperationAction(ISD::MULHS, MVT::i32, Expand);
1113 
1114   setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
1115   setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
1116   setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
1117   setOperationAction(ISD::SRL,       MVT::i64, Custom);
1118   setOperationAction(ISD::SRA,       MVT::i64, Custom);
1119   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1120   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
1121   setOperationAction(ISD::LOAD, MVT::i64, Custom);
1122   setOperationAction(ISD::STORE, MVT::i64, Custom);
1123 
1124   // MVE lowers 64 bit shifts to lsll and lsrl
1125   // assuming that ISD::SRL and SRA of i64 are already marked custom
1126   if (Subtarget->hasMVEIntegerOps())
1127     setOperationAction(ISD::SHL, MVT::i64, Custom);
1128 
1129   // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1.
1130   if (Subtarget->isThumb1Only()) {
1131     setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
1132     setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
1133     setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
1134   }
1135 
1136   if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())
1137     setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
1138 
1139   // ARM does not have ROTL.
1140   setOperationAction(ISD::ROTL, MVT::i32, Expand);
1141   for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
1142     setOperationAction(ISD::ROTL, VT, Expand);
1143     setOperationAction(ISD::ROTR, VT, Expand);
1144   }
1145   setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
1146   setOperationAction(ISD::CTPOP, MVT::i32, Expand);
1147   if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) {
1148     setOperationAction(ISD::CTLZ, MVT::i32, Expand);
1149     setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, LibCall);
1150   }
1151 
1152   // @llvm.readcyclecounter requires the Performance Monitors extension.
1153   // Default to the 0 expansion on unsupported platforms.
1154   // FIXME: Technically there are older ARM CPUs that have
1155   // implementation-specific ways of obtaining this information.
1156   if (Subtarget->hasPerfMon())
1157     setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
1158 
1159   // Only ARMv6 has BSWAP.
1160   if (!Subtarget->hasV6Ops())
1161     setOperationAction(ISD::BSWAP, MVT::i32, Expand);
1162 
1163   bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
1164                                         : Subtarget->hasDivideInARMMode();
1165   if (!hasDivide) {
1166     // These are expanded into libcalls if the cpu doesn't have HW divider.
1167     setOperationAction(ISD::SDIV,  MVT::i32, LibCall);
1168     setOperationAction(ISD::UDIV,  MVT::i32, LibCall);
1169   }
1170 
1171   if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) {
1172     setOperationAction(ISD::SDIV, MVT::i32, Custom);
1173     setOperationAction(ISD::UDIV, MVT::i32, Custom);
1174 
1175     setOperationAction(ISD::SDIV, MVT::i64, Custom);
1176     setOperationAction(ISD::UDIV, MVT::i64, Custom);
1177   }
1178 
1179   setOperationAction(ISD::SREM,  MVT::i32, Expand);
1180   setOperationAction(ISD::UREM,  MVT::i32, Expand);
1181 
1182   // Register based DivRem for AEABI (RTABI 4.2)
1183   if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
1184       Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
1185       Subtarget->isTargetWindows()) {
1186     setOperationAction(ISD::SREM, MVT::i64, Custom);
1187     setOperationAction(ISD::UREM, MVT::i64, Custom);
1188     HasStandaloneRem = false;
1189 
1190     if (Subtarget->isTargetWindows()) {
1191       const struct {
1192         const RTLIB::Libcall Op;
1193         const char * const Name;
1194         const CallingConv::ID CC;
1195       } LibraryCalls[] = {
1196         { RTLIB::SDIVREM_I8, "__rt_sdiv", CallingConv::ARM_AAPCS },
1197         { RTLIB::SDIVREM_I16, "__rt_sdiv", CallingConv::ARM_AAPCS },
1198         { RTLIB::SDIVREM_I32, "__rt_sdiv", CallingConv::ARM_AAPCS },
1199         { RTLIB::SDIVREM_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS },
1200 
1201         { RTLIB::UDIVREM_I8, "__rt_udiv", CallingConv::ARM_AAPCS },
1202         { RTLIB::UDIVREM_I16, "__rt_udiv", CallingConv::ARM_AAPCS },
1203         { RTLIB::UDIVREM_I32, "__rt_udiv", CallingConv::ARM_AAPCS },
1204         { RTLIB::UDIVREM_I64, "__rt_udiv64", CallingConv::ARM_AAPCS },
1205       };
1206 
1207       for (const auto &LC : LibraryCalls) {
1208         setLibcallName(LC.Op, LC.Name);
1209         setLibcallCallingConv(LC.Op, LC.CC);
1210       }
1211     } else {
1212       const struct {
1213         const RTLIB::Libcall Op;
1214         const char * const Name;
1215         const CallingConv::ID CC;
1216       } LibraryCalls[] = {
1217         { RTLIB::SDIVREM_I8, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
1218         { RTLIB::SDIVREM_I16, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
1219         { RTLIB::SDIVREM_I32, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
1220         { RTLIB::SDIVREM_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS },
1221 
1222         { RTLIB::UDIVREM_I8, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1223         { RTLIB::UDIVREM_I16, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1224         { RTLIB::UDIVREM_I32, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1225         { RTLIB::UDIVREM_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS },
1226       };
1227 
1228       for (const auto &LC : LibraryCalls) {
1229         setLibcallName(LC.Op, LC.Name);
1230         setLibcallCallingConv(LC.Op, LC.CC);
1231       }
1232     }
1233 
1234     setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
1235     setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
1236     setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
1237     setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
1238   } else {
1239     setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1240     setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1241   }
1242 
1243   if (Subtarget->getTargetTriple().isOSMSVCRT()) {
1244     // MSVCRT doesn't have powi; fall back to pow
1245     setLibcallName(RTLIB::POWI_F32, nullptr);
1246     setLibcallName(RTLIB::POWI_F64, nullptr);
1247   }
1248 
1249   setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
1250   setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
1251   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
1252   setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
1253 
1254   setOperationAction(ISD::TRAP, MVT::Other, Legal);
1255   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
1256 
1257   // Use the default implementation.
1258   setOperationAction(ISD::VASTART,            MVT::Other, Custom);
1259   setOperationAction(ISD::VAARG,              MVT::Other, Expand);
1260   setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
1261   setOperationAction(ISD::VAEND,              MVT::Other, Expand);
1262   setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
1263   setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
1264 
1265   if (Subtarget->isTargetWindows())
1266     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
1267   else
1268     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
1269 
1270   // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
1271   // the default expansion.
1272   InsertFencesForAtomic = false;
1273   if (Subtarget->hasAnyDataBarrier() &&
1274       (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
1275     // ATOMIC_FENCE needs custom lowering; the others should have been expanded
1276     // to ldrex/strex loops already.
1277     setOperationAction(ISD::ATOMIC_FENCE,     MVT::Other, Custom);
1278     if (!Subtarget->isThumb() || !Subtarget->isMClass())
1279       setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i64, Custom);
1280 
1281     // On v8, we have particularly efficient implementations of atomic fences
1282     // if they can be combined with nearby atomic loads and stores.
1283     if (!Subtarget->hasAcquireRelease() ||
1284         getTargetMachine().getOptLevel() == 0) {
1285       // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
1286       InsertFencesForAtomic = true;
1287     }
1288   } else {
1289     // If there's anything we can use as a barrier, go through custom lowering
1290     // for ATOMIC_FENCE.
1291     // If target has DMB in thumb, Fences can be inserted.
1292     if (Subtarget->hasDataBarrier())
1293       InsertFencesForAtomic = true;
1294 
1295     setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other,
1296                        Subtarget->hasAnyDataBarrier() ? Custom : Expand);
1297 
1298     // Set them all for expansion, which will force libcalls.
1299     setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
1300     setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
1301     setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
1302     setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
1303     setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
1304     setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
1305     setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
1306     setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
1307     setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
1308     setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
1309     setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
1310     setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
1311     // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
1312     // Unordered/Monotonic case.
1313     if (!InsertFencesForAtomic) {
1314       setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1315       setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1316     }
1317   }
1318 
1319   setOperationAction(ISD::PREFETCH,         MVT::Other, Custom);
1320 
1321   // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
1322   if (!Subtarget->hasV6Ops()) {
1323     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1324     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
1325   }
1326   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
1327 
1328   if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() &&
1329       !Subtarget->isThumb1Only()) {
1330     // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
1331     // iff target supports vfp2.
1332     setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1333     setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
1334   }
1335 
1336   // We want to custom lower some of our intrinsics.
1337   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1338   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
1339   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
1340   setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
1341   if (Subtarget->useSjLjEH())
1342     setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
1343 
1344   setOperationAction(ISD::SETCC,     MVT::i32, Expand);
1345   setOperationAction(ISD::SETCC,     MVT::f32, Expand);
1346   setOperationAction(ISD::SETCC,     MVT::f64, Expand);
1347   setOperationAction(ISD::SELECT,    MVT::i32, Custom);
1348   setOperationAction(ISD::SELECT,    MVT::f32, Custom);
1349   setOperationAction(ISD::SELECT,    MVT::f64, Custom);
1350   setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1351   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1352   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1353   if (Subtarget->hasFullFP16()) {
1354     setOperationAction(ISD::SETCC,     MVT::f16, Expand);
1355     setOperationAction(ISD::SELECT,    MVT::f16, Custom);
1356     setOperationAction(ISD::SELECT_CC, MVT::f16, Custom);
1357   }
1358 
1359   setOperationAction(ISD::SETCCCARRY, MVT::i32, Custom);
1360 
1361   setOperationAction(ISD::BRCOND,    MVT::Other, Custom);
1362   setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
1363   if (Subtarget->hasFullFP16())
1364       setOperationAction(ISD::BR_CC, MVT::f16,   Custom);
1365   setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
1366   setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
1367   setOperationAction(ISD::BR_JT,     MVT::Other, Custom);
1368 
1369   // We don't support sin/cos/fmod/copysign/pow
1370   setOperationAction(ISD::FSIN,      MVT::f64, Expand);
1371   setOperationAction(ISD::FSIN,      MVT::f32, Expand);
1372   setOperationAction(ISD::FCOS,      MVT::f32, Expand);
1373   setOperationAction(ISD::FCOS,      MVT::f64, Expand);
1374   setOperationAction(ISD::FSINCOS,   MVT::f64, Expand);
1375   setOperationAction(ISD::FSINCOS,   MVT::f32, Expand);
1376   setOperationAction(ISD::FREM,      MVT::f64, Expand);
1377   setOperationAction(ISD::FREM,      MVT::f32, Expand);
1378   if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() &&
1379       !Subtarget->isThumb1Only()) {
1380     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
1381     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
1382   }
1383   setOperationAction(ISD::FPOW,      MVT::f64, Expand);
1384   setOperationAction(ISD::FPOW,      MVT::f32, Expand);
1385 
1386   if (!Subtarget->hasVFP4Base()) {
1387     setOperationAction(ISD::FMA, MVT::f64, Expand);
1388     setOperationAction(ISD::FMA, MVT::f32, Expand);
1389   }
1390 
1391   // Various VFP goodness
1392   if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {
1393     // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
1394     if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) {
1395       setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1396       setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1397     }
1398 
1399     // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
1400     if (!Subtarget->hasFP16()) {
1401       setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1402       setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1403     }
1404 
1405     // Strict floating-point comparisons need custom lowering.
1406     setOperationAction(ISD::STRICT_FSETCC,  MVT::f16, Custom);
1407     setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom);
1408     setOperationAction(ISD::STRICT_FSETCC,  MVT::f32, Custom);
1409     setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom);
1410     setOperationAction(ISD::STRICT_FSETCC,  MVT::f64, Custom);
1411     setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom);
1412   }
1413 
1414   // Use __sincos_stret if available.
1415   if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
1416       getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
1417     setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1418     setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1419   }
1420 
1421   // FP-ARMv8 implements a lot of rounding-like FP operations.
1422   if (Subtarget->hasFPARMv8Base()) {
1423     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
1424     setOperationAction(ISD::FCEIL, MVT::f32, Legal);
1425     setOperationAction(ISD::FROUND, MVT::f32, Legal);
1426     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
1427     setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
1428     setOperationAction(ISD::FRINT, MVT::f32, Legal);
1429     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
1430     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
1431     if (Subtarget->hasNEON()) {
1432       setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal);
1433       setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal);
1434       setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
1435       setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
1436     }
1437 
1438     if (Subtarget->hasFP64()) {
1439       setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
1440       setOperationAction(ISD::FCEIL, MVT::f64, Legal);
1441       setOperationAction(ISD::FROUND, MVT::f64, Legal);
1442       setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
1443       setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
1444       setOperationAction(ISD::FRINT, MVT::f64, Legal);
1445       setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
1446       setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
1447     }
1448   }
1449 
1450   // FP16 often need to be promoted to call lib functions
1451   if (Subtarget->hasFullFP16()) {
1452     setOperationAction(ISD::FREM, MVT::f16, Promote);
1453     setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
1454     setOperationAction(ISD::FSIN, MVT::f16, Promote);
1455     setOperationAction(ISD::FCOS, MVT::f16, Promote);
1456     setOperationAction(ISD::FSINCOS, MVT::f16, Promote);
1457     setOperationAction(ISD::FPOWI, MVT::f16, Promote);
1458     setOperationAction(ISD::FPOW, MVT::f16, Promote);
1459     setOperationAction(ISD::FEXP, MVT::f16, Promote);
1460     setOperationAction(ISD::FEXP2, MVT::f16, Promote);
1461     setOperationAction(ISD::FLOG, MVT::f16, Promote);
1462     setOperationAction(ISD::FLOG10, MVT::f16, Promote);
1463     setOperationAction(ISD::FLOG2, MVT::f16, Promote);
1464 
1465     setOperationAction(ISD::FROUND, MVT::f16, Legal);
1466   }
1467 
1468   if (Subtarget->hasNEON()) {
1469     // vmin and vmax aren't available in a scalar form, so we can use
1470     // a NEON instruction with an undef lane instead.  This has a performance
1471     // penalty on some cores, so we don't do this unless we have been
1472     // asked to by the core tuning model.
1473     if (Subtarget->useNEONForSinglePrecisionFP()) {
1474       setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);
1475       setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
1476       setOperationAction(ISD::FMINIMUM, MVT::f16, Legal);
1477       setOperationAction(ISD::FMAXIMUM, MVT::f16, Legal);
1478     }
1479     setOperationAction(ISD::FMINIMUM, MVT::v2f32, Legal);
1480     setOperationAction(ISD::FMAXIMUM, MVT::v2f32, Legal);
1481     setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);
1482     setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
1483 
1484     if (Subtarget->hasFullFP16()) {
1485       setOperationAction(ISD::FMINNUM, MVT::v4f16, Legal);
1486       setOperationAction(ISD::FMAXNUM, MVT::v4f16, Legal);
1487       setOperationAction(ISD::FMINNUM, MVT::v8f16, Legal);
1488       setOperationAction(ISD::FMAXNUM, MVT::v8f16, Legal);
1489 
1490       setOperationAction(ISD::FMINIMUM, MVT::v4f16, Legal);
1491       setOperationAction(ISD::FMAXIMUM, MVT::v4f16, Legal);
1492       setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal);
1493       setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal);
1494     }
1495   }
1496 
1497   // We have target-specific dag combine patterns for the following nodes:
1498   // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
1499   setTargetDAGCombine(ISD::ADD);
1500   setTargetDAGCombine(ISD::SUB);
1501   setTargetDAGCombine(ISD::MUL);
1502   setTargetDAGCombine(ISD::AND);
1503   setTargetDAGCombine(ISD::OR);
1504   setTargetDAGCombine(ISD::XOR);
1505 
1506   if (Subtarget->hasMVEIntegerOps())
1507     setTargetDAGCombine(ISD::VSELECT);
1508 
1509   if (Subtarget->hasV6Ops())
1510     setTargetDAGCombine(ISD::SRL);
1511   if (Subtarget->isThumb1Only())
1512     setTargetDAGCombine(ISD::SHL);
1513 
1514   setStackPointerRegisterToSaveRestore(ARM::SP);
1515 
1516   if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||
1517       !Subtarget->hasVFP2Base() || Subtarget->hasMinSize())
1518     setSchedulingPreference(Sched::RegPressure);
1519   else
1520     setSchedulingPreference(Sched::Hybrid);
1521 
1522   //// temporary - rewrite interface to use type
1523   MaxStoresPerMemset = 8;
1524   MaxStoresPerMemsetOptSize = 4;
1525   MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
1526   MaxStoresPerMemcpyOptSize = 2;
1527   MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
1528   MaxStoresPerMemmoveOptSize = 2;
1529 
1530   // On ARM arguments smaller than 4 bytes are extended, so all arguments
1531   // are at least 4 bytes aligned.
1532   setMinStackArgumentAlignment(Align(4));
1533 
1534   // Prefer likely predicted branches to selects on out-of-order cores.
1535   PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
1536 
1537   setPrefLoopAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment()));
1538 
1539   setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4));
1540 
1541   if (Subtarget->isThumb() || Subtarget->isThumb2())
1542     setTargetDAGCombine(ISD::ABS);
1543 }
1544 
1545 bool ARMTargetLowering::useSoftFloat() const {
1546   return Subtarget->useSoftFloat();
1547 }
1548 
1549 // FIXME: It might make sense to define the representative register class as the
1550 // nearest super-register that has a non-null superset. For example, DPR_VFP2 is
1551 // a super-register of SPR, and DPR is a superset if DPR_VFP2. Consequently,
1552 // SPR's representative would be DPR_VFP2. This should work well if register
1553 // pressure tracking were modified such that a register use would increment the
1554 // pressure of the register class's representative and all of it's super
1555 // classes' representatives transitively. We have not implemented this because
1556 // of the difficulty prior to coalescing of modeling operand register classes
1557 // due to the common occurrence of cross class copies and subregister insertions
1558 // and extractions.
1559 std::pair<const TargetRegisterClass *, uint8_t>
1560 ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
1561                                            MVT VT) const {
1562   const TargetRegisterClass *RRC = nullptr;
1563   uint8_t Cost = 1;
1564   switch (VT.SimpleTy) {
1565   default:
1566     return TargetLowering::findRepresentativeClass(TRI, VT);
1567   // Use DPR as representative register class for all floating point
1568   // and vector types. Since there are 32 SPR registers and 32 DPR registers so
1569   // the cost is 1 for both f32 and f64.
1570   case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
1571   case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
1572     RRC = &ARM::DPRRegClass;
1573     // When NEON is used for SP, only half of the register file is available
1574     // because operations that define both SP and DP results will be constrained
1575     // to the VFP2 class (D0-D15). We currently model this constraint prior to
1576     // coalescing by double-counting the SP regs. See the FIXME above.
1577     if (Subtarget->useNEONForSinglePrecisionFP())
1578       Cost = 2;
1579     break;
1580   case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1581   case MVT::v4f32: case MVT::v2f64:
1582     RRC = &ARM::DPRRegClass;
1583     Cost = 2;
1584     break;
1585   case MVT::v4i64:
1586     RRC = &ARM::DPRRegClass;
1587     Cost = 4;
1588     break;
1589   case MVT::v8i64:
1590     RRC = &ARM::DPRRegClass;
1591     Cost = 8;
1592     break;
1593   }
1594   return std::make_pair(RRC, Cost);
1595 }
1596 
1597 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
1598   switch ((ARMISD::NodeType)Opcode) {
1599   case ARMISD::FIRST_NUMBER:  break;
1600   case ARMISD::Wrapper:       return "ARMISD::Wrapper";
1601   case ARMISD::WrapperPIC:    return "ARMISD::WrapperPIC";
1602   case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
1603   case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL";
1604   case ARMISD::CALL:          return "ARMISD::CALL";
1605   case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
1606   case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
1607   case ARMISD::tSECALL:       return "ARMISD::tSECALL";
1608   case ARMISD::BRCOND:        return "ARMISD::BRCOND";
1609   case ARMISD::BR_JT:         return "ARMISD::BR_JT";
1610   case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
1611   case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
1612   case ARMISD::SERET_FLAG:    return "ARMISD::SERET_FLAG";
1613   case ARMISD::INTRET_FLAG:   return "ARMISD::INTRET_FLAG";
1614   case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
1615   case ARMISD::CMP:           return "ARMISD::CMP";
1616   case ARMISD::CMN:           return "ARMISD::CMN";
1617   case ARMISD::CMPZ:          return "ARMISD::CMPZ";
1618   case ARMISD::CMPFP:         return "ARMISD::CMPFP";
1619   case ARMISD::CMPFPE:        return "ARMISD::CMPFPE";
1620   case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
1621   case ARMISD::CMPFPEw0:      return "ARMISD::CMPFPEw0";
1622   case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
1623   case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
1624 
1625   case ARMISD::CMOV:          return "ARMISD::CMOV";
1626   case ARMISD::SUBS:          return "ARMISD::SUBS";
1627 
1628   case ARMISD::SSAT:          return "ARMISD::SSAT";
1629   case ARMISD::USAT:          return "ARMISD::USAT";
1630 
1631   case ARMISD::ASRL:          return "ARMISD::ASRL";
1632   case ARMISD::LSRL:          return "ARMISD::LSRL";
1633   case ARMISD::LSLL:          return "ARMISD::LSLL";
1634 
1635   case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
1636   case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
1637   case ARMISD::RRX:           return "ARMISD::RRX";
1638 
1639   case ARMISD::ADDC:          return "ARMISD::ADDC";
1640   case ARMISD::ADDE:          return "ARMISD::ADDE";
1641   case ARMISD::SUBC:          return "ARMISD::SUBC";
1642   case ARMISD::SUBE:          return "ARMISD::SUBE";
1643   case ARMISD::LSLS:          return "ARMISD::LSLS";
1644 
1645   case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
1646   case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";
1647   case ARMISD::VMOVhr:        return "ARMISD::VMOVhr";
1648   case ARMISD::VMOVrh:        return "ARMISD::VMOVrh";
1649   case ARMISD::VMOVSR:        return "ARMISD::VMOVSR";
1650 
1651   case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
1652   case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
1653   case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH";
1654 
1655   case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";
1656 
1657   case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
1658 
1659   case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";
1660 
1661   case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";
1662 
1663   case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";
1664 
1665   case ARMISD::LDRD:          return "ARMISD::LDRD";
1666   case ARMISD::STRD:          return "ARMISD::STRD";
1667 
1668   case ARMISD::WIN__CHKSTK:   return "ARMISD::WIN__CHKSTK";
1669   case ARMISD::WIN__DBZCHK:   return "ARMISD::WIN__DBZCHK";
1670 
1671   case ARMISD::PREDICATE_CAST: return "ARMISD::PREDICATE_CAST";
1672   case ARMISD::VECTOR_REG_CAST: return "ARMISD::VECTOR_REG_CAST";
1673   case ARMISD::VCMP:          return "ARMISD::VCMP";
1674   case ARMISD::VCMPZ:         return "ARMISD::VCMPZ";
1675   case ARMISD::VTST:          return "ARMISD::VTST";
1676 
1677   case ARMISD::VSHLs:         return "ARMISD::VSHLs";
1678   case ARMISD::VSHLu:         return "ARMISD::VSHLu";
1679   case ARMISD::VSHLIMM:       return "ARMISD::VSHLIMM";
1680   case ARMISD::VSHRsIMM:      return "ARMISD::VSHRsIMM";
1681   case ARMISD::VSHRuIMM:      return "ARMISD::VSHRuIMM";
1682   case ARMISD::VRSHRsIMM:     return "ARMISD::VRSHRsIMM";
1683   case ARMISD::VRSHRuIMM:     return "ARMISD::VRSHRuIMM";
1684   case ARMISD::VRSHRNIMM:     return "ARMISD::VRSHRNIMM";
1685   case ARMISD::VQSHLsIMM:     return "ARMISD::VQSHLsIMM";
1686   case ARMISD::VQSHLuIMM:     return "ARMISD::VQSHLuIMM";
1687   case ARMISD::VQSHLsuIMM:    return "ARMISD::VQSHLsuIMM";
1688   case ARMISD::VQSHRNsIMM:    return "ARMISD::VQSHRNsIMM";
1689   case ARMISD::VQSHRNuIMM:    return "ARMISD::VQSHRNuIMM";
1690   case ARMISD::VQSHRNsuIMM:   return "ARMISD::VQSHRNsuIMM";
1691   case ARMISD::VQRSHRNsIMM:   return "ARMISD::VQRSHRNsIMM";
1692   case ARMISD::VQRSHRNuIMM:   return "ARMISD::VQRSHRNuIMM";
1693   case ARMISD::VQRSHRNsuIMM:  return "ARMISD::VQRSHRNsuIMM";
1694   case ARMISD::VSLIIMM:       return "ARMISD::VSLIIMM";
1695   case ARMISD::VSRIIMM:       return "ARMISD::VSRIIMM";
1696   case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
1697   case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
1698   case ARMISD::VMOVIMM:       return "ARMISD::VMOVIMM";
1699   case ARMISD::VMVNIMM:       return "ARMISD::VMVNIMM";
1700   case ARMISD::VMOVFPIMM:     return "ARMISD::VMOVFPIMM";
1701   case ARMISD::VDUP:          return "ARMISD::VDUP";
1702   case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
1703   case ARMISD::VEXT:          return "ARMISD::VEXT";
1704   case ARMISD::VREV64:        return "ARMISD::VREV64";
1705   case ARMISD::VREV32:        return "ARMISD::VREV32";
1706   case ARMISD::VREV16:        return "ARMISD::VREV16";
1707   case ARMISD::VZIP:          return "ARMISD::VZIP";
1708   case ARMISD::VUZP:          return "ARMISD::VUZP";
1709   case ARMISD::VTRN:          return "ARMISD::VTRN";
1710   case ARMISD::VTBL1:         return "ARMISD::VTBL1";
1711   case ARMISD::VTBL2:         return "ARMISD::VTBL2";
1712   case ARMISD::VMOVN:         return "ARMISD::VMOVN";
1713   case ARMISD::VQMOVNs:       return "ARMISD::VQMOVNs";
1714   case ARMISD::VQMOVNu:       return "ARMISD::VQMOVNu";
1715   case ARMISD::VCVTN:         return "ARMISD::VCVTN";
1716   case ARMISD::VCVTL:         return "ARMISD::VCVTL";
1717   case ARMISD::VMULLs:        return "ARMISD::VMULLs";
1718   case ARMISD::VMULLu:        return "ARMISD::VMULLu";
1719   case ARMISD::VADDVs:        return "ARMISD::VADDVs";
1720   case ARMISD::VADDVu:        return "ARMISD::VADDVu";
1721   case ARMISD::VADDLVs:       return "ARMISD::VADDLVs";
1722   case ARMISD::VADDLVu:       return "ARMISD::VADDLVu";
1723   case ARMISD::VADDLVAs:      return "ARMISD::VADDLVAs";
1724   case ARMISD::VADDLVAu:      return "ARMISD::VADDLVAu";
1725   case ARMISD::VADDLVps:      return "ARMISD::VADDLVps";
1726   case ARMISD::VADDLVpu:      return "ARMISD::VADDLVpu";
1727   case ARMISD::VADDLVAps:     return "ARMISD::VADDLVAps";
1728   case ARMISD::VADDLVApu:     return "ARMISD::VADDLVApu";
1729   case ARMISD::VMLAVs:        return "ARMISD::VMLAVs";
1730   case ARMISD::VMLAVu:        return "ARMISD::VMLAVu";
1731   case ARMISD::VMLALVs:       return "ARMISD::VMLALVs";
1732   case ARMISD::VMLALVu:       return "ARMISD::VMLALVu";
1733   case ARMISD::VMLALVAs:      return "ARMISD::VMLALVAs";
1734   case ARMISD::VMLALVAu:      return "ARMISD::VMLALVAu";
1735   case ARMISD::UMAAL:         return "ARMISD::UMAAL";
1736   case ARMISD::UMLAL:         return "ARMISD::UMLAL";
1737   case ARMISD::SMLAL:         return "ARMISD::SMLAL";
1738   case ARMISD::SMLALBB:       return "ARMISD::SMLALBB";
1739   case ARMISD::SMLALBT:       return "ARMISD::SMLALBT";
1740   case ARMISD::SMLALTB:       return "ARMISD::SMLALTB";
1741   case ARMISD::SMLALTT:       return "ARMISD::SMLALTT";
1742   case ARMISD::SMULWB:        return "ARMISD::SMULWB";
1743   case ARMISD::SMULWT:        return "ARMISD::SMULWT";
1744   case ARMISD::SMLALD:        return "ARMISD::SMLALD";
1745   case ARMISD::SMLALDX:       return "ARMISD::SMLALDX";
1746   case ARMISD::SMLSLD:        return "ARMISD::SMLSLD";
1747   case ARMISD::SMLSLDX:       return "ARMISD::SMLSLDX";
1748   case ARMISD::SMMLAR:        return "ARMISD::SMMLAR";
1749   case ARMISD::SMMLSR:        return "ARMISD::SMMLSR";
1750   case ARMISD::QADD16b:       return "ARMISD::QADD16b";
1751   case ARMISD::QSUB16b:       return "ARMISD::QSUB16b";
1752   case ARMISD::QADD8b:        return "ARMISD::QADD8b";
1753   case ARMISD::QSUB8b:        return "ARMISD::QSUB8b";
1754   case ARMISD::BUILD_VECTOR:  return "ARMISD::BUILD_VECTOR";
1755   case ARMISD::BFI:           return "ARMISD::BFI";
1756   case ARMISD::VORRIMM:       return "ARMISD::VORRIMM";
1757   case ARMISD::VBICIMM:       return "ARMISD::VBICIMM";
1758   case ARMISD::VBSL:          return "ARMISD::VBSL";
1759   case ARMISD::MEMCPY:        return "ARMISD::MEMCPY";
1760   case ARMISD::VLD1DUP:       return "ARMISD::VLD1DUP";
1761   case ARMISD::VLD2DUP:       return "ARMISD::VLD2DUP";
1762   case ARMISD::VLD3DUP:       return "ARMISD::VLD3DUP";
1763   case ARMISD::VLD4DUP:       return "ARMISD::VLD4DUP";
1764   case ARMISD::VLD1_UPD:      return "ARMISD::VLD1_UPD";
1765   case ARMISD::VLD2_UPD:      return "ARMISD::VLD2_UPD";
1766   case ARMISD::VLD3_UPD:      return "ARMISD::VLD3_UPD";
1767   case ARMISD::VLD4_UPD:      return "ARMISD::VLD4_UPD";
1768   case ARMISD::VLD2LN_UPD:    return "ARMISD::VLD2LN_UPD";
1769   case ARMISD::VLD3LN_UPD:    return "ARMISD::VLD3LN_UPD";
1770   case ARMISD::VLD4LN_UPD:    return "ARMISD::VLD4LN_UPD";
1771   case ARMISD::VLD1DUP_UPD:   return "ARMISD::VLD1DUP_UPD";
1772   case ARMISD::VLD2DUP_UPD:   return "ARMISD::VLD2DUP_UPD";
1773   case ARMISD::VLD3DUP_UPD:   return "ARMISD::VLD3DUP_UPD";
1774   case ARMISD::VLD4DUP_UPD:   return "ARMISD::VLD4DUP_UPD";
1775   case ARMISD::VST1_UPD:      return "ARMISD::VST1_UPD";
1776   case ARMISD::VST2_UPD:      return "ARMISD::VST2_UPD";
1777   case ARMISD::VST3_UPD:      return "ARMISD::VST3_UPD";
1778   case ARMISD::VST4_UPD:      return "ARMISD::VST4_UPD";
1779   case ARMISD::VST2LN_UPD:    return "ARMISD::VST2LN_UPD";
1780   case ARMISD::VST3LN_UPD:    return "ARMISD::VST3LN_UPD";
1781   case ARMISD::VST4LN_UPD:    return "ARMISD::VST4LN_UPD";
1782   case ARMISD::WLS:           return "ARMISD::WLS";
1783   case ARMISD::LE:            return "ARMISD::LE";
1784   case ARMISD::LOOP_DEC:      return "ARMISD::LOOP_DEC";
1785   case ARMISD::CSINV:         return "ARMISD::CSINV";
1786   case ARMISD::CSNEG:         return "ARMISD::CSNEG";
1787   case ARMISD::CSINC:         return "ARMISD::CSINC";
1788   }
1789   return nullptr;
1790 }
1791 
1792 EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1793                                           EVT VT) const {
1794   if (!VT.isVector())
1795     return getPointerTy(DL);
1796 
1797   // MVE has a predicate register.
1798   if (Subtarget->hasMVEIntegerOps() &&
1799       (VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8))
1800     return MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1801   return VT.changeVectorElementTypeToInteger();
1802 }
1803 
1804 /// getRegClassFor - Return the register class that should be used for the
1805 /// specified value type.
1806 const TargetRegisterClass *
1807 ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
1808   (void)isDivergent;
1809   // Map v4i64 to QQ registers but do not make the type legal. Similarly map
1810   // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
1811   // load / store 4 to 8 consecutive NEON D registers, or 2 to 4 consecutive
1812   // MVE Q registers.
1813   if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
1814     if (VT == MVT::v4i64)
1815       return &ARM::QQPRRegClass;
1816     if (VT == MVT::v8i64)
1817       return &ARM::QQQQPRRegClass;
1818   }
1819   return TargetLowering::getRegClassFor(VT);
1820 }
1821 
1822 // memcpy, and other memory intrinsics, typically tries to use LDM/STM if the
1823 // source/dest is aligned and the copy size is large enough. We therefore want
1824 // to align such objects passed to memory intrinsics.
1825 bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
1826                                                unsigned &PrefAlign) const {
1827   if (!isa<MemIntrinsic>(CI))
1828     return false;
1829   MinSize = 8;
1830   // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
1831   // cycle faster than 4-byte aligned LDM.
1832   PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4);
1833   return true;
1834 }
1835 
1836 // Create a fast isel object.
1837 FastISel *
1838 ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
1839                                   const TargetLibraryInfo *libInfo) const {
1840   return ARM::createFastISel(funcInfo, libInfo);
1841 }
1842 
1843 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
1844   unsigned NumVals = N->getNumValues();
1845   if (!NumVals)
1846     return Sched::RegPressure;
1847 
1848   for (unsigned i = 0; i != NumVals; ++i) {
1849     EVT VT = N->getValueType(i);
1850     if (VT == MVT::Glue || VT == MVT::Other)
1851       continue;
1852     if (VT.isFloatingPoint() || VT.isVector())
1853       return Sched::ILP;
1854   }
1855 
1856   if (!N->isMachineOpcode())
1857     return Sched::RegPressure;
1858 
1859   // Load are scheduled for latency even if there instruction itinerary
1860   // is not available.
1861   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1862   const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
1863 
1864   if (MCID.getNumDefs() == 0)
1865     return Sched::RegPressure;
1866   if (!Itins->isEmpty() &&
1867       Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
1868     return Sched::ILP;
1869 
1870   return Sched::RegPressure;
1871 }
1872 
1873 //===----------------------------------------------------------------------===//
1874 // Lowering Code
1875 //===----------------------------------------------------------------------===//
1876 
1877 static bool isSRL16(const SDValue &Op) {
1878   if (Op.getOpcode() != ISD::SRL)
1879     return false;
1880   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1881     return Const->getZExtValue() == 16;
1882   return false;
1883 }
1884 
1885 static bool isSRA16(const SDValue &Op) {
1886   if (Op.getOpcode() != ISD::SRA)
1887     return false;
1888   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1889     return Const->getZExtValue() == 16;
1890   return false;
1891 }
1892 
1893 static bool isSHL16(const SDValue &Op) {
1894   if (Op.getOpcode() != ISD::SHL)
1895     return false;
1896   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1897     return Const->getZExtValue() == 16;
1898   return false;
1899 }
1900 
1901 // Check for a signed 16-bit value. We special case SRA because it makes it
1902 // more simple when also looking for SRAs that aren't sign extending a
1903 // smaller value. Without the check, we'd need to take extra care with
1904 // checking order for some operations.
1905 static bool isS16(const SDValue &Op, SelectionDAG &DAG) {
1906   if (isSRA16(Op))
1907     return isSHL16(Op.getOperand(0));
1908   return DAG.ComputeNumSignBits(Op) == 17;
1909 }
1910 
1911 /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
1912 static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
1913   switch (CC) {
1914   default: llvm_unreachable("Unknown condition code!");
1915   case ISD::SETNE:  return ARMCC::NE;
1916   case ISD::SETEQ:  return ARMCC::EQ;
1917   case ISD::SETGT:  return ARMCC::GT;
1918   case ISD::SETGE:  return ARMCC::GE;
1919   case ISD::SETLT:  return ARMCC::LT;
1920   case ISD::SETLE:  return ARMCC::LE;
1921   case ISD::SETUGT: return ARMCC::HI;
1922   case ISD::SETUGE: return ARMCC::HS;
1923   case ISD::SETULT: return ARMCC::LO;
1924   case ISD::SETULE: return ARMCC::LS;
1925   }
1926 }
1927 
1928 /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
1929 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
1930                         ARMCC::CondCodes &CondCode2) {
1931   CondCode2 = ARMCC::AL;
1932   switch (CC) {
1933   default: llvm_unreachable("Unknown FP condition!");
1934   case ISD::SETEQ:
1935   case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
1936   case ISD::SETGT:
1937   case ISD::SETOGT: CondCode = ARMCC::GT; break;
1938   case ISD::SETGE:
1939   case ISD::SETOGE: CondCode = ARMCC::GE; break;
1940   case ISD::SETOLT: CondCode = ARMCC::MI; break;
1941   case ISD::SETOLE: CondCode = ARMCC::LS; break;
1942   case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
1943   case ISD::SETO:   CondCode = ARMCC::VC; break;
1944   case ISD::SETUO:  CondCode = ARMCC::VS; break;
1945   case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
1946   case ISD::SETUGT: CondCode = ARMCC::HI; break;
1947   case ISD::SETUGE: CondCode = ARMCC::PL; break;
1948   case ISD::SETLT:
1949   case ISD::SETULT: CondCode = ARMCC::LT; break;
1950   case ISD::SETLE:
1951   case ISD::SETULE: CondCode = ARMCC::LE; break;
1952   case ISD::SETNE:
1953   case ISD::SETUNE: CondCode = ARMCC::NE; break;
1954   }
1955 }
1956 
1957 //===----------------------------------------------------------------------===//
1958 //                      Calling Convention Implementation
1959 //===----------------------------------------------------------------------===//
1960 
1961 /// getEffectiveCallingConv - Get the effective calling convention, taking into
1962 /// account presence of floating point hardware and calling convention
1963 /// limitations, such as support for variadic functions.
1964 CallingConv::ID
1965 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
1966                                            bool isVarArg) const {
1967   switch (CC) {
1968   default:
1969     report_fatal_error("Unsupported calling convention");
1970   case CallingConv::ARM_AAPCS:
1971   case CallingConv::ARM_APCS:
1972   case CallingConv::GHC:
1973   case CallingConv::CFGuard_Check:
1974     return CC;
1975   case CallingConv::PreserveMost:
1976     return CallingConv::PreserveMost;
1977   case CallingConv::ARM_AAPCS_VFP:
1978   case CallingConv::Swift:
1979     return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
1980   case CallingConv::C:
1981     if (!Subtarget->isAAPCS_ABI())
1982       return CallingConv::ARM_APCS;
1983     else if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() &&
1984              getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
1985              !isVarArg)
1986       return CallingConv::ARM_AAPCS_VFP;
1987     else
1988       return CallingConv::ARM_AAPCS;
1989   case CallingConv::Fast:
1990   case CallingConv::CXX_FAST_TLS:
1991     if (!Subtarget->isAAPCS_ABI()) {
1992       if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg)
1993         return CallingConv::Fast;
1994       return CallingConv::ARM_APCS;
1995     } else if (Subtarget->hasVFP2Base() &&
1996                !Subtarget->isThumb1Only() && !isVarArg)
1997       return CallingConv::ARM_AAPCS_VFP;
1998     else
1999       return CallingConv::ARM_AAPCS;
2000   }
2001 }
2002 
2003 CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
2004                                                  bool isVarArg) const {
2005   return CCAssignFnForNode(CC, false, isVarArg);
2006 }
2007 
2008 CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
2009                                                    bool isVarArg) const {
2010   return CCAssignFnForNode(CC, true, isVarArg);
2011 }
2012 
2013 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given
2014 /// CallingConvention.
2015 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
2016                                                  bool Return,
2017                                                  bool isVarArg) const {
2018   switch (getEffectiveCallingConv(CC, isVarArg)) {
2019   default:
2020     report_fatal_error("Unsupported calling convention");
2021   case CallingConv::ARM_APCS:
2022     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
2023   case CallingConv::ARM_AAPCS:
2024     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
2025   case CallingConv::ARM_AAPCS_VFP:
2026     return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
2027   case CallingConv::Fast:
2028     return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
2029   case CallingConv::GHC:
2030     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
2031   case CallingConv::PreserveMost:
2032     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
2033   case CallingConv::CFGuard_Check:
2034     return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check);
2035   }
2036 }
2037 
2038 SDValue ARMTargetLowering::MoveToHPR(const SDLoc &dl, SelectionDAG &DAG,
2039                                      MVT LocVT, MVT ValVT, SDValue Val) const {
2040   Val = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocVT.getSizeInBits()),
2041                     Val);
2042   if (Subtarget->hasFullFP16()) {
2043     Val = DAG.getNode(ARMISD::VMOVhr, dl, ValVT, Val);
2044   } else {
2045     Val = DAG.getNode(ISD::TRUNCATE, dl,
2046                       MVT::getIntegerVT(ValVT.getSizeInBits()), Val);
2047     Val = DAG.getNode(ISD::BITCAST, dl, ValVT, Val);
2048   }
2049   return Val;
2050 }
2051 
2052 SDValue ARMTargetLowering::MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG,
2053                                        MVT LocVT, MVT ValVT,
2054                                        SDValue Val) const {
2055   if (Subtarget->hasFullFP16()) {
2056     Val = DAG.getNode(ARMISD::VMOVrh, dl,
2057                       MVT::getIntegerVT(LocVT.getSizeInBits()), Val);
2058   } else {
2059     Val = DAG.getNode(ISD::BITCAST, dl,
2060                       MVT::getIntegerVT(ValVT.getSizeInBits()), Val);
2061     Val = DAG.getNode(ISD::ZERO_EXTEND, dl,
2062                       MVT::getIntegerVT(LocVT.getSizeInBits()), Val);
2063   }
2064   return DAG.getNode(ISD::BITCAST, dl, LocVT, Val);
2065 }
2066 
2067 /// LowerCallResult - Lower the result values of a call into the
2068 /// appropriate copies out of appropriate physical registers.
2069 SDValue ARMTargetLowering::LowerCallResult(
2070     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
2071     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2072     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
2073     SDValue ThisVal) const {
2074   // Assign locations to each value returned by this call.
2075   SmallVector<CCValAssign, 16> RVLocs;
2076   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2077                  *DAG.getContext());
2078   CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg));
2079 
2080   // Copy all of the result registers out of their specified physreg.
2081   for (unsigned i = 0; i != RVLocs.size(); ++i) {
2082     CCValAssign VA = RVLocs[i];
2083 
2084     // Pass 'this' value directly from the argument to return value, to avoid
2085     // reg unit interference
2086     if (i == 0 && isThisReturn) {
2087       assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
2088              "unexpected return calling convention register assignment");
2089       InVals.push_back(ThisVal);
2090       continue;
2091     }
2092 
2093     SDValue Val;
2094     if (VA.needsCustom() &&
2095         (VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2f64)) {
2096       // Handle f64 or half of a v2f64.
2097       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
2098                                       InFlag);
2099       Chain = Lo.getValue(1);
2100       InFlag = Lo.getValue(2);
2101       VA = RVLocs[++i]; // skip ahead to next loc
2102       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
2103                                       InFlag);
2104       Chain = Hi.getValue(1);
2105       InFlag = Hi.getValue(2);
2106       if (!Subtarget->isLittle())
2107         std::swap (Lo, Hi);
2108       Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
2109 
2110       if (VA.getLocVT() == MVT::v2f64) {
2111         SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
2112         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
2113                           DAG.getConstant(0, dl, MVT::i32));
2114 
2115         VA = RVLocs[++i]; // skip ahead to next loc
2116         Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
2117         Chain = Lo.getValue(1);
2118         InFlag = Lo.getValue(2);
2119         VA = RVLocs[++i]; // skip ahead to next loc
2120         Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
2121         Chain = Hi.getValue(1);
2122         InFlag = Hi.getValue(2);
2123         if (!Subtarget->isLittle())
2124           std::swap (Lo, Hi);
2125         Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
2126         Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
2127                           DAG.getConstant(1, dl, MVT::i32));
2128       }
2129     } else {
2130       Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
2131                                InFlag);
2132       Chain = Val.getValue(1);
2133       InFlag = Val.getValue(2);
2134     }
2135 
2136     switch (VA.getLocInfo()) {
2137     default: llvm_unreachable("Unknown loc info!");
2138     case CCValAssign::Full: break;
2139     case CCValAssign::BCvt:
2140       Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
2141       break;
2142     }
2143 
2144     // f16 arguments have their size extended to 4 bytes and passed as if they
2145     // had been copied to the LSBs of a 32-bit register.
2146     // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI)
2147     if (VA.needsCustom() &&
2148         (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
2149       Val = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Val);
2150 
2151     InVals.push_back(Val);
2152   }
2153 
2154   return Chain;
2155 }
2156 
2157 /// LowerMemOpCallTo - Store the argument to the stack.
2158 SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
2159                                             SDValue Arg, const SDLoc &dl,
2160                                             SelectionDAG &DAG,
2161                                             const CCValAssign &VA,
2162                                             ISD::ArgFlagsTy Flags) const {
2163   unsigned LocMemOffset = VA.getLocMemOffset();
2164   SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
2165   PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
2166                        StackPtr, PtrOff);
2167   return DAG.getStore(
2168       Chain, dl, Arg, PtrOff,
2169       MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
2170 }
2171 
2172 void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
2173                                          SDValue Chain, SDValue &Arg,
2174                                          RegsToPassVector &RegsToPass,
2175                                          CCValAssign &VA, CCValAssign &NextVA,
2176                                          SDValue &StackPtr,
2177                                          SmallVectorImpl<SDValue> &MemOpChains,
2178                                          ISD::ArgFlagsTy Flags) const {
2179   SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
2180                               DAG.getVTList(MVT::i32, MVT::i32), Arg);
2181   unsigned id = Subtarget->isLittle() ? 0 : 1;
2182   RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));
2183 
2184   if (NextVA.isRegLoc())
2185     RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
2186   else {
2187     assert(NextVA.isMemLoc());
2188     if (!StackPtr.getNode())
2189       StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP,
2190                                     getPointerTy(DAG.getDataLayout()));
2191 
2192     MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
2193                                            dl, DAG, NextVA,
2194                                            Flags));
2195   }
2196 }
2197 
2198 /// LowerCall - Lowering a call into a callseq_start <-
2199 /// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
2200 /// nodes.
2201 SDValue
2202 ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2203                              SmallVectorImpl<SDValue> &InVals) const {
2204   SelectionDAG &DAG                     = CLI.DAG;
2205   SDLoc &dl                             = CLI.DL;
2206   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2207   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
2208   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
2209   SDValue Chain                         = CLI.Chain;
2210   SDValue Callee                        = CLI.Callee;
2211   bool &isTailCall                      = CLI.IsTailCall;
2212   CallingConv::ID CallConv              = CLI.CallConv;
2213   bool doesNotRet                       = CLI.DoesNotReturn;
2214   bool isVarArg                         = CLI.IsVarArg;
2215 
2216   MachineFunction &MF = DAG.getMachineFunction();
2217   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2218   MachineFunction::CallSiteInfo CSInfo;
2219   bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
2220   bool isThisReturn = false;
2221   bool isCmseNSCall   = false;
2222   bool PreferIndirect = false;
2223 
2224   // Determine whether this is a non-secure function call.
2225   if (CLI.CB && CLI.CB->getAttributes().hasFnAttribute("cmse_nonsecure_call"))
2226     isCmseNSCall = true;
2227 
2228   // Disable tail calls if they're not supported.
2229   if (!Subtarget->supportsTailCall())
2230     isTailCall = false;
2231 
2232   // For both the non-secure calls and the returns from a CMSE entry function,
2233   // the function needs to do some extra work afte r the call, or before the
2234   // return, respectively, thus it cannot end with atail call
2235   if (isCmseNSCall || AFI->isCmseNSEntryFunction())
2236     isTailCall = false;
2237 
2238   if (isa<GlobalAddressSDNode>(Callee)) {
2239     // If we're optimizing for minimum size and the function is called three or
2240     // more times in this block, we can improve codesize by calling indirectly
2241     // as BLXr has a 16-bit encoding.
2242     auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
2243     if (CLI.CB) {
2244       auto *BB = CLI.CB->getParent();
2245       PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() &&
2246                        count_if(GV->users(), [&BB](const User *U) {
2247                          return isa<Instruction>(U) &&
2248                                 cast<Instruction>(U)->getParent() == BB;
2249                        }) > 2;
2250     }
2251   }
2252   if (isTailCall) {
2253     // Check if it's really possible to do a tail call.
2254     isTailCall = IsEligibleForTailCallOptimization(
2255         Callee, CallConv, isVarArg, isStructRet,
2256         MF.getFunction().hasStructRetAttr(), Outs, OutVals, Ins, DAG,
2257         PreferIndirect);
2258     if (!isTailCall && CLI.CB && CLI.CB->isMustTailCall())
2259       report_fatal_error("failed to perform tail call elimination on a call "
2260                          "site marked musttail");
2261     // We don't support GuaranteedTailCallOpt for ARM, only automatically
2262     // detected sibcalls.
2263     if (isTailCall)
2264       ++NumTailCalls;
2265   }
2266 
2267   // Analyze operands of the call, assigning locations to each operand.
2268   SmallVector<CCValAssign, 16> ArgLocs;
2269   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
2270                  *DAG.getContext());
2271   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));
2272 
2273   // Get a count of how many bytes are to be pushed on the stack.
2274   unsigned NumBytes = CCInfo.getNextStackOffset();
2275 
2276   if (isTailCall) {
2277     // For tail calls, memory operands are available in our caller's stack.
2278     NumBytes = 0;
2279   } else {
2280     // Adjust the stack pointer for the new arguments...
2281     // These operations are automatically eliminated by the prolog/epilog pass
2282     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
2283   }
2284 
2285   SDValue StackPtr =
2286       DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout()));
2287 
2288   RegsToPassVector RegsToPass;
2289   SmallVector<SDValue, 8> MemOpChains;
2290 
2291   // Walk the register/memloc assignments, inserting copies/loads.  In the case
2292   // of tail call optimization, arguments are handled later.
2293   for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
2294        i != e;
2295        ++i, ++realArgIdx) {
2296     CCValAssign &VA = ArgLocs[i];
2297     SDValue Arg = OutVals[realArgIdx];
2298     ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2299     bool isByVal = Flags.isByVal();
2300 
2301     // Promote the value if needed.
2302     switch (VA.getLocInfo()) {
2303     default: llvm_unreachable("Unknown loc info!");
2304     case CCValAssign::Full: break;
2305     case CCValAssign::SExt:
2306       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
2307       break;
2308     case CCValAssign::ZExt:
2309       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
2310       break;
2311     case CCValAssign::AExt:
2312       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
2313       break;
2314     case CCValAssign::BCvt:
2315       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
2316       break;
2317     }
2318 
2319     // f16 arguments have their size extended to 4 bytes and passed as if they
2320     // had been copied to the LSBs of a 32-bit register.
2321     // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI)
2322     if (VA.needsCustom() &&
2323         (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) {
2324       Arg = MoveFromHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Arg);
2325     } else {
2326       // f16 arguments could have been extended prior to argument lowering.
2327       // Mask them arguments if this is a CMSE nonsecure call.
2328       auto ArgVT = Outs[realArgIdx].ArgVT;
2329       if (isCmseNSCall && (ArgVT == MVT::f16)) {
2330         auto LocBits = VA.getLocVT().getSizeInBits();
2331         auto MaskValue = APInt::getLowBitsSet(LocBits, ArgVT.getSizeInBits());
2332         SDValue Mask =
2333             DAG.getConstant(MaskValue, dl, MVT::getIntegerVT(LocBits));
2334         Arg = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocBits), Arg);
2335         Arg = DAG.getNode(ISD::AND, dl, MVT::getIntegerVT(LocBits), Arg, Mask);
2336         Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
2337       }
2338     }
2339 
2340     // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
2341     if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) {
2342       SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2343                                 DAG.getConstant(0, dl, MVT::i32));
2344       SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2345                                 DAG.getConstant(1, dl, MVT::i32));
2346 
2347       PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, VA, ArgLocs[++i],
2348                        StackPtr, MemOpChains, Flags);
2349 
2350       VA = ArgLocs[++i]; // skip ahead to next loc
2351       if (VA.isRegLoc()) {
2352         PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, VA, ArgLocs[++i],
2353                          StackPtr, MemOpChains, Flags);
2354       } else {
2355         assert(VA.isMemLoc());
2356 
2357         MemOpChains.push_back(
2358             LowerMemOpCallTo(Chain, StackPtr, Op1, dl, DAG, VA, Flags));
2359       }
2360     } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) {
2361       PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
2362                        StackPtr, MemOpChains, Flags);
2363     } else if (VA.isRegLoc()) {
2364       if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
2365           Outs[0].VT == MVT::i32) {
2366         assert(VA.getLocVT() == MVT::i32 &&
2367                "unexpected calling convention register assignment");
2368         assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
2369                "unexpected use of 'returned'");
2370         isThisReturn = true;
2371       }
2372       const TargetOptions &Options = DAG.getTarget().Options;
2373       if (Options.EmitCallSiteInfo)
2374         CSInfo.emplace_back(VA.getLocReg(), i);
2375       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2376     } else if (isByVal) {
2377       assert(VA.isMemLoc());
2378       unsigned offset = 0;
2379 
2380       // True if this byval aggregate will be split between registers
2381       // and memory.
2382       unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
2383       unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
2384 
2385       if (CurByValIdx < ByValArgsCount) {
2386 
2387         unsigned RegBegin, RegEnd;
2388         CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
2389 
2390         EVT PtrVT =
2391             DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2392         unsigned int i, j;
2393         for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
2394           SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
2395           SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
2396           SDValue Load =
2397               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo(),
2398                           DAG.InferPtrAlign(AddArg));
2399           MemOpChains.push_back(Load.getValue(1));
2400           RegsToPass.push_back(std::make_pair(j, Load));
2401         }
2402 
2403         // If parameter size outsides register area, "offset" value
2404         // helps us to calculate stack slot for remained part properly.
2405         offset = RegEnd - RegBegin;
2406 
2407         CCInfo.nextInRegsParam();
2408       }
2409 
2410       if (Flags.getByValSize() > 4*offset) {
2411         auto PtrVT = getPointerTy(DAG.getDataLayout());
2412         unsigned LocMemOffset = VA.getLocMemOffset();
2413         SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
2414         SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff);
2415         SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
2416         SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset);
2417         SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
2418                                            MVT::i32);
2419         SDValue AlignNode =
2420             DAG.getConstant(Flags.getNonZeroByValAlign().value(), dl, MVT::i32);
2421 
2422         SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
2423         SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
2424         MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
2425                                           Ops));
2426       }
2427     } else if (!isTailCall) {
2428       assert(VA.isMemLoc());
2429 
2430       MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2431                                              dl, DAG, VA, Flags));
2432     }
2433   }
2434 
2435   if (!MemOpChains.empty())
2436     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2437 
2438   // Build a sequence of copy-to-reg nodes chained together with token chain
2439   // and flag operands which copy the outgoing args into the appropriate regs.
2440   SDValue InFlag;
2441   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2442     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
2443                              RegsToPass[i].second, InFlag);
2444     InFlag = Chain.getValue(1);
2445   }
2446 
2447   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
2448   // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
2449   // node so that legalize doesn't hack it.
2450   bool isDirect = false;
2451 
2452   const TargetMachine &TM = getTargetMachine();
2453   const Module *Mod = MF.getFunction().getParent();
2454   const GlobalValue *GV = nullptr;
2455   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
2456     GV = G->getGlobal();
2457   bool isStub =
2458       !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO();
2459 
2460   bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
2461   bool isLocalARMFunc = false;
2462   auto PtrVt = getPointerTy(DAG.getDataLayout());
2463 
2464   if (Subtarget->genLongCalls()) {
2465     assert((!isPositionIndependent() || Subtarget->isTargetWindows()) &&
2466            "long-calls codegen is not position independent!");
2467     // Handle a global address or an external symbol. If it's not one of
2468     // those, the target's already in a register, so we don't need to do
2469     // anything extra.
2470     if (isa<GlobalAddressSDNode>(Callee)) {
2471       // Create a constant pool entry for the callee address
2472       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2473       ARMConstantPoolValue *CPV =
2474         ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);
2475 
2476       // Get the address of the callee into a register
2477       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4));
2478       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2479       Callee = DAG.getLoad(
2480           PtrVt, dl, DAG.getEntryNode(), CPAddr,
2481           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2482     } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
2483       const char *Sym = S->getSymbol();
2484 
2485       // Create a constant pool entry for the callee address
2486       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2487       ARMConstantPoolValue *CPV =
2488         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2489                                       ARMPCLabelIndex, 0);
2490       // Get the address of the callee into a register
2491       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4));
2492       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2493       Callee = DAG.getLoad(
2494           PtrVt, dl, DAG.getEntryNode(), CPAddr,
2495           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2496     }
2497   } else if (isa<GlobalAddressSDNode>(Callee)) {
2498     if (!PreferIndirect) {
2499       isDirect = true;
2500       bool isDef = GV->isStrongDefinitionForLinker();
2501 
2502       // ARM call to a local ARM function is predicable.
2503       isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
2504       // tBX takes a register source operand.
2505       if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2506         assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
2507         Callee = DAG.getNode(
2508             ARMISD::WrapperPIC, dl, PtrVt,
2509             DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY));
2510         Callee = DAG.getLoad(
2511             PtrVt, dl, DAG.getEntryNode(), Callee,
2512             MachinePointerInfo::getGOT(DAG.getMachineFunction()),
2513             /* Alignment = */ 0, MachineMemOperand::MODereferenceable |
2514                                      MachineMemOperand::MOInvariant);
2515       } else if (Subtarget->isTargetCOFF()) {
2516         assert(Subtarget->isTargetWindows() &&
2517                "Windows is the only supported COFF target");
2518         unsigned TargetFlags = ARMII::MO_NO_FLAG;
2519         if (GV->hasDLLImportStorageClass())
2520           TargetFlags = ARMII::MO_DLLIMPORT;
2521         else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
2522           TargetFlags = ARMII::MO_COFFSTUB;
2523         Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*offset=*/0,
2524                                             TargetFlags);
2525         if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB))
2526           Callee =
2527               DAG.getLoad(PtrVt, dl, DAG.getEntryNode(),
2528                           DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee),
2529                           MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2530       } else {
2531         Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0);
2532       }
2533     }
2534   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2535     isDirect = true;
2536     // tBX takes a register source operand.
2537     const char *Sym = S->getSymbol();
2538     if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2539       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2540       ARMConstantPoolValue *CPV =
2541         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2542                                       ARMPCLabelIndex, 4);
2543       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4));
2544       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2545       Callee = DAG.getLoad(
2546           PtrVt, dl, DAG.getEntryNode(), CPAddr,
2547           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2548       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2549       Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel);
2550     } else {
2551       Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0);
2552     }
2553   }
2554 
2555   if (isCmseNSCall) {
2556     assert(!isARMFunc && !isDirect &&
2557            "Cannot handle call to ARM function or direct call");
2558     if (NumBytes > 0) {
2559       DiagnosticInfoUnsupported Diag(DAG.getMachineFunction().getFunction(),
2560                                      "call to non-secure function would "
2561                                      "require passing arguments on stack",
2562                                      dl.getDebugLoc());
2563       DAG.getContext()->diagnose(Diag);
2564     }
2565     if (isStructRet) {
2566       DiagnosticInfoUnsupported Diag(
2567           DAG.getMachineFunction().getFunction(),
2568           "call to non-secure function would return value through pointer",
2569           dl.getDebugLoc());
2570       DAG.getContext()->diagnose(Diag);
2571     }
2572   }
2573 
2574   // FIXME: handle tail calls differently.
2575   unsigned CallOpc;
2576   if (Subtarget->isThumb()) {
2577     if (isCmseNSCall)
2578       CallOpc = ARMISD::tSECALL;
2579     else if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
2580       CallOpc = ARMISD::CALL_NOLINK;
2581     else
2582       CallOpc = ARMISD::CALL;
2583   } else {
2584     if (!isDirect && !Subtarget->hasV5TOps())
2585       CallOpc = ARMISD::CALL_NOLINK;
2586     else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
2587              // Emit regular call when code size is the priority
2588              !Subtarget->hasMinSize())
2589       // "mov lr, pc; b _foo" to avoid confusing the RSP
2590       CallOpc = ARMISD::CALL_NOLINK;
2591     else
2592       CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
2593   }
2594 
2595   std::vector<SDValue> Ops;
2596   Ops.push_back(Chain);
2597   Ops.push_back(Callee);
2598 
2599   // Add argument registers to the end of the list so that they are known live
2600   // into the call.
2601   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2602     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
2603                                   RegsToPass[i].second.getValueType()));
2604 
2605   // Add a register mask operand representing the call-preserved registers.
2606   if (!isTailCall) {
2607     const uint32_t *Mask;
2608     const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
2609     if (isThisReturn) {
2610       // For 'this' returns, use the R0-preserving mask if applicable
2611       Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
2612       if (!Mask) {
2613         // Set isThisReturn to false if the calling convention is not one that
2614         // allows 'returned' to be modeled in this way, so LowerCallResult does
2615         // not try to pass 'this' straight through
2616         isThisReturn = false;
2617         Mask = ARI->getCallPreservedMask(MF, CallConv);
2618       }
2619     } else
2620       Mask = ARI->getCallPreservedMask(MF, CallConv);
2621 
2622     assert(Mask && "Missing call preserved mask for calling convention");
2623     Ops.push_back(DAG.getRegisterMask(Mask));
2624   }
2625 
2626   if (InFlag.getNode())
2627     Ops.push_back(InFlag);
2628 
2629   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2630   if (isTailCall) {
2631     MF.getFrameInfo().setHasTailCall();
2632     SDValue Ret = DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
2633     DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
2634     return Ret;
2635   }
2636 
2637   // Returns a chain and a flag for retval copy to use.
2638   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
2639   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
2640   InFlag = Chain.getValue(1);
2641   DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
2642 
2643   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
2644                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
2645   if (!Ins.empty())
2646     InFlag = Chain.getValue(1);
2647 
2648   // Handle result values, copying them out of physregs into vregs that we
2649   // return.
2650   return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
2651                          InVals, isThisReturn,
2652                          isThisReturn ? OutVals[0] : SDValue());
2653 }
2654 
2655 /// HandleByVal - Every parameter *after* a byval parameter is passed
2656 /// on the stack.  Remember the next parameter register to allocate,
2657 /// and then confiscate the rest of the parameter registers to insure
2658 /// this.
2659 void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
2660                                     Align Alignment) const {
2661   // Byval (as with any stack) slots are always at least 4 byte aligned.
2662   Alignment = std::max(Alignment, Align(4));
2663 
2664   unsigned Reg = State->AllocateReg(GPRArgRegs);
2665   if (!Reg)
2666     return;
2667 
2668   unsigned AlignInRegs = Alignment.value() / 4;
2669   unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
2670   for (unsigned i = 0; i < Waste; ++i)
2671     Reg = State->AllocateReg(GPRArgRegs);
2672 
2673   if (!Reg)
2674     return;
2675 
2676   unsigned Excess = 4 * (ARM::R4 - Reg);
2677 
2678   // Special case when NSAA != SP and parameter size greater than size of
2679   // all remained GPR regs. In that case we can't split parameter, we must
2680   // send it to stack. We also must set NCRN to R4, so waste all
2681   // remained registers.
2682   const unsigned NSAAOffset = State->getNextStackOffset();
2683   if (NSAAOffset != 0 && Size > Excess) {
2684     while (State->AllocateReg(GPRArgRegs))
2685       ;
2686     return;
2687   }
2688 
2689   // First register for byval parameter is the first register that wasn't
2690   // allocated before this method call, so it would be "reg".
2691   // If parameter is small enough to be saved in range [reg, r4), then
2692   // the end (first after last) register would be reg + param-size-in-regs,
2693   // else parameter would be splitted between registers and stack,
2694   // end register would be r4 in this case.
2695   unsigned ByValRegBegin = Reg;
2696   unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
2697   State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
2698   // Note, first register is allocated in the beginning of function already,
2699   // allocate remained amount of registers we need.
2700   for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
2701     State->AllocateReg(GPRArgRegs);
2702   // A byval parameter that is split between registers and memory needs its
2703   // size truncated here.
2704   // In the case where the entire structure fits in registers, we set the
2705   // size in memory to zero.
2706   Size = std::max<int>(Size - Excess, 0);
2707 }
2708 
2709 /// MatchingStackOffset - Return true if the given stack call argument is
2710 /// already available in the same position (relatively) of the caller's
2711 /// incoming argument stack.
2712 static
2713 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
2714                          MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
2715                          const TargetInstrInfo *TII) {
2716   unsigned Bytes = Arg.getValueSizeInBits() / 8;
2717   int FI = std::numeric_limits<int>::max();
2718   if (Arg.getOpcode() == ISD::CopyFromReg) {
2719     unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
2720     if (!Register::isVirtualRegister(VR))
2721       return false;
2722     MachineInstr *Def = MRI->getVRegDef(VR);
2723     if (!Def)
2724       return false;
2725     if (!Flags.isByVal()) {
2726       if (!TII->isLoadFromStackSlot(*Def, FI))
2727         return false;
2728     } else {
2729       return false;
2730     }
2731   } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2732     if (Flags.isByVal())
2733       // ByVal argument is passed in as a pointer but it's now being
2734       // dereferenced. e.g.
2735       // define @foo(%struct.X* %A) {
2736       //   tail call @bar(%struct.X* byval %A)
2737       // }
2738       return false;
2739     SDValue Ptr = Ld->getBasePtr();
2740     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
2741     if (!FINode)
2742       return false;
2743     FI = FINode->getIndex();
2744   } else
2745     return false;
2746 
2747   assert(FI != std::numeric_limits<int>::max());
2748   if (!MFI.isFixedObjectIndex(FI))
2749     return false;
2750   return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI);
2751 }
2752 
2753 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
2754 /// for tail call optimization. Targets which want to do tail call
2755 /// optimization should implement this function.
2756 bool ARMTargetLowering::IsEligibleForTailCallOptimization(
2757     SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
2758     bool isCalleeStructRet, bool isCallerStructRet,
2759     const SmallVectorImpl<ISD::OutputArg> &Outs,
2760     const SmallVectorImpl<SDValue> &OutVals,
2761     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
2762     const bool isIndirect) const {
2763   MachineFunction &MF = DAG.getMachineFunction();
2764   const Function &CallerF = MF.getFunction();
2765   CallingConv::ID CallerCC = CallerF.getCallingConv();
2766 
2767   assert(Subtarget->supportsTailCall());
2768 
2769   // Indirect tail calls cannot be optimized for Thumb1 if the args
2770   // to the call take up r0-r3. The reason is that there are no legal registers
2771   // left to hold the pointer to the function to be called.
2772   if (Subtarget->isThumb1Only() && Outs.size() >= 4 &&
2773       (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect))
2774     return false;
2775 
2776   // Look for obvious safe cases to perform tail call optimization that do not
2777   // require ABI changes. This is what gcc calls sibcall.
2778 
2779   // Exception-handling functions need a special set of instructions to indicate
2780   // a return to the hardware. Tail-calling another function would probably
2781   // break this.
2782   if (CallerF.hasFnAttribute("interrupt"))
2783     return false;
2784 
2785   // Also avoid sibcall optimization if either caller or callee uses struct
2786   // return semantics.
2787   if (isCalleeStructRet || isCallerStructRet)
2788     return false;
2789 
2790   // Externally-defined functions with weak linkage should not be
2791   // tail-called on ARM when the OS does not support dynamic
2792   // pre-emption of symbols, as the AAELF spec requires normal calls
2793   // to undefined weak functions to be replaced with a NOP or jump to the
2794   // next instruction. The behaviour of branch instructions in this
2795   // situation (as used for tail calls) is implementation-defined, so we
2796   // cannot rely on the linker replacing the tail call with a return.
2797   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2798     const GlobalValue *GV = G->getGlobal();
2799     const Triple &TT = getTargetMachine().getTargetTriple();
2800     if (GV->hasExternalWeakLinkage() &&
2801         (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
2802       return false;
2803   }
2804 
2805   // Check that the call results are passed in the same way.
2806   LLVMContext &C = *DAG.getContext();
2807   if (!CCState::resultsCompatible(
2808           getEffectiveCallingConv(CalleeCC, isVarArg),
2809           getEffectiveCallingConv(CallerCC, CallerF.isVarArg()), MF, C, Ins,
2810           CCAssignFnForReturn(CalleeCC, isVarArg),
2811           CCAssignFnForReturn(CallerCC, CallerF.isVarArg())))
2812     return false;
2813   // The callee has to preserve all registers the caller needs to preserve.
2814   const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
2815   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2816   if (CalleeCC != CallerCC) {
2817     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2818     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2819       return false;
2820   }
2821 
2822   // If Caller's vararg or byval argument has been split between registers and
2823   // stack, do not perform tail call, since part of the argument is in caller's
2824   // local frame.
2825   const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
2826   if (AFI_Caller->getArgRegsSaveSize())
2827     return false;
2828 
2829   // If the callee takes no arguments then go on to check the results of the
2830   // call.
2831   if (!Outs.empty()) {
2832     // Check if stack adjustment is needed. For now, do not do this if any
2833     // argument is passed on the stack.
2834     SmallVector<CCValAssign, 16> ArgLocs;
2835     CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
2836     CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
2837     if (CCInfo.getNextStackOffset()) {
2838       // Check if the arguments are already laid out in the right way as
2839       // the caller's fixed stack objects.
2840       MachineFrameInfo &MFI = MF.getFrameInfo();
2841       const MachineRegisterInfo *MRI = &MF.getRegInfo();
2842       const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2843       for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
2844            i != e;
2845            ++i, ++realArgIdx) {
2846         CCValAssign &VA = ArgLocs[i];
2847         EVT RegVT = VA.getLocVT();
2848         SDValue Arg = OutVals[realArgIdx];
2849         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2850         if (VA.getLocInfo() == CCValAssign::Indirect)
2851           return false;
2852         if (VA.needsCustom() && (RegVT == MVT::f64 || RegVT == MVT::v2f64)) {
2853           // f64 and vector types are split into multiple registers or
2854           // register/stack-slot combinations.  The types will not match
2855           // the registers; give up on memory f64 refs until we figure
2856           // out what to do about this.
2857           if (!VA.isRegLoc())
2858             return false;
2859           if (!ArgLocs[++i].isRegLoc())
2860             return false;
2861           if (RegVT == MVT::v2f64) {
2862             if (!ArgLocs[++i].isRegLoc())
2863               return false;
2864             if (!ArgLocs[++i].isRegLoc())
2865               return false;
2866           }
2867         } else if (!VA.isRegLoc()) {
2868           if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
2869                                    MFI, MRI, TII))
2870             return false;
2871         }
2872       }
2873     }
2874 
2875     const MachineRegisterInfo &MRI = MF.getRegInfo();
2876     if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
2877       return false;
2878   }
2879 
2880   return true;
2881 }
2882 
2883 bool
2884 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2885                                   MachineFunction &MF, bool isVarArg,
2886                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
2887                                   LLVMContext &Context) const {
2888   SmallVector<CCValAssign, 16> RVLocs;
2889   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2890   return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2891 }
2892 
2893 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
2894                                     const SDLoc &DL, SelectionDAG &DAG) {
2895   const MachineFunction &MF = DAG.getMachineFunction();
2896   const Function &F = MF.getFunction();
2897 
2898   StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString();
2899 
2900   // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
2901   // version of the "preferred return address". These offsets affect the return
2902   // instruction if this is a return from PL1 without hypervisor extensions.
2903   //    IRQ/FIQ: +4     "subs pc, lr, #4"
2904   //    SWI:     0      "subs pc, lr, #0"
2905   //    ABORT:   +4     "subs pc, lr, #4"
2906   //    UNDEF:   +4/+2  "subs pc, lr, #0"
2907   // UNDEF varies depending on where the exception came from ARM or Thumb
2908   // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
2909 
2910   int64_t LROffset;
2911   if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
2912       IntKind == "ABORT")
2913     LROffset = 4;
2914   else if (IntKind == "SWI" || IntKind == "UNDEF")
2915     LROffset = 0;
2916   else
2917     report_fatal_error("Unsupported interrupt attribute. If present, value "
2918                        "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
2919 
2920   RetOps.insert(RetOps.begin() + 1,
2921                 DAG.getConstant(LROffset, DL, MVT::i32, false));
2922 
2923   return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
2924 }
2925 
2926 SDValue
2927 ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2928                                bool isVarArg,
2929                                const SmallVectorImpl<ISD::OutputArg> &Outs,
2930                                const SmallVectorImpl<SDValue> &OutVals,
2931                                const SDLoc &dl, SelectionDAG &DAG) const {
2932   // CCValAssign - represent the assignment of the return value to a location.
2933   SmallVector<CCValAssign, 16> RVLocs;
2934 
2935   // CCState - Info about the registers and stack slots.
2936   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2937                  *DAG.getContext());
2938 
2939   // Analyze outgoing return values.
2940   CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2941 
2942   SDValue Flag;
2943   SmallVector<SDValue, 4> RetOps;
2944   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2945   bool isLittleEndian = Subtarget->isLittle();
2946 
2947   MachineFunction &MF = DAG.getMachineFunction();
2948   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2949   AFI->setReturnRegsCount(RVLocs.size());
2950 
2951  // Report error if cmse entry function returns structure through first ptr arg.
2952   if (AFI->isCmseNSEntryFunction() && MF.getFunction().hasStructRetAttr()) {
2953     // Note: using an empty SDLoc(), as the first line of the function is a
2954     // better place to report than the last line.
2955     DiagnosticInfoUnsupported Diag(
2956         DAG.getMachineFunction().getFunction(),
2957         "secure entry function would return value through pointer",
2958         SDLoc().getDebugLoc());
2959     DAG.getContext()->diagnose(Diag);
2960   }
2961 
2962   // Copy the result values into the output registers.
2963   for (unsigned i = 0, realRVLocIdx = 0;
2964        i != RVLocs.size();
2965        ++i, ++realRVLocIdx) {
2966     CCValAssign &VA = RVLocs[i];
2967     assert(VA.isRegLoc() && "Can only return in registers!");
2968 
2969     SDValue Arg = OutVals[realRVLocIdx];
2970     bool ReturnF16 = false;
2971 
2972     if (Subtarget->hasFullFP16() && Subtarget->isTargetHardFloat()) {
2973       // Half-precision return values can be returned like this:
2974       //
2975       // t11 f16 = fadd ...
2976       // t12: i16 = bitcast t11
2977       //   t13: i32 = zero_extend t12
2978       // t14: f32 = bitcast t13  <~~~~~~~ Arg
2979       //
2980       // to avoid code generation for bitcasts, we simply set Arg to the node
2981       // that produces the f16 value, t11 in this case.
2982       //
2983       if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) {
2984         SDValue ZE = Arg.getOperand(0);
2985         if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) {
2986           SDValue BC = ZE.getOperand(0);
2987           if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) {
2988             Arg = BC.getOperand(0);
2989             ReturnF16 = true;
2990           }
2991         }
2992       }
2993     }
2994 
2995     switch (VA.getLocInfo()) {
2996     default: llvm_unreachable("Unknown loc info!");
2997     case CCValAssign::Full: break;
2998     case CCValAssign::BCvt:
2999       if (!ReturnF16)
3000         Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
3001       break;
3002     }
3003 
3004     // Mask f16 arguments if this is a CMSE nonsecure entry.
3005     auto RetVT = Outs[realRVLocIdx].ArgVT;
3006     if (AFI->isCmseNSEntryFunction() && (RetVT == MVT::f16)) {
3007       if (VA.needsCustom() && VA.getValVT() == MVT::f16) {
3008         Arg = MoveFromHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Arg);
3009       } else {
3010         auto LocBits = VA.getLocVT().getSizeInBits();
3011         auto MaskValue = APInt::getLowBitsSet(LocBits, RetVT.getSizeInBits());
3012         SDValue Mask =
3013             DAG.getConstant(MaskValue, dl, MVT::getIntegerVT(LocBits));
3014         Arg = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocBits), Arg);
3015         Arg = DAG.getNode(ISD::AND, dl, MVT::getIntegerVT(LocBits), Arg, Mask);
3016         Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
3017       }
3018     }
3019 
3020     if (VA.needsCustom() &&
3021         (VA.getLocVT() == MVT::v2f64 || VA.getLocVT() == MVT::f64)) {
3022       if (VA.getLocVT() == MVT::v2f64) {
3023         // Extract the first half and return it in two registers.
3024         SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
3025                                    DAG.getConstant(0, dl, MVT::i32));
3026         SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
3027                                        DAG.getVTList(MVT::i32, MVT::i32), Half);
3028 
3029         Chain =
3030             DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
3031                              HalfGPRs.getValue(isLittleEndian ? 0 : 1), Flag);
3032         Flag = Chain.getValue(1);
3033         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3034         VA = RVLocs[++i]; // skip ahead to next loc
3035         Chain =
3036             DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
3037                              HalfGPRs.getValue(isLittleEndian ? 1 : 0), Flag);
3038         Flag = Chain.getValue(1);
3039         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3040         VA = RVLocs[++i]; // skip ahead to next loc
3041 
3042         // Extract the 2nd half and fall through to handle it as an f64 value.
3043         Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
3044                           DAG.getConstant(1, dl, MVT::i32));
3045       }
3046       // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
3047       // available.
3048       SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
3049                                   DAG.getVTList(MVT::i32, MVT::i32), Arg);
3050       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
3051                                fmrrd.getValue(isLittleEndian ? 0 : 1), Flag);
3052       Flag = Chain.getValue(1);
3053       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3054       VA = RVLocs[++i]; // skip ahead to next loc
3055       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
3056                                fmrrd.getValue(isLittleEndian ? 1 : 0), Flag);
3057     } else
3058       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
3059 
3060     // Guarantee that all emitted copies are
3061     // stuck together, avoiding something bad.
3062     Flag = Chain.getValue(1);
3063     RetOps.push_back(DAG.getRegister(
3064         VA.getLocReg(), ReturnF16 ? Arg.getValueType() : VA.getLocVT()));
3065   }
3066   const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
3067   const MCPhysReg *I =
3068       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
3069   if (I) {
3070     for (; *I; ++I) {
3071       if (ARM::GPRRegClass.contains(*I))
3072         RetOps.push_back(DAG.getRegister(*I, MVT::i32));
3073       else if (ARM::DPRRegClass.contains(*I))
3074         RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
3075       else
3076         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
3077     }
3078   }
3079 
3080   // Update chain and glue.
3081   RetOps[0] = Chain;
3082   if (Flag.getNode())
3083     RetOps.push_back(Flag);
3084 
3085   // CPUs which aren't M-class use a special sequence to return from
3086   // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
3087   // though we use "subs pc, lr, #N").
3088   //
3089   // M-class CPUs actually use a normal return sequence with a special
3090   // (hardware-provided) value in LR, so the normal code path works.
3091   if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") &&
3092       !Subtarget->isMClass()) {
3093     if (Subtarget->isThumb1Only())
3094       report_fatal_error("interrupt attribute is not supported in Thumb1");
3095     return LowerInterruptReturn(RetOps, dl, DAG);
3096   }
3097 
3098   ARMISD::NodeType RetNode = AFI->isCmseNSEntryFunction() ? ARMISD::SERET_FLAG :
3099                                                             ARMISD::RET_FLAG;
3100   return DAG.getNode(RetNode, dl, MVT::Other, RetOps);
3101 }
3102 
3103 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
3104   if (N->getNumValues() != 1)
3105     return false;
3106   if (!N->hasNUsesOfValue(1, 0))
3107     return false;
3108 
3109   SDValue TCChain = Chain;
3110   SDNode *Copy = *N->use_begin();
3111   if (Copy->getOpcode() == ISD::CopyToReg) {
3112     // If the copy has a glue operand, we conservatively assume it isn't safe to
3113     // perform a tail call.
3114     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
3115       return false;
3116     TCChain = Copy->getOperand(0);
3117   } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
3118     SDNode *VMov = Copy;
3119     // f64 returned in a pair of GPRs.
3120     SmallPtrSet<SDNode*, 2> Copies;
3121     for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
3122          UI != UE; ++UI) {
3123       if (UI->getOpcode() != ISD::CopyToReg)
3124         return false;
3125       Copies.insert(*UI);
3126     }
3127     if (Copies.size() > 2)
3128       return false;
3129 
3130     for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
3131          UI != UE; ++UI) {
3132       SDValue UseChain = UI->getOperand(0);
3133       if (Copies.count(UseChain.getNode()))
3134         // Second CopyToReg
3135         Copy = *UI;
3136       else {
3137         // We are at the top of this chain.
3138         // If the copy has a glue operand, we conservatively assume it
3139         // isn't safe to perform a tail call.
3140         if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
3141           return false;
3142         // First CopyToReg
3143         TCChain = UseChain;
3144       }
3145     }
3146   } else if (Copy->getOpcode() == ISD::BITCAST) {
3147     // f32 returned in a single GPR.
3148     if (!Copy->hasOneUse())
3149       return false;
3150     Copy = *Copy->use_begin();
3151     if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
3152       return false;
3153     // If the copy has a glue operand, we conservatively assume it isn't safe to
3154     // perform a tail call.
3155     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
3156       return false;
3157     TCChain = Copy->getOperand(0);
3158   } else {
3159     return false;
3160   }
3161 
3162   bool HasRet = false;
3163   for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
3164        UI != UE; ++UI) {
3165     if (UI->getOpcode() != ARMISD::RET_FLAG &&
3166         UI->getOpcode() != ARMISD::INTRET_FLAG)
3167       return false;
3168     HasRet = true;
3169   }
3170 
3171   if (!HasRet)
3172     return false;
3173 
3174   Chain = TCChain;
3175   return true;
3176 }
3177 
3178 bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
3179   if (!Subtarget->supportsTailCall())
3180     return false;
3181 
3182   if (!CI->isTailCall())
3183     return false;
3184 
3185   return true;
3186 }
3187 
3188 // Trying to write a 64 bit value so need to split into two 32 bit values first,
3189 // and pass the lower and high parts through.
3190 static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) {
3191   SDLoc DL(Op);
3192   SDValue WriteValue = Op->getOperand(2);
3193 
3194   // This function is only supposed to be called for i64 type argument.
3195   assert(WriteValue.getValueType() == MVT::i64
3196           && "LowerWRITE_REGISTER called for non-i64 type argument.");
3197 
3198   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
3199                            DAG.getConstant(0, DL, MVT::i32));
3200   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
3201                            DAG.getConstant(1, DL, MVT::i32));
3202   SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi };
3203   return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops);
3204 }
3205 
3206 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
3207 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
3208 // one of the above mentioned nodes. It has to be wrapped because otherwise
3209 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
3210 // be used to form addressing mode. These wrapped nodes will be selected
3211 // into MOVi.
3212 SDValue ARMTargetLowering::LowerConstantPool(SDValue Op,
3213                                              SelectionDAG &DAG) const {
3214   EVT PtrVT = Op.getValueType();
3215   // FIXME there is no actual debug info here
3216   SDLoc dl(Op);
3217   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
3218   SDValue Res;
3219 
3220   // When generating execute-only code Constant Pools must be promoted to the
3221   // global data section. It's a bit ugly that we can't share them across basic
3222   // blocks, but this way we guarantee that execute-only behaves correct with
3223   // position-independent addressing modes.
3224   if (Subtarget->genExecuteOnly()) {
3225     auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
3226     auto T = const_cast<Type*>(CP->getType());
3227     auto C = const_cast<Constant*>(CP->getConstVal());
3228     auto M = const_cast<Module*>(DAG.getMachineFunction().
3229                                  getFunction().getParent());
3230     auto GV = new GlobalVariable(
3231                     *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C,
3232                     Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" +
3233                     Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" +
3234                     Twine(AFI->createPICLabelUId())
3235                   );
3236     SDValue GA = DAG.getTargetGlobalAddress(dyn_cast<GlobalValue>(GV),
3237                                             dl, PtrVT);
3238     return LowerGlobalAddress(GA, DAG);
3239   }
3240 
3241   if (CP->isMachineConstantPoolEntry())
3242     Res =
3243         DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, CP->getAlign());
3244   else
3245     Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign());
3246   return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
3247 }
3248 
3249 unsigned ARMTargetLowering::getJumpTableEncoding() const {
3250   return MachineJumpTableInfo::EK_Inline;
3251 }
3252 
3253 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
3254                                              SelectionDAG &DAG) const {
3255   MachineFunction &MF = DAG.getMachineFunction();
3256   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3257   unsigned ARMPCLabelIndex = 0;
3258   SDLoc DL(Op);
3259   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3260   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
3261   SDValue CPAddr;
3262   bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI();
3263   if (!IsPositionIndependent) {
3264     CPAddr = DAG.getTargetConstantPool(BA, PtrVT, Align(4));
3265   } else {
3266     unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
3267     ARMPCLabelIndex = AFI->createPICLabelUId();
3268     ARMConstantPoolValue *CPV =
3269       ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
3270                                       ARMCP::CPBlockAddress, PCAdj);
3271     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3272   }
3273   CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
3274   SDValue Result = DAG.getLoad(
3275       PtrVT, DL, DAG.getEntryNode(), CPAddr,
3276       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3277   if (!IsPositionIndependent)
3278     return Result;
3279   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
3280   return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
3281 }
3282 
3283 /// Convert a TLS address reference into the correct sequence of loads
3284 /// and calls to compute the variable's address for Darwin, and return an
3285 /// SDValue containing the final node.
3286 
3287 /// Darwin only has one TLS scheme which must be capable of dealing with the
3288 /// fully general situation, in the worst case. This means:
3289 ///     + "extern __thread" declaration.
3290 ///     + Defined in a possibly unknown dynamic library.
3291 ///
3292 /// The general system is that each __thread variable has a [3 x i32] descriptor
3293 /// which contains information used by the runtime to calculate the address. The
3294 /// only part of this the compiler needs to know about is the first word, which
3295 /// contains a function pointer that must be called with the address of the
3296 /// entire descriptor in "r0".
3297 ///
3298 /// Since this descriptor may be in a different unit, in general access must
3299 /// proceed along the usual ARM rules. A common sequence to produce is:
3300 ///
3301 ///     movw rT1, :lower16:_var$non_lazy_ptr
3302 ///     movt rT1, :upper16:_var$non_lazy_ptr
3303 ///     ldr r0, [rT1]
3304 ///     ldr rT2, [r0]
3305 ///     blx rT2
3306 ///     [...address now in r0...]
3307 SDValue
3308 ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
3309                                                SelectionDAG &DAG) const {
3310   assert(Subtarget->isTargetDarwin() &&
3311          "This function expects a Darwin target");
3312   SDLoc DL(Op);
3313 
3314   // First step is to get the address of the actua global symbol. This is where
3315   // the TLS descriptor lives.
3316   SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);
3317 
3318   // The first entry in the descriptor is a function pointer that we must call
3319   // to obtain the address of the variable.
3320   SDValue Chain = DAG.getEntryNode();
3321   SDValue FuncTLVGet = DAG.getLoad(
3322       MVT::i32, DL, Chain, DescAddr,
3323       MachinePointerInfo::getGOT(DAG.getMachineFunction()),
3324       /* Alignment = */ 4,
3325       MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable |
3326           MachineMemOperand::MOInvariant);
3327   Chain = FuncTLVGet.getValue(1);
3328 
3329   MachineFunction &F = DAG.getMachineFunction();
3330   MachineFrameInfo &MFI = F.getFrameInfo();
3331   MFI.setAdjustsStack(true);
3332 
3333   // TLS calls preserve all registers except those that absolutely must be
3334   // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
3335   // silly).
3336   auto TRI =
3337       getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo();
3338   auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
3339   const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());
3340 
3341   // Finally, we can make the call. This is just a degenerate version of a
3342   // normal AArch64 call node: r0 takes the address of the descriptor, and
3343   // returns the address of the variable in this thread.
3344   Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue());
3345   Chain =
3346       DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
3347                   Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32),
3348                   DAG.getRegisterMask(Mask), Chain.getValue(1));
3349   return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1));
3350 }
3351 
3352 SDValue
3353 ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,
3354                                                 SelectionDAG &DAG) const {
3355   assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
3356 
3357   SDValue Chain = DAG.getEntryNode();
3358   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3359   SDLoc DL(Op);
3360 
3361   // Load the current TEB (thread environment block)
3362   SDValue Ops[] = {Chain,
3363                    DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32),
3364                    DAG.getTargetConstant(15, DL, MVT::i32),
3365                    DAG.getTargetConstant(0, DL, MVT::i32),
3366                    DAG.getTargetConstant(13, DL, MVT::i32),
3367                    DAG.getTargetConstant(0, DL, MVT::i32),
3368                    DAG.getTargetConstant(2, DL, MVT::i32)};
3369   SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
3370                                    DAG.getVTList(MVT::i32, MVT::Other), Ops);
3371 
3372   SDValue TEB = CurrentTEB.getValue(0);
3373   Chain = CurrentTEB.getValue(1);
3374 
3375   // Load the ThreadLocalStoragePointer from the TEB
3376   // A pointer to the TLS array is located at offset 0x2c from the TEB.
3377   SDValue TLSArray =
3378       DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL));
3379   TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
3380 
3381   // The pointer to the thread's TLS data area is at the TLS Index scaled by 4
3382   // offset into the TLSArray.
3383 
3384   // Load the TLS index from the C runtime
3385   SDValue TLSIndex =
3386       DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG);
3387   TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex);
3388   TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo());
3389 
3390   SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
3391                               DAG.getConstant(2, DL, MVT::i32));
3392   SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
3393                             DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
3394                             MachinePointerInfo());
3395 
3396   // Get the offset of the start of the .tls section (section base)
3397   const auto *GA = cast<GlobalAddressSDNode>(Op);
3398   auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL);
3399   SDValue Offset = DAG.getLoad(
3400       PtrVT, DL, Chain,
3401       DAG.getNode(ARMISD::Wrapper, DL, MVT::i32,
3402                   DAG.getTargetConstantPool(CPV, PtrVT, Align(4))),
3403       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3404 
3405   return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset);
3406 }
3407 
3408 // Lower ISD::GlobalTLSAddress using the "general dynamic" model
3409 SDValue
3410 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
3411                                                  SelectionDAG &DAG) const {
3412   SDLoc dl(GA);
3413   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3414   unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
3415   MachineFunction &MF = DAG.getMachineFunction();
3416   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3417   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3418   ARMConstantPoolValue *CPV =
3419     ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
3420                                     ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
3421   SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3422   Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
3423   Argument = DAG.getLoad(
3424       PtrVT, dl, DAG.getEntryNode(), Argument,
3425       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3426   SDValue Chain = Argument.getValue(1);
3427 
3428   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3429   Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
3430 
3431   // call __tls_get_addr.
3432   ArgListTy Args;
3433   ArgListEntry Entry;
3434   Entry.Node = Argument;
3435   Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
3436   Args.push_back(Entry);
3437 
3438   // FIXME: is there useful debug info available here?
3439   TargetLowering::CallLoweringInfo CLI(DAG);
3440   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3441       CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
3442       DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args));
3443 
3444   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3445   return CallResult.first;
3446 }
3447 
3448 // Lower ISD::GlobalTLSAddress using the "initial exec" or
3449 // "local exec" model.
3450 SDValue
3451 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
3452                                         SelectionDAG &DAG,
3453                                         TLSModel::Model model) const {
3454   const GlobalValue *GV = GA->getGlobal();
3455   SDLoc dl(GA);
3456   SDValue Offset;
3457   SDValue Chain = DAG.getEntryNode();
3458   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3459   // Get the Thread Pointer
3460   SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
3461 
3462   if (model == TLSModel::InitialExec) {
3463     MachineFunction &MF = DAG.getMachineFunction();
3464     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3465     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3466     // Initial exec model.
3467     unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
3468     ARMConstantPoolValue *CPV =
3469       ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
3470                                       ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
3471                                       true);
3472     Offset = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3473     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
3474     Offset = DAG.getLoad(
3475         PtrVT, dl, Chain, Offset,
3476         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3477     Chain = Offset.getValue(1);
3478 
3479     SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3480     Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
3481 
3482     Offset = DAG.getLoad(
3483         PtrVT, dl, Chain, Offset,
3484         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3485   } else {
3486     // local exec model
3487     assert(model == TLSModel::LocalExec);
3488     ARMConstantPoolValue *CPV =
3489       ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
3490     Offset = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3491     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
3492     Offset = DAG.getLoad(
3493         PtrVT, dl, Chain, Offset,
3494         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3495   }
3496 
3497   // The address of the thread local variable is the add of the thread
3498   // pointer with the offset of the variable.
3499   return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
3500 }
3501 
3502 SDValue
3503 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
3504   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3505   if (DAG.getTarget().useEmulatedTLS())
3506     return LowerToTLSEmulatedModel(GA, DAG);
3507 
3508   if (Subtarget->isTargetDarwin())
3509     return LowerGlobalTLSAddressDarwin(Op, DAG);
3510 
3511   if (Subtarget->isTargetWindows())
3512     return LowerGlobalTLSAddressWindows(Op, DAG);
3513 
3514   // TODO: implement the "local dynamic" model
3515   assert(Subtarget->isTargetELF() && "Only ELF implemented here");
3516   TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());
3517 
3518   switch (model) {
3519     case TLSModel::GeneralDynamic:
3520     case TLSModel::LocalDynamic:
3521       return LowerToTLSGeneralDynamicModel(GA, DAG);
3522     case TLSModel::InitialExec:
3523     case TLSModel::LocalExec:
3524       return LowerToTLSExecModels(GA, DAG, model);
3525   }
3526   llvm_unreachable("bogus TLS model");
3527 }
3528 
3529 /// Return true if all users of V are within function F, looking through
3530 /// ConstantExprs.
3531 static bool allUsersAreInFunction(const Value *V, const Function *F) {
3532   SmallVector<const User*,4> Worklist;
3533   for (auto *U : V->users())
3534     Worklist.push_back(U);
3535   while (!Worklist.empty()) {
3536     auto *U = Worklist.pop_back_val();
3537     if (isa<ConstantExpr>(U)) {
3538       for (auto *UU : U->users())
3539         Worklist.push_back(UU);
3540       continue;
3541     }
3542 
3543     auto *I = dyn_cast<Instruction>(U);
3544     if (!I || I->getParent()->getParent() != F)
3545       return false;
3546   }
3547   return true;
3548 }
3549 
3550 static SDValue promoteToConstantPool(const ARMTargetLowering *TLI,
3551                                      const GlobalValue *GV, SelectionDAG &DAG,
3552                                      EVT PtrVT, const SDLoc &dl) {
3553   // If we're creating a pool entry for a constant global with unnamed address,
3554   // and the global is small enough, we can emit it inline into the constant pool
3555   // to save ourselves an indirection.
3556   //
3557   // This is a win if the constant is only used in one function (so it doesn't
3558   // need to be duplicated) or duplicating the constant wouldn't increase code
3559   // size (implying the constant is no larger than 4 bytes).
3560   const Function &F = DAG.getMachineFunction().getFunction();
3561 
3562   // We rely on this decision to inline being idemopotent and unrelated to the
3563   // use-site. We know that if we inline a variable at one use site, we'll
3564   // inline it elsewhere too (and reuse the constant pool entry). Fast-isel
3565   // doesn't know about this optimization, so bail out if it's enabled else
3566   // we could decide to inline here (and thus never emit the GV) but require
3567   // the GV from fast-isel generated code.
3568   if (!EnableConstpoolPromotion ||
3569       DAG.getMachineFunction().getTarget().Options.EnableFastISel)
3570       return SDValue();
3571 
3572   auto *GVar = dyn_cast<GlobalVariable>(GV);
3573   if (!GVar || !GVar->hasInitializer() ||
3574       !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
3575       !GVar->hasLocalLinkage())
3576     return SDValue();
3577 
3578   // If we inline a value that contains relocations, we move the relocations
3579   // from .data to .text. This is not allowed in position-independent code.
3580   auto *Init = GVar->getInitializer();
3581   if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) &&
3582       Init->needsRelocation())
3583     return SDValue();
3584 
3585   // The constant islands pass can only really deal with alignment requests
3586   // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote
3587   // any type wanting greater alignment requirements than 4 bytes. We also
3588   // can only promote constants that are multiples of 4 bytes in size or
3589   // are paddable to a multiple of 4. Currently we only try and pad constants
3590   // that are strings for simplicity.
3591   auto *CDAInit = dyn_cast<ConstantDataArray>(Init);
3592   unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType());
3593   Align PrefAlign = DAG.getDataLayout().getPreferredAlign(GVar);
3594   unsigned RequiredPadding = 4 - (Size % 4);
3595   bool PaddingPossible =
3596     RequiredPadding == 4 || (CDAInit && CDAInit->isString());
3597   if (!PaddingPossible || PrefAlign > 4 || Size > ConstpoolPromotionMaxSize ||
3598       Size == 0)
3599     return SDValue();
3600 
3601   unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
3602   MachineFunction &MF = DAG.getMachineFunction();
3603   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3604 
3605   // We can't bloat the constant pool too much, else the ConstantIslands pass
3606   // may fail to converge. If we haven't promoted this global yet (it may have
3607   // multiple uses), and promoting it would increase the constant pool size (Sz
3608   // > 4), ensure we have space to do so up to MaxTotal.
3609   if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4)
3610     if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >=
3611         ConstpoolPromotionMaxTotal)
3612       return SDValue();
3613 
3614   // This is only valid if all users are in a single function; we can't clone
3615   // the constant in general. The LLVM IR unnamed_addr allows merging
3616   // constants, but not cloning them.
3617   //
3618   // We could potentially allow cloning if we could prove all uses of the
3619   // constant in the current function don't care about the address, like
3620   // printf format strings. But that isn't implemented for now.
3621   if (!allUsersAreInFunction(GVar, &F))
3622     return SDValue();
3623 
3624   // We're going to inline this global. Pad it out if needed.
3625   if (RequiredPadding != 4) {
3626     StringRef S = CDAInit->getAsString();
3627 
3628     SmallVector<uint8_t,16> V(S.size());
3629     std::copy(S.bytes_begin(), S.bytes_end(), V.begin());
3630     while (RequiredPadding--)
3631       V.push_back(0);
3632     Init = ConstantDataArray::get(*DAG.getContext(), V);
3633   }
3634 
3635   auto CPVal = ARMConstantPoolConstant::Create(GVar, Init);
3636   SDValue CPAddr = DAG.getTargetConstantPool(CPVal, PtrVT, Align(4));
3637   if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) {
3638     AFI->markGlobalAsPromotedToConstantPool(GVar);
3639     AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() +
3640                                       PaddedSize - 4);
3641   }
3642   ++NumConstpoolPromoted;
3643   return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3644 }
3645 
3646 bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const {
3647   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
3648     if (!(GV = GA->getBaseObject()))
3649       return false;
3650   if (const auto *V = dyn_cast<GlobalVariable>(GV))
3651     return V->isConstant();
3652   return isa<Function>(GV);
3653 }
3654 
3655 SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op,
3656                                               SelectionDAG &DAG) const {
3657   switch (Subtarget->getTargetTriple().getObjectFormat()) {
3658   default: llvm_unreachable("unknown object format");
3659   case Triple::COFF:
3660     return LowerGlobalAddressWindows(Op, DAG);
3661   case Triple::ELF:
3662     return LowerGlobalAddressELF(Op, DAG);
3663   case Triple::MachO:
3664     return LowerGlobalAddressDarwin(Op, DAG);
3665   }
3666 }
3667 
3668 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
3669                                                  SelectionDAG &DAG) const {
3670   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3671   SDLoc dl(Op);
3672   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3673   const TargetMachine &TM = getTargetMachine();
3674   bool IsRO = isReadOnly(GV);
3675 
3676   // promoteToConstantPool only if not generating XO text section
3677   if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly())
3678     if (SDValue V = promoteToConstantPool(this, GV, DAG, PtrVT, dl))
3679       return V;
3680 
3681   if (isPositionIndependent()) {
3682     bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
3683     SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3684                                            UseGOT_PREL ? ARMII::MO_GOT : 0);
3685     SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
3686     if (UseGOT_PREL)
3687       Result =
3688           DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
3689                       MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3690     return Result;
3691   } else if (Subtarget->isROPI() && IsRO) {
3692     // PC-relative.
3693     SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
3694     SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
3695     return Result;
3696   } else if (Subtarget->isRWPI() && !IsRO) {
3697     // SB-relative.
3698     SDValue RelAddr;
3699     if (Subtarget->useMovt()) {
3700       ++NumMovwMovt;
3701       SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_SBREL);
3702       RelAddr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, G);
3703     } else { // use literal pool for address constant
3704       ARMConstantPoolValue *CPV =
3705         ARMConstantPoolConstant::Create(GV, ARMCP::SBREL);
3706       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3707       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3708       RelAddr = DAG.getLoad(
3709           PtrVT, dl, DAG.getEntryNode(), CPAddr,
3710           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3711     }
3712     SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT);
3713     SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, RelAddr);
3714     return Result;
3715   }
3716 
3717   // If we have T2 ops, we can materialize the address directly via movt/movw
3718   // pair. This is always cheaper.
3719   if (Subtarget->useMovt()) {
3720     ++NumMovwMovt;
3721     // FIXME: Once remat is capable of dealing with instructions with register
3722     // operands, expand this into two nodes.
3723     return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
3724                        DAG.getTargetGlobalAddress(GV, dl, PtrVT));
3725   } else {
3726     SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, Align(4));
3727     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3728     return DAG.getLoad(
3729         PtrVT, dl, DAG.getEntryNode(), CPAddr,
3730         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3731   }
3732 }
3733 
3734 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
3735                                                     SelectionDAG &DAG) const {
3736   assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3737          "ROPI/RWPI not currently supported for Darwin");
3738   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3739   SDLoc dl(Op);
3740   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3741 
3742   if (Subtarget->useMovt())
3743     ++NumMovwMovt;
3744 
3745   // FIXME: Once remat is capable of dealing with instructions with register
3746   // operands, expand this into multiple nodes
3747   unsigned Wrapper =
3748       isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper;
3749 
3750   SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
3751   SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);
3752 
3753   if (Subtarget->isGVIndirectSymbol(GV))
3754     Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
3755                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3756   return Result;
3757 }
3758 
3759 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
3760                                                      SelectionDAG &DAG) const {
3761   assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
3762   assert(Subtarget->useMovt() &&
3763          "Windows on ARM expects to use movw/movt");
3764   assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3765          "ROPI/RWPI not currently supported for Windows");
3766 
3767   const TargetMachine &TM = getTargetMachine();
3768   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3769   ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG;
3770   if (GV->hasDLLImportStorageClass())
3771     TargetFlags = ARMII::MO_DLLIMPORT;
3772   else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
3773     TargetFlags = ARMII::MO_COFFSTUB;
3774   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3775   SDValue Result;
3776   SDLoc DL(Op);
3777 
3778   ++NumMovwMovt;
3779 
3780   // FIXME: Once remat is capable of dealing with instructions with register
3781   // operands, expand this into two nodes.
3782   Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
3783                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*offset=*/0,
3784                                                   TargetFlags));
3785   if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB))
3786     Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
3787                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3788   return Result;
3789 }
3790 
3791 SDValue
3792 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
3793   SDLoc dl(Op);
3794   SDValue Val = DAG.getConstant(0, dl, MVT::i32);
3795   return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
3796                      DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
3797                      Op.getOperand(1), Val);
3798 }
3799 
3800 SDValue
3801 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
3802   SDLoc dl(Op);
3803   return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
3804                      Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
3805 }
3806 
3807 SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
3808                                                       SelectionDAG &DAG) const {
3809   SDLoc dl(Op);
3810   return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other,
3811                      Op.getOperand(0));
3812 }
3813 
3814 SDValue ARMTargetLowering::LowerINTRINSIC_VOID(
3815     SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const {
3816   unsigned IntNo =
3817       cast<ConstantSDNode>(
3818           Op.getOperand(Op.getOperand(0).getValueType() == MVT::Other))
3819           ->getZExtValue();
3820   switch (IntNo) {
3821     default:
3822       return SDValue();  // Don't custom lower most intrinsics.
3823     case Intrinsic::arm_gnu_eabi_mcount: {
3824       MachineFunction &MF = DAG.getMachineFunction();
3825       EVT PtrVT = getPointerTy(DAG.getDataLayout());
3826       SDLoc dl(Op);
3827       SDValue Chain = Op.getOperand(0);
3828       // call "\01__gnu_mcount_nc"
3829       const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
3830       const uint32_t *Mask =
3831           ARI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
3832       assert(Mask && "Missing call preserved mask for calling convention");
3833       // Mark LR an implicit live-in.
3834       unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
3835       SDValue ReturnAddress =
3836           DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, PtrVT);
3837       constexpr EVT ResultTys[] = {MVT::Other, MVT::Glue};
3838       SDValue Callee =
3839           DAG.getTargetExternalSymbol("\01__gnu_mcount_nc", PtrVT, 0);
3840       SDValue RegisterMask = DAG.getRegisterMask(Mask);
3841       if (Subtarget->isThumb())
3842         return SDValue(
3843             DAG.getMachineNode(
3844                 ARM::tBL_PUSHLR, dl, ResultTys,
3845                 {ReturnAddress, DAG.getTargetConstant(ARMCC::AL, dl, PtrVT),
3846                  DAG.getRegister(0, PtrVT), Callee, RegisterMask, Chain}),
3847             0);
3848       return SDValue(
3849           DAG.getMachineNode(ARM::BL_PUSHLR, dl, ResultTys,
3850                              {ReturnAddress, Callee, RegisterMask, Chain}),
3851           0);
3852     }
3853   }
3854 }
3855 
3856 SDValue
3857 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
3858                                           const ARMSubtarget *Subtarget) const {
3859   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3860   SDLoc dl(Op);
3861   switch (IntNo) {
3862   default: return SDValue();    // Don't custom lower most intrinsics.
3863   case Intrinsic::thread_pointer: {
3864     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3865     return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
3866   }
3867   case Intrinsic::arm_cls: {
3868     const SDValue &Operand = Op.getOperand(1);
3869     const EVT VTy = Op.getValueType();
3870     SDValue SRA =
3871         DAG.getNode(ISD::SRA, dl, VTy, Operand, DAG.getConstant(31, dl, VTy));
3872     SDValue XOR = DAG.getNode(ISD::XOR, dl, VTy, SRA, Operand);
3873     SDValue SHL =
3874         DAG.getNode(ISD::SHL, dl, VTy, XOR, DAG.getConstant(1, dl, VTy));
3875     SDValue OR =
3876         DAG.getNode(ISD::OR, dl, VTy, SHL, DAG.getConstant(1, dl, VTy));
3877     SDValue Result = DAG.getNode(ISD::CTLZ, dl, VTy, OR);
3878     return Result;
3879   }
3880   case Intrinsic::arm_cls64: {
3881     // cls(x) = if cls(hi(x)) != 31 then cls(hi(x))
3882     //          else 31 + clz(if hi(x) == 0 then lo(x) else not(lo(x)))
3883     const SDValue &Operand = Op.getOperand(1);
3884     const EVT VTy = Op.getValueType();
3885 
3886     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VTy, Operand,
3887                              DAG.getConstant(1, dl, VTy));
3888     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VTy, Operand,
3889                              DAG.getConstant(0, dl, VTy));
3890     SDValue Constant0 = DAG.getConstant(0, dl, VTy);
3891     SDValue Constant1 = DAG.getConstant(1, dl, VTy);
3892     SDValue Constant31 = DAG.getConstant(31, dl, VTy);
3893     SDValue SRAHi = DAG.getNode(ISD::SRA, dl, VTy, Hi, Constant31);
3894     SDValue XORHi = DAG.getNode(ISD::XOR, dl, VTy, SRAHi, Hi);
3895     SDValue SHLHi = DAG.getNode(ISD::SHL, dl, VTy, XORHi, Constant1);
3896     SDValue ORHi = DAG.getNode(ISD::OR, dl, VTy, SHLHi, Constant1);
3897     SDValue CLSHi = DAG.getNode(ISD::CTLZ, dl, VTy, ORHi);
3898     SDValue CheckLo =
3899         DAG.getSetCC(dl, MVT::i1, CLSHi, Constant31, ISD::CondCode::SETEQ);
3900     SDValue HiIsZero =
3901         DAG.getSetCC(dl, MVT::i1, Hi, Constant0, ISD::CondCode::SETEQ);
3902     SDValue AdjustedLo =
3903         DAG.getSelect(dl, VTy, HiIsZero, Lo, DAG.getNOT(dl, Lo, VTy));
3904     SDValue CLZAdjustedLo = DAG.getNode(ISD::CTLZ, dl, VTy, AdjustedLo);
3905     SDValue Result =
3906         DAG.getSelect(dl, VTy, CheckLo,
3907                       DAG.getNode(ISD::ADD, dl, VTy, CLZAdjustedLo, Constant31), CLSHi);
3908     return Result;
3909   }
3910   case Intrinsic::eh_sjlj_lsda: {
3911     MachineFunction &MF = DAG.getMachineFunction();
3912     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3913     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3914     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3915     SDValue CPAddr;
3916     bool IsPositionIndependent = isPositionIndependent();
3917     unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
3918     ARMConstantPoolValue *CPV =
3919       ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex,
3920                                       ARMCP::CPLSDA, PCAdj);
3921     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3922     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3923     SDValue Result = DAG.getLoad(
3924         PtrVT, dl, DAG.getEntryNode(), CPAddr,
3925         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3926 
3927     if (IsPositionIndependent) {
3928       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3929       Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
3930     }
3931     return Result;
3932   }
3933   case Intrinsic::arm_neon_vabs:
3934     return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(),
3935                         Op.getOperand(1));
3936   case Intrinsic::arm_neon_vmulls:
3937   case Intrinsic::arm_neon_vmullu: {
3938     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
3939       ? ARMISD::VMULLs : ARMISD::VMULLu;
3940     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3941                        Op.getOperand(1), Op.getOperand(2));
3942   }
3943   case Intrinsic::arm_neon_vminnm:
3944   case Intrinsic::arm_neon_vmaxnm: {
3945     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
3946       ? ISD::FMINNUM : ISD::FMAXNUM;
3947     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3948                        Op.getOperand(1), Op.getOperand(2));
3949   }
3950   case Intrinsic::arm_neon_vminu:
3951   case Intrinsic::arm_neon_vmaxu: {
3952     if (Op.getValueType().isFloatingPoint())
3953       return SDValue();
3954     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
3955       ? ISD::UMIN : ISD::UMAX;
3956     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3957                          Op.getOperand(1), Op.getOperand(2));
3958   }
3959   case Intrinsic::arm_neon_vmins:
3960   case Intrinsic::arm_neon_vmaxs: {
3961     // v{min,max}s is overloaded between signed integers and floats.
3962     if (!Op.getValueType().isFloatingPoint()) {
3963       unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3964         ? ISD::SMIN : ISD::SMAX;
3965       return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3966                          Op.getOperand(1), Op.getOperand(2));
3967     }
3968     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3969       ? ISD::FMINIMUM : ISD::FMAXIMUM;
3970     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3971                        Op.getOperand(1), Op.getOperand(2));
3972   }
3973   case Intrinsic::arm_neon_vtbl1:
3974     return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(),
3975                        Op.getOperand(1), Op.getOperand(2));
3976   case Intrinsic::arm_neon_vtbl2:
3977     return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(),
3978                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3979   case Intrinsic::arm_mve_pred_i2v:
3980   case Intrinsic::arm_mve_pred_v2i:
3981     return DAG.getNode(ARMISD::PREDICATE_CAST, SDLoc(Op), Op.getValueType(),
3982                        Op.getOperand(1));
3983   case Intrinsic::arm_mve_vreinterpretq:
3984     return DAG.getNode(ARMISD::VECTOR_REG_CAST, SDLoc(Op), Op.getValueType(),
3985                        Op.getOperand(1));
3986   case Intrinsic::arm_mve_lsll:
3987     return DAG.getNode(ARMISD::LSLL, SDLoc(Op), Op->getVTList(),
3988                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3989   case Intrinsic::arm_mve_asrl:
3990     return DAG.getNode(ARMISD::ASRL, SDLoc(Op), Op->getVTList(),
3991                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3992   }
3993 }
3994 
3995 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
3996                                  const ARMSubtarget *Subtarget) {
3997   SDLoc dl(Op);
3998   ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2));
3999   auto SSID = static_cast<SyncScope::ID>(SSIDNode->getZExtValue());
4000   if (SSID == SyncScope::SingleThread)
4001     return Op;
4002 
4003   if (!Subtarget->hasDataBarrier()) {
4004     // Some ARMv6 cpus can support data barriers with an mcr instruction.
4005     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
4006     // here.
4007     assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
4008            "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
4009     return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
4010                        DAG.getConstant(0, dl, MVT::i32));
4011   }
4012 
4013   ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
4014   AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
4015   ARM_MB::MemBOpt Domain = ARM_MB::ISH;
4016   if (Subtarget->isMClass()) {
4017     // Only a full system barrier exists in the M-class architectures.
4018     Domain = ARM_MB::SY;
4019   } else if (Subtarget->preferISHSTBarriers() &&
4020              Ord == AtomicOrdering::Release) {
4021     // Swift happens to implement ISHST barriers in a way that's compatible with
4022     // Release semantics but weaker than ISH so we'd be fools not to use
4023     // it. Beware: other processors probably don't!
4024     Domain = ARM_MB::ISHST;
4025   }
4026 
4027   return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
4028                      DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32),
4029                      DAG.getConstant(Domain, dl, MVT::i32));
4030 }
4031 
4032 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
4033                              const ARMSubtarget *Subtarget) {
4034   // ARM pre v5TE and Thumb1 does not have preload instructions.
4035   if (!(Subtarget->isThumb2() ||
4036         (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
4037     // Just preserve the chain.
4038     return Op.getOperand(0);
4039 
4040   SDLoc dl(Op);
4041   unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
4042   if (!isRead &&
4043       (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
4044     // ARMv7 with MP extension has PLDW.
4045     return Op.getOperand(0);
4046 
4047   unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
4048   if (Subtarget->isThumb()) {
4049     // Invert the bits.
4050     isRead = ~isRead & 1;
4051     isData = ~isData & 1;
4052   }
4053 
4054   return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
4055                      Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32),
4056                      DAG.getConstant(isData, dl, MVT::i32));
4057 }
4058 
4059 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
4060   MachineFunction &MF = DAG.getMachineFunction();
4061   ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
4062 
4063   // vastart just stores the address of the VarArgsFrameIndex slot into the
4064   // memory location argument.
4065   SDLoc dl(Op);
4066   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4067   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4068   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
4069   return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
4070                       MachinePointerInfo(SV));
4071 }
4072 
4073 SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
4074                                                 CCValAssign &NextVA,
4075                                                 SDValue &Root,
4076                                                 SelectionDAG &DAG,
4077                                                 const SDLoc &dl) const {
4078   MachineFunction &MF = DAG.getMachineFunction();
4079   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
4080 
4081   const TargetRegisterClass *RC;
4082   if (AFI->isThumb1OnlyFunction())
4083     RC = &ARM::tGPRRegClass;
4084   else
4085     RC = &ARM::GPRRegClass;
4086 
4087   // Transform the arguments stored in physical registers into virtual ones.
4088   unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
4089   SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
4090 
4091   SDValue ArgValue2;
4092   if (NextVA.isMemLoc()) {
4093     MachineFrameInfo &MFI = MF.getFrameInfo();
4094     int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true);
4095 
4096     // Create load node to retrieve arguments from the stack.
4097     SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
4098     ArgValue2 = DAG.getLoad(
4099         MVT::i32, dl, Root, FIN,
4100         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
4101   } else {
4102     Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
4103     ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
4104   }
4105   if (!Subtarget->isLittle())
4106     std::swap (ArgValue, ArgValue2);
4107   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
4108 }
4109 
4110 // The remaining GPRs hold either the beginning of variable-argument
4111 // data, or the beginning of an aggregate passed by value (usually
4112 // byval).  Either way, we allocate stack slots adjacent to the data
4113 // provided by our caller, and store the unallocated registers there.
4114 // If this is a variadic function, the va_list pointer will begin with
4115 // these values; otherwise, this reassembles a (byval) structure that
4116 // was split between registers and memory.
4117 // Return: The frame index registers were stored into.
4118 int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
4119                                       const SDLoc &dl, SDValue &Chain,
4120                                       const Value *OrigArg,
4121                                       unsigned InRegsParamRecordIdx,
4122                                       int ArgOffset, unsigned ArgSize) const {
4123   // Currently, two use-cases possible:
4124   // Case #1. Non-var-args function, and we meet first byval parameter.
4125   //          Setup first unallocated register as first byval register;
4126   //          eat all remained registers
4127   //          (these two actions are performed by HandleByVal method).
4128   //          Then, here, we initialize stack frame with
4129   //          "store-reg" instructions.
4130   // Case #2. Var-args function, that doesn't contain byval parameters.
4131   //          The same: eat all remained unallocated registers,
4132   //          initialize stack frame.
4133 
4134   MachineFunction &MF = DAG.getMachineFunction();
4135   MachineFrameInfo &MFI = MF.getFrameInfo();
4136   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
4137   unsigned RBegin, REnd;
4138   if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
4139     CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
4140   } else {
4141     unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
4142     RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx];
4143     REnd = ARM::R4;
4144   }
4145 
4146   if (REnd != RBegin)
4147     ArgOffset = -4 * (ARM::R4 - RBegin);
4148 
4149   auto PtrVT = getPointerTy(DAG.getDataLayout());
4150   int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false);
4151   SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT);
4152 
4153   SmallVector<SDValue, 4> MemOps;
4154   const TargetRegisterClass *RC =
4155       AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
4156 
4157   for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
4158     unsigned VReg = MF.addLiveIn(Reg, RC);
4159     SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4160     SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4161                                  MachinePointerInfo(OrigArg, 4 * i));
4162     MemOps.push_back(Store);
4163     FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT));
4164   }
4165 
4166   if (!MemOps.empty())
4167     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4168   return FrameIndex;
4169 }
4170 
4171 // Setup stack frame, the va_list pointer will start from.
4172 void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
4173                                              const SDLoc &dl, SDValue &Chain,
4174                                              unsigned ArgOffset,
4175                                              unsigned TotalArgRegsSaveSize,
4176                                              bool ForceMutable) const {
4177   MachineFunction &MF = DAG.getMachineFunction();
4178   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
4179 
4180   // Try to store any remaining integer argument regs
4181   // to their spots on the stack so that they may be loaded by dereferencing
4182   // the result of va_next.
4183   // If there is no regs to be stored, just point address after last
4184   // argument passed via stack.
4185   int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
4186                                   CCInfo.getInRegsParamsCount(),
4187                                   CCInfo.getNextStackOffset(),
4188                                   std::max(4U, TotalArgRegsSaveSize));
4189   AFI->setVarArgsFrameIndex(FrameIndex);
4190 }
4191 
4192 bool ARMTargetLowering::splitValueIntoRegisterParts(
4193     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
4194     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
4195   bool IsABIRegCopy = CC.hasValue();
4196   EVT ValueVT = Val.getValueType();
4197   if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
4198       PartVT == MVT::f32) {
4199     unsigned ValueBits = ValueVT.getSizeInBits();
4200     unsigned PartBits = PartVT.getSizeInBits();
4201     Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(ValueBits), Val);
4202     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::getIntegerVT(PartBits), Val);
4203     Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
4204     Parts[0] = Val;
4205     return true;
4206   }
4207   return false;
4208 }
4209 
4210 SDValue ARMTargetLowering::joinRegisterPartsIntoValue(
4211     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
4212     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
4213   bool IsABIRegCopy = CC.hasValue();
4214   if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
4215       PartVT == MVT::f32) {
4216     unsigned ValueBits = ValueVT.getSizeInBits();
4217     unsigned PartBits = PartVT.getSizeInBits();
4218     SDValue Val = Parts[0];
4219 
4220     Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(PartBits), Val);
4221     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::getIntegerVT(ValueBits), Val);
4222     Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
4223     return Val;
4224   }
4225   return SDValue();
4226 }
4227 
4228 SDValue ARMTargetLowering::LowerFormalArguments(
4229     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4230     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4231     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4232   MachineFunction &MF = DAG.getMachineFunction();
4233   MachineFrameInfo &MFI = MF.getFrameInfo();
4234 
4235   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
4236 
4237   // Assign locations to all of the incoming arguments.
4238   SmallVector<CCValAssign, 16> ArgLocs;
4239   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
4240                  *DAG.getContext());
4241   CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));
4242 
4243   SmallVector<SDValue, 16> ArgValues;
4244   SDValue ArgValue;
4245   Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
4246   unsigned CurArgIdx = 0;
4247 
4248   // Initially ArgRegsSaveSize is zero.
4249   // Then we increase this value each time we meet byval parameter.
4250   // We also increase this value in case of varargs function.
4251   AFI->setArgRegsSaveSize(0);
4252 
4253   // Calculate the amount of stack space that we need to allocate to store
4254   // byval and variadic arguments that are passed in registers.
4255   // We need to know this before we allocate the first byval or variadic
4256   // argument, as they will be allocated a stack slot below the CFA (Canonical
4257   // Frame Address, the stack pointer at entry to the function).
4258   unsigned ArgRegBegin = ARM::R4;
4259   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4260     if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
4261       break;
4262 
4263     CCValAssign &VA = ArgLocs[i];
4264     unsigned Index = VA.getValNo();
4265     ISD::ArgFlagsTy Flags = Ins[Index].Flags;
4266     if (!Flags.isByVal())
4267       continue;
4268 
4269     assert(VA.isMemLoc() && "unexpected byval pointer in reg");
4270     unsigned RBegin, REnd;
4271     CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd);
4272     ArgRegBegin = std::min(ArgRegBegin, RBegin);
4273 
4274     CCInfo.nextInRegsParam();
4275   }
4276   CCInfo.rewindByValRegsInfo();
4277 
4278   int lastInsIndex = -1;
4279   if (isVarArg && MFI.hasVAStart()) {
4280     unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
4281     if (RegIdx != array_lengthof(GPRArgRegs))
4282       ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
4283   }
4284 
4285   unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
4286   AFI->setArgRegsSaveSize(TotalArgRegsSaveSize);
4287   auto PtrVT = getPointerTy(DAG.getDataLayout());
4288 
4289   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4290     CCValAssign &VA = ArgLocs[i];
4291     if (Ins[VA.getValNo()].isOrigArg()) {
4292       std::advance(CurOrigArg,
4293                    Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
4294       CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
4295     }
4296     // Arguments stored in registers.
4297     if (VA.isRegLoc()) {
4298       EVT RegVT = VA.getLocVT();
4299 
4300       if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) {
4301         // f64 and vector types are split up into multiple registers or
4302         // combinations of registers and stack slots.
4303         SDValue ArgValue1 =
4304             GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
4305         VA = ArgLocs[++i]; // skip ahead to next loc
4306         SDValue ArgValue2;
4307         if (VA.isMemLoc()) {
4308           int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true);
4309           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4310           ArgValue2 = DAG.getLoad(
4311               MVT::f64, dl, Chain, FIN,
4312               MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
4313         } else {
4314           ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
4315         }
4316         ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
4317         ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue,
4318                                ArgValue1, DAG.getIntPtrConstant(0, dl));
4319         ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue,
4320                                ArgValue2, DAG.getIntPtrConstant(1, dl));
4321       } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) {
4322         ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
4323       } else {
4324         const TargetRegisterClass *RC;
4325 
4326         if (RegVT == MVT::f16 || RegVT == MVT::bf16)
4327           RC = &ARM::HPRRegClass;
4328         else if (RegVT == MVT::f32)
4329           RC = &ARM::SPRRegClass;
4330         else if (RegVT == MVT::f64 || RegVT == MVT::v4f16 ||
4331                  RegVT == MVT::v4bf16)
4332           RC = &ARM::DPRRegClass;
4333         else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16 ||
4334                  RegVT == MVT::v8bf16)
4335           RC = &ARM::QPRRegClass;
4336         else if (RegVT == MVT::i32)
4337           RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
4338                                            : &ARM::GPRRegClass;
4339         else
4340           llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
4341 
4342         // Transform the arguments in physical registers into virtual ones.
4343         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
4344         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
4345 
4346         // If this value is passed in r0 and has the returned attribute (e.g.
4347         // C++ 'structors), record this fact for later use.
4348         if (VA.getLocReg() == ARM::R0 && Ins[VA.getValNo()].Flags.isReturned()) {
4349           AFI->setPreservesR0();
4350         }
4351       }
4352 
4353       // If this is an 8 or 16-bit value, it is really passed promoted
4354       // to 32 bits.  Insert an assert[sz]ext to capture this, then
4355       // truncate to the right size.
4356       switch (VA.getLocInfo()) {
4357       default: llvm_unreachable("Unknown loc info!");
4358       case CCValAssign::Full: break;
4359       case CCValAssign::BCvt:
4360         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
4361         break;
4362       case CCValAssign::SExt:
4363         ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
4364                                DAG.getValueType(VA.getValVT()));
4365         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
4366         break;
4367       case CCValAssign::ZExt:
4368         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
4369                                DAG.getValueType(VA.getValVT()));
4370         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
4371         break;
4372       }
4373 
4374       // f16 arguments have their size extended to 4 bytes and passed as if they
4375       // had been copied to the LSBs of a 32-bit register.
4376       // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI)
4377       if (VA.needsCustom() &&
4378           (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
4379         ArgValue = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), ArgValue);
4380 
4381       InVals.push_back(ArgValue);
4382     } else { // VA.isRegLoc()
4383       // sanity check
4384       assert(VA.isMemLoc());
4385       assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
4386 
4387       int index = VA.getValNo();
4388 
4389       // Some Ins[] entries become multiple ArgLoc[] entries.
4390       // Process them only once.
4391       if (index != lastInsIndex)
4392         {
4393           ISD::ArgFlagsTy Flags = Ins[index].Flags;
4394           // FIXME: For now, all byval parameter objects are marked mutable.
4395           // This can be changed with more analysis.
4396           // In case of tail call optimization mark all arguments mutable.
4397           // Since they could be overwritten by lowering of arguments in case of
4398           // a tail call.
4399           if (Flags.isByVal()) {
4400             assert(Ins[index].isOrigArg() &&
4401                    "Byval arguments cannot be implicit");
4402             unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();
4403 
4404             int FrameIndex = StoreByValRegs(
4405                 CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
4406                 VA.getLocMemOffset(), Flags.getByValSize());
4407             InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT));
4408             CCInfo.nextInRegsParam();
4409           } else {
4410             unsigned FIOffset = VA.getLocMemOffset();
4411             int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
4412                                            FIOffset, true);
4413 
4414             // Create load nodes to retrieve arguments from the stack.
4415             SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4416             InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
4417                                          MachinePointerInfo::getFixedStack(
4418                                              DAG.getMachineFunction(), FI)));
4419           }
4420           lastInsIndex = index;
4421         }
4422     }
4423   }
4424 
4425   // varargs
4426   if (isVarArg && MFI.hasVAStart())
4427     VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
4428                          CCInfo.getNextStackOffset(),
4429                          TotalArgRegsSaveSize);
4430 
4431   AFI->setArgumentStackSize(CCInfo.getNextStackOffset());
4432 
4433   return Chain;
4434 }
4435 
4436 /// isFloatingPointZero - Return true if this is +0.0.
4437 static bool isFloatingPointZero(SDValue Op) {
4438   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
4439     return CFP->getValueAPF().isPosZero();
4440   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
4441     // Maybe this has already been legalized into the constant pool?
4442     if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
4443       SDValue WrapperOp = Op.getOperand(1).getOperand(0);
4444       if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
4445         if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
4446           return CFP->getValueAPF().isPosZero();
4447     }
4448   } else if (Op->getOpcode() == ISD::BITCAST &&
4449              Op->getValueType(0) == MVT::f64) {
4450     // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
4451     // created by LowerConstantFP().
4452     SDValue BitcastOp = Op->getOperand(0);
4453     if (BitcastOp->getOpcode() == ARMISD::VMOVIMM &&
4454         isNullConstant(BitcastOp->getOperand(0)))
4455       return true;
4456   }
4457   return false;
4458 }
4459 
4460 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for
4461 /// the given operands.
4462 SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
4463                                      SDValue &ARMcc, SelectionDAG &DAG,
4464                                      const SDLoc &dl) const {
4465   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
4466     unsigned C = RHSC->getZExtValue();
4467     if (!isLegalICmpImmediate((int32_t)C)) {
4468       // Constant does not fit, try adjusting it by one.
4469       switch (CC) {
4470       default: break;
4471       case ISD::SETLT:
4472       case ISD::SETGE:
4473         if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
4474           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
4475           RHS = DAG.getConstant(C - 1, dl, MVT::i32);
4476         }
4477         break;
4478       case ISD::SETULT:
4479       case ISD::SETUGE:
4480         if (C != 0 && isLegalICmpImmediate(C-1)) {
4481           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
4482           RHS = DAG.getConstant(C - 1, dl, MVT::i32);
4483         }
4484         break;
4485       case ISD::SETLE:
4486       case ISD::SETGT:
4487         if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
4488           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
4489           RHS = DAG.getConstant(C + 1, dl, MVT::i32);
4490         }
4491         break;
4492       case ISD::SETULE:
4493       case ISD::SETUGT:
4494         if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
4495           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
4496           RHS = DAG.getConstant(C + 1, dl, MVT::i32);
4497         }
4498         break;
4499       }
4500     }
4501   } else if ((ARM_AM::getShiftOpcForNode(LHS.getOpcode()) != ARM_AM::no_shift) &&
4502              (ARM_AM::getShiftOpcForNode(RHS.getOpcode()) == ARM_AM::no_shift)) {
4503     // In ARM and Thumb-2, the compare instructions can shift their second
4504     // operand.
4505     CC = ISD::getSetCCSwappedOperands(CC);
4506     std::swap(LHS, RHS);
4507   }
4508 
4509   // Thumb1 has very limited immediate modes, so turning an "and" into a
4510   // shift can save multiple instructions.
4511   //
4512   // If we have (x & C1), and C1 is an appropriate mask, we can transform it
4513   // into "((x << n) >> n)".  But that isn't necessarily profitable on its
4514   // own. If it's the operand to an unsigned comparison with an immediate,
4515   // we can eliminate one of the shifts: we transform
4516   // "((x << n) >> n) == C2" to "(x << n) == (C2 << n)".
4517   //
4518   // We avoid transforming cases which aren't profitable due to encoding
4519   // details:
4520   //
4521   // 1. C2 fits into the immediate field of a cmp, and the transformed version
4522   // would not; in that case, we're essentially trading one immediate load for
4523   // another.
4524   // 2. C1 is 255 or 65535, so we can use uxtb or uxth.
4525   // 3. C2 is zero; we have other code for this special case.
4526   //
4527   // FIXME: Figure out profitability for Thumb2; we usually can't save an
4528   // instruction, since the AND is always one instruction anyway, but we could
4529   // use narrow instructions in some cases.
4530   if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::AND &&
4531       LHS->hasOneUse() && isa<ConstantSDNode>(LHS.getOperand(1)) &&
4532       LHS.getValueType() == MVT::i32 && isa<ConstantSDNode>(RHS) &&
4533       !isSignedIntSetCC(CC)) {
4534     unsigned Mask = cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue();
4535     auto *RHSC = cast<ConstantSDNode>(RHS.getNode());
4536     uint64_t RHSV = RHSC->getZExtValue();
4537     if (isMask_32(Mask) && (RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) {
4538       unsigned ShiftBits = countLeadingZeros(Mask);
4539       if (RHSV && (RHSV > 255 || (RHSV << ShiftBits) <= 255)) {
4540         SDValue ShiftAmt = DAG.getConstant(ShiftBits, dl, MVT::i32);
4541         LHS = DAG.getNode(ISD::SHL, dl, MVT::i32, LHS.getOperand(0), ShiftAmt);
4542         RHS = DAG.getConstant(RHSV << ShiftBits, dl, MVT::i32);
4543       }
4544     }
4545   }
4546 
4547   // The specific comparison "(x<<c) > 0x80000000U" can be optimized to a
4548   // single "lsls x, c+1".  The shift sets the "C" and "Z" flags the same
4549   // way a cmp would.
4550   // FIXME: Add support for ARM/Thumb2; this would need isel patterns, and
4551   // some tweaks to the heuristics for the previous and->shift transform.
4552   // FIXME: Optimize cases where the LHS isn't a shift.
4553   if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL &&
4554       isa<ConstantSDNode>(RHS) &&
4555       cast<ConstantSDNode>(RHS)->getZExtValue() == 0x80000000U &&
4556       CC == ISD::SETUGT && isa<ConstantSDNode>(LHS.getOperand(1)) &&
4557       cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() < 31) {
4558     unsigned ShiftAmt =
4559       cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() + 1;
4560     SDValue Shift = DAG.getNode(ARMISD::LSLS, dl,
4561                                 DAG.getVTList(MVT::i32, MVT::i32),
4562                                 LHS.getOperand(0),
4563                                 DAG.getConstant(ShiftAmt, dl, MVT::i32));
4564     SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
4565                                      Shift.getValue(1), SDValue());
4566     ARMcc = DAG.getConstant(ARMCC::HI, dl, MVT::i32);
4567     return Chain.getValue(1);
4568   }
4569 
4570   ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
4571 
4572   // If the RHS is a constant zero then the V (overflow) flag will never be
4573   // set. This can allow us to simplify GE to PL or LT to MI, which can be
4574   // simpler for other passes (like the peephole optimiser) to deal with.
4575   if (isNullConstant(RHS)) {
4576     switch (CondCode) {
4577       default: break;
4578       case ARMCC::GE:
4579         CondCode = ARMCC::PL;
4580         break;
4581       case ARMCC::LT:
4582         CondCode = ARMCC::MI;
4583         break;
4584     }
4585   }
4586 
4587   ARMISD::NodeType CompareType;
4588   switch (CondCode) {
4589   default:
4590     CompareType = ARMISD::CMP;
4591     break;
4592   case ARMCC::EQ:
4593   case ARMCC::NE:
4594     // Uses only Z Flag
4595     CompareType = ARMISD::CMPZ;
4596     break;
4597   }
4598   ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4599   return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
4600 }
4601 
4602 /// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
4603 SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
4604                                      SelectionDAG &DAG, const SDLoc &dl,
4605                                      bool Signaling) const {
4606   assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64);
4607   SDValue Cmp;
4608   if (!isFloatingPointZero(RHS))
4609     Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPE : ARMISD::CMPFP,
4610                       dl, MVT::Glue, LHS, RHS);
4611   else
4612     Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPEw0 : ARMISD::CMPFPw0,
4613                       dl, MVT::Glue, LHS);
4614   return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
4615 }
4616 
4617 /// duplicateCmp - Glue values can have only one use, so this function
4618 /// duplicates a comparison node.
4619 SDValue
4620 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
4621   unsigned Opc = Cmp.getOpcode();
4622   SDLoc DL(Cmp);
4623   if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
4624     return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
4625 
4626   assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
4627   Cmp = Cmp.getOperand(0);
4628   Opc = Cmp.getOpcode();
4629   if (Opc == ARMISD::CMPFP)
4630     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
4631   else {
4632     assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
4633     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
4634   }
4635   return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
4636 }
4637 
4638 // This function returns three things: the arithmetic computation itself
4639 // (Value), a comparison (OverflowCmp), and a condition code (ARMcc).  The
4640 // comparison and the condition code define the case in which the arithmetic
4641 // computation *does not* overflow.
4642 std::pair<SDValue, SDValue>
4643 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
4644                                  SDValue &ARMcc) const {
4645   assert(Op.getValueType() == MVT::i32 &&  "Unsupported value type");
4646 
4647   SDValue Value, OverflowCmp;
4648   SDValue LHS = Op.getOperand(0);
4649   SDValue RHS = Op.getOperand(1);
4650   SDLoc dl(Op);
4651 
4652   // FIXME: We are currently always generating CMPs because we don't support
4653   // generating CMN through the backend. This is not as good as the natural
4654   // CMP case because it causes a register dependency and cannot be folded
4655   // later.
4656 
4657   switch (Op.getOpcode()) {
4658   default:
4659     llvm_unreachable("Unknown overflow instruction!");
4660   case ISD::SADDO:
4661     ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
4662     Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
4663     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
4664     break;
4665   case ISD::UADDO:
4666     ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
4667     // We use ADDC here to correspond to its use in LowerUnsignedALUO.
4668     // We do not use it in the USUBO case as Value may not be used.
4669     Value = DAG.getNode(ARMISD::ADDC, dl,
4670                         DAG.getVTList(Op.getValueType(), MVT::i32), LHS, RHS)
4671                 .getValue(0);
4672     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
4673     break;
4674   case ISD::SSUBO:
4675     ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
4676     Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
4677     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
4678     break;
4679   case ISD::USUBO:
4680     ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
4681     Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
4682     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
4683     break;
4684   case ISD::UMULO:
4685     // We generate a UMUL_LOHI and then check if the high word is 0.
4686     ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32);
4687     Value = DAG.getNode(ISD::UMUL_LOHI, dl,
4688                         DAG.getVTList(Op.getValueType(), Op.getValueType()),
4689                         LHS, RHS);
4690     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1),
4691                               DAG.getConstant(0, dl, MVT::i32));
4692     Value = Value.getValue(0); // We only want the low 32 bits for the result.
4693     break;
4694   case ISD::SMULO:
4695     // We generate a SMUL_LOHI and then check if all the bits of the high word
4696     // are the same as the sign bit of the low word.
4697     ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32);
4698     Value = DAG.getNode(ISD::SMUL_LOHI, dl,
4699                         DAG.getVTList(Op.getValueType(), Op.getValueType()),
4700                         LHS, RHS);
4701     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1),
4702                               DAG.getNode(ISD::SRA, dl, Op.getValueType(),
4703                                           Value.getValue(0),
4704                                           DAG.getConstant(31, dl, MVT::i32)));
4705     Value = Value.getValue(0); // We only want the low 32 bits for the result.
4706     break;
4707   } // switch (...)
4708 
4709   return std::make_pair(Value, OverflowCmp);
4710 }
4711 
4712 SDValue
4713 ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const {
4714   // Let legalize expand this if it isn't a legal type yet.
4715   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
4716     return SDValue();
4717 
4718   SDValue Value, OverflowCmp;
4719   SDValue ARMcc;
4720   std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
4721   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4722   SDLoc dl(Op);
4723   // We use 0 and 1 as false and true values.
4724   SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
4725   SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
4726   EVT VT = Op.getValueType();
4727 
4728   SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal,
4729                                  ARMcc, CCR, OverflowCmp);
4730 
4731   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
4732   return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
4733 }
4734 
4735 static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry,
4736                                               SelectionDAG &DAG) {
4737   SDLoc DL(BoolCarry);
4738   EVT CarryVT = BoolCarry.getValueType();
4739 
4740   // This converts the boolean value carry into the carry flag by doing
4741   // ARMISD::SUBC Carry, 1
4742   SDValue Carry = DAG.getNode(ARMISD::SUBC, DL,
4743                               DAG.getVTList(CarryVT, MVT::i32),
4744                               BoolCarry, DAG.getConstant(1, DL, CarryVT));
4745   return Carry.getValue(1);
4746 }
4747 
4748 static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT,
4749                                               SelectionDAG &DAG) {
4750   SDLoc DL(Flags);
4751 
4752   // Now convert the carry flag into a boolean carry. We do this
4753   // using ARMISD:ADDE 0, 0, Carry
4754   return DAG.getNode(ARMISD::ADDE, DL, DAG.getVTList(VT, MVT::i32),
4755                      DAG.getConstant(0, DL, MVT::i32),
4756                      DAG.getConstant(0, DL, MVT::i32), Flags);
4757 }
4758 
4759 SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op,
4760                                              SelectionDAG &DAG) const {
4761   // Let legalize expand this if it isn't a legal type yet.
4762   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
4763     return SDValue();
4764 
4765   SDValue LHS = Op.getOperand(0);
4766   SDValue RHS = Op.getOperand(1);
4767   SDLoc dl(Op);
4768 
4769   EVT VT = Op.getValueType();
4770   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
4771   SDValue Value;
4772   SDValue Overflow;
4773   switch (Op.getOpcode()) {
4774   default:
4775     llvm_unreachable("Unknown overflow instruction!");
4776   case ISD::UADDO:
4777     Value = DAG.getNode(ARMISD::ADDC, dl, VTs, LHS, RHS);
4778     // Convert the carry flag into a boolean value.
4779     Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG);
4780     break;
4781   case ISD::USUBO: {
4782     Value = DAG.getNode(ARMISD::SUBC, dl, VTs, LHS, RHS);
4783     // Convert the carry flag into a boolean value.
4784     Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG);
4785     // ARMISD::SUBC returns 0 when we have to borrow, so make it an overflow
4786     // value. So compute 1 - C.
4787     Overflow = DAG.getNode(ISD::SUB, dl, MVT::i32,
4788                            DAG.getConstant(1, dl, MVT::i32), Overflow);
4789     break;
4790   }
4791   }
4792 
4793   return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
4794 }
4795 
4796 static SDValue LowerSADDSUBSAT(SDValue Op, SelectionDAG &DAG,
4797                                const ARMSubtarget *Subtarget) {
4798   EVT VT = Op.getValueType();
4799   if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
4800     return SDValue();
4801   if (!VT.isSimple())
4802     return SDValue();
4803 
4804   unsigned NewOpcode;
4805   bool IsAdd = Op->getOpcode() == ISD::SADDSAT;
4806   switch (VT.getSimpleVT().SimpleTy) {
4807   default:
4808     return SDValue();
4809   case MVT::i8:
4810     NewOpcode = IsAdd ? ARMISD::QADD8b : ARMISD::QSUB8b;
4811     break;
4812   case MVT::i16:
4813     NewOpcode = IsAdd ? ARMISD::QADD16b : ARMISD::QSUB16b;
4814     break;
4815   }
4816 
4817   SDLoc dl(Op);
4818   SDValue Add =
4819       DAG.getNode(NewOpcode, dl, MVT::i32,
4820                   DAG.getSExtOrTrunc(Op->getOperand(0), dl, MVT::i32),
4821                   DAG.getSExtOrTrunc(Op->getOperand(1), dl, MVT::i32));
4822   return DAG.getNode(ISD::TRUNCATE, dl, VT, Add);
4823 }
4824 
4825 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
4826   SDValue Cond = Op.getOperand(0);
4827   SDValue SelectTrue = Op.getOperand(1);
4828   SDValue SelectFalse = Op.getOperand(2);
4829   SDLoc dl(Op);
4830   unsigned Opc = Cond.getOpcode();
4831 
4832   if (Cond.getResNo() == 1 &&
4833       (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
4834        Opc == ISD::USUBO)) {
4835     if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
4836       return SDValue();
4837 
4838     SDValue Value, OverflowCmp;
4839     SDValue ARMcc;
4840     std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
4841     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4842     EVT VT = Op.getValueType();
4843 
4844     return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
4845                    OverflowCmp, DAG);
4846   }
4847 
4848   // Convert:
4849   //
4850   //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
4851   //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
4852   //
4853   if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
4854     const ConstantSDNode *CMOVTrue =
4855       dyn_cast<ConstantSDNode>(Cond.getOperand(0));
4856     const ConstantSDNode *CMOVFalse =
4857       dyn_cast<ConstantSDNode>(Cond.getOperand(1));
4858 
4859     if (CMOVTrue && CMOVFalse) {
4860       unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
4861       unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
4862 
4863       SDValue True;
4864       SDValue False;
4865       if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
4866         True = SelectTrue;
4867         False = SelectFalse;
4868       } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
4869         True = SelectFalse;
4870         False = SelectTrue;
4871       }
4872 
4873       if (True.getNode() && False.getNode()) {
4874         EVT VT = Op.getValueType();
4875         SDValue ARMcc = Cond.getOperand(2);
4876         SDValue CCR = Cond.getOperand(3);
4877         SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
4878         assert(True.getValueType() == VT);
4879         return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
4880       }
4881     }
4882   }
4883 
4884   // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
4885   // undefined bits before doing a full-word comparison with zero.
4886   Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
4887                      DAG.getConstant(1, dl, Cond.getValueType()));
4888 
4889   return DAG.getSelectCC(dl, Cond,
4890                          DAG.getConstant(0, dl, Cond.getValueType()),
4891                          SelectTrue, SelectFalse, ISD::SETNE);
4892 }
4893 
4894 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
4895                                  bool &swpCmpOps, bool &swpVselOps) {
4896   // Start by selecting the GE condition code for opcodes that return true for
4897   // 'equality'
4898   if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
4899       CC == ISD::SETULE || CC == ISD::SETGE  || CC == ISD::SETLE)
4900     CondCode = ARMCC::GE;
4901 
4902   // and GT for opcodes that return false for 'equality'.
4903   else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
4904            CC == ISD::SETULT || CC == ISD::SETGT  || CC == ISD::SETLT)
4905     CondCode = ARMCC::GT;
4906 
4907   // Since we are constrained to GE/GT, if the opcode contains 'less', we need
4908   // to swap the compare operands.
4909   if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
4910       CC == ISD::SETULT || CC == ISD::SETLE  || CC == ISD::SETLT)
4911     swpCmpOps = true;
4912 
4913   // Both GT and GE are ordered comparisons, and return false for 'unordered'.
4914   // If we have an unordered opcode, we need to swap the operands to the VSEL
4915   // instruction (effectively negating the condition).
4916   //
4917   // This also has the effect of swapping which one of 'less' or 'greater'
4918   // returns true, so we also swap the compare operands. It also switches
4919   // whether we return true for 'equality', so we compensate by picking the
4920   // opposite condition code to our original choice.
4921   if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
4922       CC == ISD::SETUGT) {
4923     swpCmpOps = !swpCmpOps;
4924     swpVselOps = !swpVselOps;
4925     CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
4926   }
4927 
4928   // 'ordered' is 'anything but unordered', so use the VS condition code and
4929   // swap the VSEL operands.
4930   if (CC == ISD::SETO) {
4931     CondCode = ARMCC::VS;
4932     swpVselOps = true;
4933   }
4934 
4935   // 'unordered or not equal' is 'anything but equal', so use the EQ condition
4936   // code and swap the VSEL operands. Also do this if we don't care about the
4937   // unordered case.
4938   if (CC == ISD::SETUNE || CC == ISD::SETNE) {
4939     CondCode = ARMCC::EQ;
4940     swpVselOps = true;
4941   }
4942 }
4943 
4944 SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal,
4945                                    SDValue TrueVal, SDValue ARMcc, SDValue CCR,
4946                                    SDValue Cmp, SelectionDAG &DAG) const {
4947   if (!Subtarget->hasFP64() && VT == MVT::f64) {
4948     FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl,
4949                            DAG.getVTList(MVT::i32, MVT::i32), FalseVal);
4950     TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl,
4951                           DAG.getVTList(MVT::i32, MVT::i32), TrueVal);
4952 
4953     SDValue TrueLow = TrueVal.getValue(0);
4954     SDValue TrueHigh = TrueVal.getValue(1);
4955     SDValue FalseLow = FalseVal.getValue(0);
4956     SDValue FalseHigh = FalseVal.getValue(1);
4957 
4958     SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow,
4959                               ARMcc, CCR, Cmp);
4960     SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh,
4961                                ARMcc, CCR, duplicateCmp(Cmp, DAG));
4962 
4963     return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);
4964   } else {
4965     return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
4966                        Cmp);
4967   }
4968 }
4969 
4970 static bool isGTorGE(ISD::CondCode CC) {
4971   return CC == ISD::SETGT || CC == ISD::SETGE;
4972 }
4973 
4974 static bool isLTorLE(ISD::CondCode CC) {
4975   return CC == ISD::SETLT || CC == ISD::SETLE;
4976 }
4977 
4978 // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating.
4979 // All of these conditions (and their <= and >= counterparts) will do:
4980 //          x < k ? k : x
4981 //          x > k ? x : k
4982 //          k < x ? x : k
4983 //          k > x ? k : x
4984 static bool isLowerSaturate(const SDValue LHS, const SDValue RHS,
4985                             const SDValue TrueVal, const SDValue FalseVal,
4986                             const ISD::CondCode CC, const SDValue K) {
4987   return (isGTorGE(CC) &&
4988           ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
4989          (isLTorLE(CC) &&
4990           ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
4991 }
4992 
4993 // Similar to isLowerSaturate(), but checks for upper-saturating conditions.
4994 static bool isUpperSaturate(const SDValue LHS, const SDValue RHS,
4995                             const SDValue TrueVal, const SDValue FalseVal,
4996                             const ISD::CondCode CC, const SDValue K) {
4997   return (isGTorGE(CC) &&
4998           ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) ||
4999          (isLTorLE(CC) &&
5000           ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal)));
5001 }
5002 
5003 // Check if two chained conditionals could be converted into SSAT or USAT.
5004 //
5005 // SSAT can replace a set of two conditional selectors that bound a number to an
5006 // interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples:
5007 //
5008 //     x < -k ? -k : (x > k ? k : x)
5009 //     x < -k ? -k : (x < k ? x : k)
5010 //     x > -k ? (x > k ? k : x) : -k
5011 //     x < k ? (x < -k ? -k : x) : k
5012 //     etc.
5013 //
5014 // USAT works similarily to SSAT but bounds on the interval [0, k] where k + 1 is
5015 // a power of 2.
5016 //
5017 // It returns true if the conversion can be done, false otherwise.
5018 // Additionally, the variable is returned in parameter V, the constant in K and
5019 // usat is set to true if the conditional represents an unsigned saturation
5020 static bool isSaturatingConditional(const SDValue &Op, SDValue &V,
5021                                     uint64_t &K, bool &usat) {
5022   SDValue LHS1 = Op.getOperand(0);
5023   SDValue RHS1 = Op.getOperand(1);
5024   SDValue TrueVal1 = Op.getOperand(2);
5025   SDValue FalseVal1 = Op.getOperand(3);
5026   ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get();
5027 
5028   const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
5029   if (Op2.getOpcode() != ISD::SELECT_CC)
5030     return false;
5031 
5032   SDValue LHS2 = Op2.getOperand(0);
5033   SDValue RHS2 = Op2.getOperand(1);
5034   SDValue TrueVal2 = Op2.getOperand(2);
5035   SDValue FalseVal2 = Op2.getOperand(3);
5036   ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get();
5037 
5038   // Find out which are the constants and which are the variables
5039   // in each conditional
5040   SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1)
5041                                                         ? &RHS1
5042                                                         : nullptr;
5043   SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2)
5044                                                         ? &RHS2
5045                                                         : nullptr;
5046   SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2;
5047   SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1;
5048   SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2;
5049   SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2;
5050 
5051   // We must detect cases where the original operations worked with 16- or
5052   // 8-bit values. In such case, V2Tmp != V2 because the comparison operations
5053   // must work with sign-extended values but the select operations return
5054   // the original non-extended value.
5055   SDValue V2TmpReg = V2Tmp;
5056   if (V2Tmp->getOpcode() == ISD::SIGN_EXTEND_INREG)
5057     V2TmpReg = V2Tmp->getOperand(0);
5058 
5059   // Check that the registers and the constants have the correct values
5060   // in both conditionals
5061   if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp ||
5062       V2TmpReg != V2)
5063     return false;
5064 
5065   // Figure out which conditional is saturating the lower/upper bound.
5066   const SDValue *LowerCheckOp =
5067       isLowerSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
5068           ? &Op
5069           : isLowerSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2)
5070                 ? &Op2
5071                 : nullptr;
5072   const SDValue *UpperCheckOp =
5073       isUpperSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
5074           ? &Op
5075           : isUpperSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2)
5076                 ? &Op2
5077                 : nullptr;
5078 
5079   if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp)
5080     return false;
5081 
5082   // Check that the constant in the lower-bound check is
5083   // the opposite of the constant in the upper-bound check
5084   // in 1's complement.
5085   int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue();
5086   int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue();
5087   int64_t PosVal = std::max(Val1, Val2);
5088   int64_t NegVal = std::min(Val1, Val2);
5089 
5090   if (((Val1 > Val2 && UpperCheckOp == &Op) ||
5091        (Val1 < Val2 && UpperCheckOp == &Op2)) &&
5092       isPowerOf2_64(PosVal + 1)) {
5093 
5094     // Handle the difference between USAT (unsigned) and SSAT (signed) saturation
5095     if (Val1 == ~Val2)
5096       usat = false;
5097     else if (NegVal == 0)
5098       usat = true;
5099     else
5100       return false;
5101 
5102     V = V2;
5103     K = (uint64_t)PosVal; // At this point, PosVal is guaranteed to be positive
5104 
5105     return true;
5106   }
5107 
5108   return false;
5109 }
5110 
5111 // Check if a condition of the type x < k ? k : x can be converted into a
5112 // bit operation instead of conditional moves.
5113 // Currently this is allowed given:
5114 // - The conditions and values match up
5115 // - k is 0 or -1 (all ones)
5116 // This function will not check the last condition, thats up to the caller
5117 // It returns true if the transformation can be made, and in such case
5118 // returns x in V, and k in SatK.
5119 static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V,
5120                                          SDValue &SatK)
5121 {
5122   SDValue LHS = Op.getOperand(0);
5123   SDValue RHS = Op.getOperand(1);
5124   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
5125   SDValue TrueVal = Op.getOperand(2);
5126   SDValue FalseVal = Op.getOperand(3);
5127 
5128   SDValue *K = isa<ConstantSDNode>(LHS) ? &LHS : isa<ConstantSDNode>(RHS)
5129                                                ? &RHS
5130                                                : nullptr;
5131 
5132   // No constant operation in comparison, early out
5133   if (!K)
5134     return false;
5135 
5136   SDValue KTmp = isa<ConstantSDNode>(TrueVal) ? TrueVal : FalseVal;
5137   V = (KTmp == TrueVal) ? FalseVal : TrueVal;
5138   SDValue VTmp = (K && *K == LHS) ? RHS : LHS;
5139 
5140   // If the constant on left and right side, or variable on left and right,
5141   // does not match, early out
5142   if (*K != KTmp || V != VTmp)
5143     return false;
5144 
5145   if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, *K)) {
5146     SatK = *K;
5147     return true;
5148   }
5149 
5150   return false;
5151 }
5152 
5153 bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const {
5154   if (VT == MVT::f32)
5155     return !Subtarget->hasVFP2Base();
5156   if (VT == MVT::f64)
5157     return !Subtarget->hasFP64();
5158   if (VT == MVT::f16)
5159     return !Subtarget->hasFullFP16();
5160   return false;
5161 }
5162 
5163 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
5164   EVT VT = Op.getValueType();
5165   SDLoc dl(Op);
5166 
5167   // Try to convert two saturating conditional selects into a single SSAT
5168   SDValue SatValue;
5169   uint64_t SatConstant;
5170   bool SatUSat;
5171   if (((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) &&
5172       isSaturatingConditional(Op, SatValue, SatConstant, SatUSat)) {
5173     if (SatUSat)
5174       return DAG.getNode(ARMISD::USAT, dl, VT, SatValue,
5175                          DAG.getConstant(countTrailingOnes(SatConstant), dl, VT));
5176     else
5177       return DAG.getNode(ARMISD::SSAT, dl, VT, SatValue,
5178                          DAG.getConstant(countTrailingOnes(SatConstant), dl, VT));
5179   }
5180 
5181   // Try to convert expressions of the form x < k ? k : x (and similar forms)
5182   // into more efficient bit operations, which is possible when k is 0 or -1
5183   // On ARM and Thumb-2 which have flexible operand 2 this will result in
5184   // single instructions. On Thumb the shift and the bit operation will be two
5185   // instructions.
5186   // Only allow this transformation on full-width (32-bit) operations
5187   SDValue LowerSatConstant;
5188   if (VT == MVT::i32 &&
5189       isLowerSaturatingConditional(Op, SatValue, LowerSatConstant)) {
5190     SDValue ShiftV = DAG.getNode(ISD::SRA, dl, VT, SatValue,
5191                                  DAG.getConstant(31, dl, VT));
5192     if (isNullConstant(LowerSatConstant)) {
5193       SDValue NotShiftV = DAG.getNode(ISD::XOR, dl, VT, ShiftV,
5194                                       DAG.getAllOnesConstant(dl, VT));
5195       return DAG.getNode(ISD::AND, dl, VT, SatValue, NotShiftV);
5196     } else if (isAllOnesConstant(LowerSatConstant))
5197       return DAG.getNode(ISD::OR, dl, VT, SatValue, ShiftV);
5198   }
5199 
5200   SDValue LHS = Op.getOperand(0);
5201   SDValue RHS = Op.getOperand(1);
5202   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
5203   SDValue TrueVal = Op.getOperand(2);
5204   SDValue FalseVal = Op.getOperand(3);
5205   ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FalseVal);
5206   ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TrueVal);
5207 
5208   if (Subtarget->hasV8_1MMainlineOps() && CFVal && CTVal &&
5209       LHS.getValueType() == MVT::i32 && RHS.getValueType() == MVT::i32) {
5210     unsigned TVal = CTVal->getZExtValue();
5211     unsigned FVal = CFVal->getZExtValue();
5212     unsigned Opcode = 0;
5213 
5214     if (TVal == ~FVal) {
5215       Opcode = ARMISD::CSINV;
5216     } else if (TVal == ~FVal + 1) {
5217       Opcode = ARMISD::CSNEG;
5218     } else if (TVal + 1 == FVal) {
5219       Opcode = ARMISD::CSINC;
5220     } else if (TVal == FVal + 1) {
5221       Opcode = ARMISD::CSINC;
5222       std::swap(TrueVal, FalseVal);
5223       std::swap(TVal, FVal);
5224       CC = ISD::getSetCCInverse(CC, LHS.getValueType());
5225     }
5226 
5227     if (Opcode) {
5228       // If one of the constants is cheaper than another, materialise the
5229       // cheaper one and let the csel generate the other.
5230       if (Opcode != ARMISD::CSINC &&
5231           HasLowerConstantMaterializationCost(FVal, TVal, Subtarget)) {
5232         std::swap(TrueVal, FalseVal);
5233         std::swap(TVal, FVal);
5234         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
5235       }
5236 
5237       // Attempt to use ZR checking TVal is 0, possibly inverting the condition
5238       // to get there. CSINC not is invertable like the other two (~(~a) == a,
5239       // -(-a) == a, but (a+1)+1 != a).
5240       if (FVal == 0 && Opcode != ARMISD::CSINC) {
5241         std::swap(TrueVal, FalseVal);
5242         std::swap(TVal, FVal);
5243         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
5244       }
5245       if (TVal == 0)
5246         TrueVal = DAG.getRegister(ARM::ZR, MVT::i32);
5247 
5248       // Drops F's value because we can get it by inverting/negating TVal.
5249       FalseVal = TrueVal;
5250 
5251       SDValue ARMcc;
5252       SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5253       EVT VT = TrueVal.getValueType();
5254       return DAG.getNode(Opcode, dl, VT, TrueVal, FalseVal, ARMcc, Cmp);
5255     }
5256   }
5257 
5258   if (isUnsupportedFloatingType(LHS.getValueType())) {
5259     DAG.getTargetLoweringInfo().softenSetCCOperands(
5260         DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS);
5261 
5262     // If softenSetCCOperands only returned one value, we should compare it to
5263     // zero.
5264     if (!RHS.getNode()) {
5265       RHS = DAG.getConstant(0, dl, LHS.getValueType());
5266       CC = ISD::SETNE;
5267     }
5268   }
5269 
5270   if (LHS.getValueType() == MVT::i32) {
5271     // Try to generate VSEL on ARMv8.
5272     // The VSEL instruction can't use all the usual ARM condition
5273     // codes: it only has two bits to select the condition code, so it's
5274     // constrained to use only GE, GT, VS and EQ.
5275     //
5276     // To implement all the various ISD::SETXXX opcodes, we sometimes need to
5277     // swap the operands of the previous compare instruction (effectively
5278     // inverting the compare condition, swapping 'less' and 'greater') and
5279     // sometimes need to swap the operands to the VSEL (which inverts the
5280     // condition in the sense of firing whenever the previous condition didn't)
5281     if (Subtarget->hasFPARMv8Base() && (TrueVal.getValueType() == MVT::f16 ||
5282                                         TrueVal.getValueType() == MVT::f32 ||
5283                                         TrueVal.getValueType() == MVT::f64)) {
5284       ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
5285       if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
5286           CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
5287         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
5288         std::swap(TrueVal, FalseVal);
5289       }
5290     }
5291 
5292     SDValue ARMcc;
5293     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5294     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5295     // Choose GE over PL, which vsel does now support
5296     if (cast<ConstantSDNode>(ARMcc)->getZExtValue() == ARMCC::PL)
5297       ARMcc = DAG.getConstant(ARMCC::GE, dl, MVT::i32);
5298     return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
5299   }
5300 
5301   ARMCC::CondCodes CondCode, CondCode2;
5302   FPCCToARMCC(CC, CondCode, CondCode2);
5303 
5304   // Normalize the fp compare. If RHS is zero we prefer to keep it there so we
5305   // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we
5306   // must use VSEL (limited condition codes), due to not having conditional f16
5307   // moves.
5308   if (Subtarget->hasFPARMv8Base() &&
5309       !(isFloatingPointZero(RHS) && TrueVal.getValueType() != MVT::f16) &&
5310       (TrueVal.getValueType() == MVT::f16 ||
5311        TrueVal.getValueType() == MVT::f32 ||
5312        TrueVal.getValueType() == MVT::f64)) {
5313     bool swpCmpOps = false;
5314     bool swpVselOps = false;
5315     checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);
5316 
5317     if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
5318         CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
5319       if (swpCmpOps)
5320         std::swap(LHS, RHS);
5321       if (swpVselOps)
5322         std::swap(TrueVal, FalseVal);
5323     }
5324   }
5325 
5326   SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
5327   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
5328   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5329   SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
5330   if (CondCode2 != ARMCC::AL) {
5331     SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32);
5332     // FIXME: Needs another CMP because flag can have but one use.
5333     SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
5334     Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
5335   }
5336   return Result;
5337 }
5338 
5339 /// canChangeToInt - Given the fp compare operand, return true if it is suitable
5340 /// to morph to an integer compare sequence.
5341 static bool canChangeToInt(SDValue Op, bool &SeenZero,
5342                            const ARMSubtarget *Subtarget) {
5343   SDNode *N = Op.getNode();
5344   if (!N->hasOneUse())
5345     // Otherwise it requires moving the value from fp to integer registers.
5346     return false;
5347   if (!N->getNumValues())
5348     return false;
5349   EVT VT = Op.getValueType();
5350   if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
5351     // f32 case is generally profitable. f64 case only makes sense when vcmpe +
5352     // vmrs are very slow, e.g. cortex-a8.
5353     return false;
5354 
5355   if (isFloatingPointZero(Op)) {
5356     SeenZero = true;
5357     return true;
5358   }
5359   return ISD::isNormalLoad(N);
5360 }
5361 
5362 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
5363   if (isFloatingPointZero(Op))
5364     return DAG.getConstant(0, SDLoc(Op), MVT::i32);
5365 
5366   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
5367     return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(),
5368                        Ld->getPointerInfo(), Ld->getAlignment(),
5369                        Ld->getMemOperand()->getFlags());
5370 
5371   llvm_unreachable("Unknown VFP cmp argument!");
5372 }
5373 
5374 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
5375                            SDValue &RetVal1, SDValue &RetVal2) {
5376   SDLoc dl(Op);
5377 
5378   if (isFloatingPointZero(Op)) {
5379     RetVal1 = DAG.getConstant(0, dl, MVT::i32);
5380     RetVal2 = DAG.getConstant(0, dl, MVT::i32);
5381     return;
5382   }
5383 
5384   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
5385     SDValue Ptr = Ld->getBasePtr();
5386     RetVal1 =
5387         DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
5388                     Ld->getAlignment(), Ld->getMemOperand()->getFlags());
5389 
5390     EVT PtrType = Ptr.getValueType();
5391     unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
5392     SDValue NewPtr = DAG.getNode(ISD::ADD, dl,
5393                                  PtrType, Ptr, DAG.getConstant(4, dl, PtrType));
5394     RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr,
5395                           Ld->getPointerInfo().getWithOffset(4), NewAlign,
5396                           Ld->getMemOperand()->getFlags());
5397     return;
5398   }
5399 
5400   llvm_unreachable("Unknown VFP cmp argument!");
5401 }
5402 
5403 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
5404 /// f32 and even f64 comparisons to integer ones.
5405 SDValue
5406 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
5407   SDValue Chain = Op.getOperand(0);
5408   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
5409   SDValue LHS = Op.getOperand(2);
5410   SDValue RHS = Op.getOperand(3);
5411   SDValue Dest = Op.getOperand(4);
5412   SDLoc dl(Op);
5413 
5414   bool LHSSeenZero = false;
5415   bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
5416   bool RHSSeenZero = false;
5417   bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
5418   if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
5419     // If unsafe fp math optimization is enabled and there are no other uses of
5420     // the CMP operands, and the condition code is EQ or NE, we can optimize it
5421     // to an integer comparison.
5422     if (CC == ISD::SETOEQ)
5423       CC = ISD::SETEQ;
5424     else if (CC == ISD::SETUNE)
5425       CC = ISD::SETNE;
5426 
5427     SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32);
5428     SDValue ARMcc;
5429     if (LHS.getValueType() == MVT::f32) {
5430       LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
5431                         bitcastf32Toi32(LHS, DAG), Mask);
5432       RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
5433                         bitcastf32Toi32(RHS, DAG), Mask);
5434       SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5435       SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5436       return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
5437                          Chain, Dest, ARMcc, CCR, Cmp);
5438     }
5439 
5440     SDValue LHS1, LHS2;
5441     SDValue RHS1, RHS2;
5442     expandf64Toi32(LHS, DAG, LHS1, LHS2);
5443     expandf64Toi32(RHS, DAG, RHS1, RHS2);
5444     LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
5445     RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
5446     ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
5447     ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
5448     SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
5449     SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
5450     return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
5451   }
5452 
5453   return SDValue();
5454 }
5455 
5456 SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
5457   SDValue Chain = Op.getOperand(0);
5458   SDValue Cond = Op.getOperand(1);
5459   SDValue Dest = Op.getOperand(2);
5460   SDLoc dl(Op);
5461 
5462   // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
5463   // instruction.
5464   unsigned Opc = Cond.getOpcode();
5465   bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) &&
5466                       !Subtarget->isThumb1Only();
5467   if (Cond.getResNo() == 1 &&
5468       (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
5469        Opc == ISD::USUBO || OptimizeMul)) {
5470     // Only lower legal XALUO ops.
5471     if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
5472       return SDValue();
5473 
5474     // The actual operation with overflow check.
5475     SDValue Value, OverflowCmp;
5476     SDValue ARMcc;
5477     std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
5478 
5479     // Reverse the condition code.
5480     ARMCC::CondCodes CondCode =
5481         (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
5482     CondCode = ARMCC::getOppositeCondition(CondCode);
5483     ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
5484     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5485 
5486     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR,
5487                        OverflowCmp);
5488   }
5489 
5490   return SDValue();
5491 }
5492 
5493 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
5494   SDValue Chain = Op.getOperand(0);
5495   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
5496   SDValue LHS = Op.getOperand(2);
5497   SDValue RHS = Op.getOperand(3);
5498   SDValue Dest = Op.getOperand(4);
5499   SDLoc dl(Op);
5500 
5501   if (isUnsupportedFloatingType(LHS.getValueType())) {
5502     DAG.getTargetLoweringInfo().softenSetCCOperands(
5503         DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS);
5504 
5505     // If softenSetCCOperands only returned one value, we should compare it to
5506     // zero.
5507     if (!RHS.getNode()) {
5508       RHS = DAG.getConstant(0, dl, LHS.getValueType());
5509       CC = ISD::SETNE;
5510     }
5511   }
5512 
5513   // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
5514   // instruction.
5515   unsigned Opc = LHS.getOpcode();
5516   bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) &&
5517                       !Subtarget->isThumb1Only();
5518   if (LHS.getResNo() == 1 && (isOneConstant(RHS) || isNullConstant(RHS)) &&
5519       (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
5520        Opc == ISD::USUBO || OptimizeMul) &&
5521       (CC == ISD::SETEQ || CC == ISD::SETNE)) {
5522     // Only lower legal XALUO ops.
5523     if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0)))
5524       return SDValue();
5525 
5526     // The actual operation with overflow check.
5527     SDValue Value, OverflowCmp;
5528     SDValue ARMcc;
5529     std::tie(Value, OverflowCmp) = getARMXALUOOp(LHS.getValue(0), DAG, ARMcc);
5530 
5531     if ((CC == ISD::SETNE) != isOneConstant(RHS)) {
5532       // Reverse the condition code.
5533       ARMCC::CondCodes CondCode =
5534           (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
5535       CondCode = ARMCC::getOppositeCondition(CondCode);
5536       ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
5537     }
5538     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5539 
5540     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR,
5541                        OverflowCmp);
5542   }
5543 
5544   if (LHS.getValueType() == MVT::i32) {
5545     SDValue ARMcc;
5546     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5547     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5548     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
5549                        Chain, Dest, ARMcc, CCR, Cmp);
5550   }
5551 
5552   if (getTargetMachine().Options.UnsafeFPMath &&
5553       (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
5554        CC == ISD::SETNE || CC == ISD::SETUNE)) {
5555     if (SDValue Result = OptimizeVFPBrcond(Op, DAG))
5556       return Result;
5557   }
5558 
5559   ARMCC::CondCodes CondCode, CondCode2;
5560   FPCCToARMCC(CC, CondCode, CondCode2);
5561 
5562   SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
5563   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
5564   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5565   SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
5566   SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
5567   SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
5568   if (CondCode2 != ARMCC::AL) {
5569     ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
5570     SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
5571     Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
5572   }
5573   return Res;
5574 }
5575 
5576 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
5577   SDValue Chain = Op.getOperand(0);
5578   SDValue Table = Op.getOperand(1);
5579   SDValue Index = Op.getOperand(2);
5580   SDLoc dl(Op);
5581 
5582   EVT PTy = getPointerTy(DAG.getDataLayout());
5583   JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
5584   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
5585   Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI);
5586   Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy));
5587   SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Index);
5588   if (Subtarget->isThumb2() || (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) {
5589     // Thumb2 and ARMv8-M use a two-level jump. That is, it jumps into the jump table
5590     // which does another jump to the destination. This also makes it easier
5591     // to translate it to TBB / TBH later (Thumb2 only).
5592     // FIXME: This might not work if the function is extremely large.
5593     return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
5594                        Addr, Op.getOperand(2), JTI);
5595   }
5596   if (isPositionIndependent() || Subtarget->isROPI()) {
5597     Addr =
5598         DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
5599                     MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
5600     Chain = Addr.getValue(1);
5601     Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Addr);
5602     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
5603   } else {
5604     Addr =
5605         DAG.getLoad(PTy, dl, Chain, Addr,
5606                     MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
5607     Chain = Addr.getValue(1);
5608     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
5609   }
5610 }
5611 
5612 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
5613   EVT VT = Op.getValueType();
5614   SDLoc dl(Op);
5615 
5616   if (Op.getValueType().getVectorElementType() == MVT::i32) {
5617     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
5618       return Op;
5619     return DAG.UnrollVectorOp(Op.getNode());
5620   }
5621 
5622   const bool HasFullFP16 =
5623     static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16();
5624 
5625   EVT NewTy;
5626   const EVT OpTy = Op.getOperand(0).getValueType();
5627   if (OpTy == MVT::v4f32)
5628     NewTy = MVT::v4i32;
5629   else if (OpTy == MVT::v4f16 && HasFullFP16)
5630     NewTy = MVT::v4i16;
5631   else if (OpTy == MVT::v8f16 && HasFullFP16)
5632     NewTy = MVT::v8i16;
5633   else
5634     llvm_unreachable("Invalid type for custom lowering!");
5635 
5636   if (VT != MVT::v4i16 && VT != MVT::v8i16)
5637     return DAG.UnrollVectorOp(Op.getNode());
5638 
5639   Op = DAG.getNode(Op.getOpcode(), dl, NewTy, Op.getOperand(0));
5640   return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
5641 }
5642 
5643 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
5644   EVT VT = Op.getValueType();
5645   if (VT.isVector())
5646     return LowerVectorFP_TO_INT(Op, DAG);
5647 
5648   bool IsStrict = Op->isStrictFPOpcode();
5649   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
5650 
5651   if (isUnsupportedFloatingType(SrcVal.getValueType())) {
5652     RTLIB::Libcall LC;
5653     if (Op.getOpcode() == ISD::FP_TO_SINT ||
5654         Op.getOpcode() == ISD::STRICT_FP_TO_SINT)
5655       LC = RTLIB::getFPTOSINT(SrcVal.getValueType(),
5656                               Op.getValueType());
5657     else
5658       LC = RTLIB::getFPTOUINT(SrcVal.getValueType(),
5659                               Op.getValueType());
5660     SDLoc Loc(Op);
5661     MakeLibCallOptions CallOptions;
5662     SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
5663     SDValue Result;
5664     std::tie(Result, Chain) = makeLibCall(DAG, LC, Op.getValueType(), SrcVal,
5665                                           CallOptions, Loc, Chain);
5666     return IsStrict ? DAG.getMergeValues({Result, Chain}, Loc) : Result;
5667   }
5668 
5669   // FIXME: Remove this when we have strict fp instruction selection patterns
5670   if (IsStrict) {
5671     SDLoc Loc(Op);
5672     SDValue Result =
5673         DAG.getNode(Op.getOpcode() == ISD::STRICT_FP_TO_SINT ? ISD::FP_TO_SINT
5674                                                              : ISD::FP_TO_UINT,
5675                     Loc, Op.getValueType(), SrcVal);
5676     return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc);
5677   }
5678 
5679   return Op;
5680 }
5681 
5682 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
5683   EVT VT = Op.getValueType();
5684   SDLoc dl(Op);
5685 
5686   if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
5687     if (VT.getVectorElementType() == MVT::f32)
5688       return Op;
5689     return DAG.UnrollVectorOp(Op.getNode());
5690   }
5691 
5692   assert((Op.getOperand(0).getValueType() == MVT::v4i16 ||
5693           Op.getOperand(0).getValueType() == MVT::v8i16) &&
5694          "Invalid type for custom lowering!");
5695 
5696   const bool HasFullFP16 =
5697     static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16();
5698 
5699   EVT DestVecType;
5700   if (VT == MVT::v4f32)
5701     DestVecType = MVT::v4i32;
5702   else if (VT == MVT::v4f16 && HasFullFP16)
5703     DestVecType = MVT::v4i16;
5704   else if (VT == MVT::v8f16 && HasFullFP16)
5705     DestVecType = MVT::v8i16;
5706   else
5707     return DAG.UnrollVectorOp(Op.getNode());
5708 
5709   unsigned CastOpc;
5710   unsigned Opc;
5711   switch (Op.getOpcode()) {
5712   default: llvm_unreachable("Invalid opcode!");
5713   case ISD::SINT_TO_FP:
5714     CastOpc = ISD::SIGN_EXTEND;
5715     Opc = ISD::SINT_TO_FP;
5716     break;
5717   case ISD::UINT_TO_FP:
5718     CastOpc = ISD::ZERO_EXTEND;
5719     Opc = ISD::UINT_TO_FP;
5720     break;
5721   }
5722 
5723   Op = DAG.getNode(CastOpc, dl, DestVecType, Op.getOperand(0));
5724   return DAG.getNode(Opc, dl, VT, Op);
5725 }
5726 
5727 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
5728   EVT VT = Op.getValueType();
5729   if (VT.isVector())
5730     return LowerVectorINT_TO_FP(Op, DAG);
5731   if (isUnsupportedFloatingType(VT)) {
5732     RTLIB::Libcall LC;
5733     if (Op.getOpcode() == ISD::SINT_TO_FP)
5734       LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
5735                               Op.getValueType());
5736     else
5737       LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
5738                               Op.getValueType());
5739     MakeLibCallOptions CallOptions;
5740     return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
5741                        CallOptions, SDLoc(Op)).first;
5742   }
5743 
5744   return Op;
5745 }
5746 
5747 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
5748   // Implement fcopysign with a fabs and a conditional fneg.
5749   SDValue Tmp0 = Op.getOperand(0);
5750   SDValue Tmp1 = Op.getOperand(1);
5751   SDLoc dl(Op);
5752   EVT VT = Op.getValueType();
5753   EVT SrcVT = Tmp1.getValueType();
5754   bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
5755     Tmp0.getOpcode() == ARMISD::VMOVDRR;
5756   bool UseNEON = !InGPR && Subtarget->hasNEON();
5757 
5758   if (UseNEON) {
5759     // Use VBSL to copy the sign bit.
5760     unsigned EncodedVal = ARM_AM::createVMOVModImm(0x6, 0x80);
5761     SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
5762                                DAG.getTargetConstant(EncodedVal, dl, MVT::i32));
5763     EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
5764     if (VT == MVT::f64)
5765       Mask = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT,
5766                          DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
5767                          DAG.getConstant(32, dl, MVT::i32));
5768     else /*if (VT == MVT::f32)*/
5769       Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
5770     if (SrcVT == MVT::f32) {
5771       Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
5772       if (VT == MVT::f64)
5773         Tmp1 = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT,
5774                            DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
5775                            DAG.getConstant(32, dl, MVT::i32));
5776     } else if (VT == MVT::f32)
5777       Tmp1 = DAG.getNode(ARMISD::VSHRuIMM, dl, MVT::v1i64,
5778                          DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
5779                          DAG.getConstant(32, dl, MVT::i32));
5780     Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
5781     Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);
5782 
5783     SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff),
5784                                             dl, MVT::i32);
5785     AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
5786     SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
5787                                   DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));
5788 
5789     SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
5790                               DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
5791                               DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
5792     if (VT == MVT::f32) {
5793       Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
5794       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
5795                         DAG.getConstant(0, dl, MVT::i32));
5796     } else {
5797       Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
5798     }
5799 
5800     return Res;
5801   }
5802 
5803   // Bitcast operand 1 to i32.
5804   if (SrcVT == MVT::f64)
5805     Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
5806                        Tmp1).getValue(1);
5807   Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
5808 
5809   // Or in the signbit with integer operations.
5810   SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32);
5811   SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32);
5812   Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
5813   if (VT == MVT::f32) {
5814     Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
5815                        DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
5816     return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
5817                        DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
5818   }
5819 
5820   // f64: Or the high part with signbit and then combine two parts.
5821   Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
5822                      Tmp0);
5823   SDValue Lo = Tmp0.getValue(0);
5824   SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
5825   Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
5826   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
5827 }
5828 
5829 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
5830   MachineFunction &MF = DAG.getMachineFunction();
5831   MachineFrameInfo &MFI = MF.getFrameInfo();
5832   MFI.setReturnAddressIsTaken(true);
5833 
5834   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
5835     return SDValue();
5836 
5837   EVT VT = Op.getValueType();
5838   SDLoc dl(Op);
5839   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5840   if (Depth) {
5841     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
5842     SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
5843     return DAG.getLoad(VT, dl, DAG.getEntryNode(),
5844                        DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
5845                        MachinePointerInfo());
5846   }
5847 
5848   // Return LR, which contains the return address. Mark it an implicit live-in.
5849   unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
5850   return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
5851 }
5852 
5853 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
5854   const ARMBaseRegisterInfo &ARI =
5855     *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
5856   MachineFunction &MF = DAG.getMachineFunction();
5857   MachineFrameInfo &MFI = MF.getFrameInfo();
5858   MFI.setFrameAddressIsTaken(true);
5859 
5860   EVT VT = Op.getValueType();
5861   SDLoc dl(Op);  // FIXME probably not meaningful
5862   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5863   Register FrameReg = ARI.getFrameRegister(MF);
5864   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
5865   while (Depth--)
5866     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
5867                             MachinePointerInfo());
5868   return FrameAddr;
5869 }
5870 
5871 // FIXME? Maybe this could be a TableGen attribute on some registers and
5872 // this table could be generated automatically from RegInfo.
5873 Register ARMTargetLowering::getRegisterByName(const char* RegName, LLT VT,
5874                                               const MachineFunction &MF) const {
5875   Register Reg = StringSwitch<unsigned>(RegName)
5876                        .Case("sp", ARM::SP)
5877                        .Default(0);
5878   if (Reg)
5879     return Reg;
5880   report_fatal_error(Twine("Invalid register name \""
5881                               + StringRef(RegName)  + "\"."));
5882 }
5883 
5884 // Result is 64 bit value so split into two 32 bit values and return as a
5885 // pair of values.
5886 static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
5887                                 SelectionDAG &DAG) {
5888   SDLoc DL(N);
5889 
5890   // This function is only supposed to be called for i64 type destination.
5891   assert(N->getValueType(0) == MVT::i64
5892           && "ExpandREAD_REGISTER called for non-i64 type result.");
5893 
5894   SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL,
5895                              DAG.getVTList(MVT::i32, MVT::i32, MVT::Other),
5896                              N->getOperand(0),
5897                              N->getOperand(1));
5898 
5899   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0),
5900                     Read.getValue(1)));
5901   Results.push_back(Read.getOperand(0));
5902 }
5903 
5904 /// \p BC is a bitcast that is about to be turned into a VMOVDRR.
5905 /// When \p DstVT, the destination type of \p BC, is on the vector
5906 /// register bank and the source of bitcast, \p Op, operates on the same bank,
5907 /// it might be possible to combine them, such that everything stays on the
5908 /// vector register bank.
5909 /// \p return The node that would replace \p BT, if the combine
5910 /// is possible.
5911 static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
5912                                                 SelectionDAG &DAG) {
5913   SDValue Op = BC->getOperand(0);
5914   EVT DstVT = BC->getValueType(0);
5915 
5916   // The only vector instruction that can produce a scalar (remember,
5917   // since the bitcast was about to be turned into VMOVDRR, the source
5918   // type is i64) from a vector is EXTRACT_VECTOR_ELT.
5919   // Moreover, we can do this combine only if there is one use.
5920   // Finally, if the destination type is not a vector, there is not
5921   // much point on forcing everything on the vector bank.
5922   if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5923       !Op.hasOneUse())
5924     return SDValue();
5925 
5926   // If the index is not constant, we will introduce an additional
5927   // multiply that will stick.
5928   // Give up in that case.
5929   ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1));
5930   if (!Index)
5931     return SDValue();
5932   unsigned DstNumElt = DstVT.getVectorNumElements();
5933 
5934   // Compute the new index.
5935   const APInt &APIntIndex = Index->getAPIntValue();
5936   APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt);
5937   NewIndex *= APIntIndex;
5938   // Check if the new constant index fits into i32.
5939   if (NewIndex.getBitWidth() > 32)
5940     return SDValue();
5941 
5942   // vMTy bitcast(i64 extractelt vNi64 src, i32 index) ->
5943   // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M)
5944   SDLoc dl(Op);
5945   SDValue ExtractSrc = Op.getOperand(0);
5946   EVT VecVT = EVT::getVectorVT(
5947       *DAG.getContext(), DstVT.getScalarType(),
5948       ExtractSrc.getValueType().getVectorNumElements() * DstNumElt);
5949   SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc);
5950   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast,
5951                      DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32));
5952 }
5953 
5954 /// ExpandBITCAST - If the target supports VFP, this function is called to
5955 /// expand a bit convert where either the source or destination type is i64 to
5956 /// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
5957 /// operand type is illegal (e.g., v2f32 for a target that doesn't support
5958 /// vectors), since the legalizer won't know what to do with that.
5959 SDValue ARMTargetLowering::ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
5960                                          const ARMSubtarget *Subtarget) const {
5961   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5962   SDLoc dl(N);
5963   SDValue Op = N->getOperand(0);
5964 
5965   // This function is only supposed to be called for i16 and i64 types, either
5966   // as the source or destination of the bit convert.
5967   EVT SrcVT = Op.getValueType();
5968   EVT DstVT = N->getValueType(0);
5969 
5970   if ((SrcVT == MVT::i16 || SrcVT == MVT::i32) &&
5971       (DstVT == MVT::f16 || DstVT == MVT::bf16))
5972     return MoveToHPR(SDLoc(N), DAG, MVT::i32, DstVT.getSimpleVT(),
5973                      DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), MVT::i32, Op));
5974 
5975   if ((DstVT == MVT::i16 || DstVT == MVT::i32) &&
5976       (SrcVT == MVT::f16 || SrcVT == MVT::bf16))
5977     return DAG.getNode(
5978         ISD::TRUNCATE, SDLoc(N), DstVT,
5979         MoveFromHPR(SDLoc(N), DAG, MVT::i32, SrcVT.getSimpleVT(), Op));
5980 
5981   if (!(SrcVT == MVT::i64 || DstVT == MVT::i64))
5982     return SDValue();
5983 
5984   // Turn i64->f64 into VMOVDRR.
5985   if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
5986     // Do not force values to GPRs (this is what VMOVDRR does for the inputs)
5987     // if we can combine the bitcast with its source.
5988     if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG))
5989       return Val;
5990 
5991     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
5992                              DAG.getConstant(0, dl, MVT::i32));
5993     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
5994                              DAG.getConstant(1, dl, MVT::i32));
5995     return DAG.getNode(ISD::BITCAST, dl, DstVT,
5996                        DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
5997   }
5998 
5999   // Turn f64->i64 into VMOVRRD.
6000   if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
6001     SDValue Cvt;
6002     if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() &&
6003         SrcVT.getVectorNumElements() > 1)
6004       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
6005                         DAG.getVTList(MVT::i32, MVT::i32),
6006                         DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
6007     else
6008       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
6009                         DAG.getVTList(MVT::i32, MVT::i32), Op);
6010     // Merge the pieces into a single i64 value.
6011     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
6012   }
6013 
6014   return SDValue();
6015 }
6016 
6017 /// getZeroVector - Returns a vector of specified type with all zero elements.
6018 /// Zero vectors are used to represent vector negation and in those cases
6019 /// will be implemented with the NEON VNEG instruction.  However, VNEG does
6020 /// not support i64 elements, so sometimes the zero vectors will need to be
6021 /// explicitly constructed.  Regardless, use a canonical VMOV to create the
6022 /// zero vector.
6023 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
6024   assert(VT.isVector() && "Expected a vector type");
6025   // The canonical modified immediate encoding of a zero vector is....0!
6026   SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
6027   EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
6028   SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
6029   return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
6030 }
6031 
6032 /// LowerShiftRightParts - Lower SRA_PARTS, which returns two
6033 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
6034 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
6035                                                 SelectionDAG &DAG) const {
6036   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
6037   EVT VT = Op.getValueType();
6038   unsigned VTBits = VT.getSizeInBits();
6039   SDLoc dl(Op);
6040   SDValue ShOpLo = Op.getOperand(0);
6041   SDValue ShOpHi = Op.getOperand(1);
6042   SDValue ShAmt  = Op.getOperand(2);
6043   SDValue ARMcc;
6044   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
6045   unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
6046 
6047   assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
6048 
6049   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
6050                                  DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
6051   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
6052   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
6053                                    DAG.getConstant(VTBits, dl, MVT::i32));
6054   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
6055   SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
6056   SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
6057   SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
6058                             ISD::SETGE, ARMcc, DAG, dl);
6059   SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift,
6060                            ARMcc, CCR, CmpLo);
6061 
6062   SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
6063   SDValue HiBigShift = Opc == ISD::SRA
6064                            ? DAG.getNode(Opc, dl, VT, ShOpHi,
6065                                          DAG.getConstant(VTBits - 1, dl, VT))
6066                            : DAG.getConstant(0, dl, VT);
6067   SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
6068                             ISD::SETGE, ARMcc, DAG, dl);
6069   SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
6070                            ARMcc, CCR, CmpHi);
6071 
6072   SDValue Ops[2] = { Lo, Hi };
6073   return DAG.getMergeValues(Ops, dl);
6074 }
6075 
6076 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
6077 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
6078 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
6079                                                SelectionDAG &DAG) const {
6080   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
6081   EVT VT = Op.getValueType();
6082   unsigned VTBits = VT.getSizeInBits();
6083   SDLoc dl(Op);
6084   SDValue ShOpLo = Op.getOperand(0);
6085   SDValue ShOpHi = Op.getOperand(1);
6086   SDValue ShAmt  = Op.getOperand(2);
6087   SDValue ARMcc;
6088   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
6089 
6090   assert(Op.getOpcode() == ISD::SHL_PARTS);
6091   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
6092                                  DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
6093   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
6094   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
6095   SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
6096 
6097   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
6098                                    DAG.getConstant(VTBits, dl, MVT::i32));
6099   SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
6100   SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
6101                             ISD::SETGE, ARMcc, DAG, dl);
6102   SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
6103                            ARMcc, CCR, CmpHi);
6104 
6105   SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
6106                           ISD::SETGE, ARMcc, DAG, dl);
6107   SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
6108   SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift,
6109                            DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo);
6110 
6111   SDValue Ops[2] = { Lo, Hi };
6112   return DAG.getMergeValues(Ops, dl);
6113 }
6114 
6115 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
6116                                             SelectionDAG &DAG) const {
6117   // The rounding mode is in bits 23:22 of the FPSCR.
6118   // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
6119   // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3)
6120   // so that the shift + and get folded into a bitfield extract.
6121   SDLoc dl(Op);
6122   SDValue Chain = Op.getOperand(0);
6123   SDValue Ops[] = {Chain,
6124                    DAG.getConstant(Intrinsic::arm_get_fpscr, dl, MVT::i32)};
6125 
6126   SDValue FPSCR =
6127       DAG.getNode(ISD::INTRINSIC_W_CHAIN, dl, {MVT::i32, MVT::Other}, Ops);
6128   Chain = FPSCR.getValue(1);
6129   SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
6130                                   DAG.getConstant(1U << 22, dl, MVT::i32));
6131   SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
6132                               DAG.getConstant(22, dl, MVT::i32));
6133   SDValue And = DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
6134                             DAG.getConstant(3, dl, MVT::i32));
6135   return DAG.getMergeValues({And, Chain}, dl);
6136 }
6137 
6138 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
6139                          const ARMSubtarget *ST) {
6140   SDLoc dl(N);
6141   EVT VT = N->getValueType(0);
6142   if (VT.isVector() && ST->hasNEON()) {
6143 
6144     // Compute the least significant set bit: LSB = X & -X
6145     SDValue X = N->getOperand(0);
6146     SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X);
6147     SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX);
6148 
6149     EVT ElemTy = VT.getVectorElementType();
6150 
6151     if (ElemTy == MVT::i8) {
6152       // Compute with: cttz(x) = ctpop(lsb - 1)
6153       SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
6154                                 DAG.getTargetConstant(1, dl, ElemTy));
6155       SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
6156       return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
6157     }
6158 
6159     if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) &&
6160         (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) {
6161       // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0
6162       unsigned NumBits = ElemTy.getSizeInBits();
6163       SDValue WidthMinus1 =
6164           DAG.getNode(ARMISD::VMOVIMM, dl, VT,
6165                       DAG.getTargetConstant(NumBits - 1, dl, ElemTy));
6166       SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB);
6167       return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ);
6168     }
6169 
6170     // Compute with: cttz(x) = ctpop(lsb - 1)
6171 
6172     // Compute LSB - 1.
6173     SDValue Bits;
6174     if (ElemTy == MVT::i64) {
6175       // Load constant 0xffff'ffff'ffff'ffff to register.
6176       SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
6177                                DAG.getTargetConstant(0x1eff, dl, MVT::i32));
6178       Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF);
6179     } else {
6180       SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
6181                                 DAG.getTargetConstant(1, dl, ElemTy));
6182       Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
6183     }
6184     return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
6185   }
6186 
6187   if (!ST->hasV6T2Ops())
6188     return SDValue();
6189 
6190   SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0));
6191   return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
6192 }
6193 
6194 static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
6195                           const ARMSubtarget *ST) {
6196   EVT VT = N->getValueType(0);
6197   SDLoc DL(N);
6198 
6199   assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
6200   assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
6201           VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
6202          "Unexpected type for custom ctpop lowering");
6203 
6204   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6205   EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
6206   SDValue Res = DAG.getBitcast(VT8Bit, N->getOperand(0));
6207   Res = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Res);
6208 
6209   // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds.
6210   unsigned EltSize = 8;
6211   unsigned NumElts = VT.is64BitVector() ? 8 : 16;
6212   while (EltSize != VT.getScalarSizeInBits()) {
6213     SmallVector<SDValue, 8> Ops;
6214     Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddlu, DL,
6215                                   TLI.getPointerTy(DAG.getDataLayout())));
6216     Ops.push_back(Res);
6217 
6218     EltSize *= 2;
6219     NumElts /= 2;
6220     MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);
6221     Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WidenVT, Ops);
6222   }
6223 
6224   return Res;
6225 }
6226 
6227 /// Getvshiftimm - Check if this is a valid build_vector for the immediate
6228 /// operand of a vector shift operation, where all the elements of the
6229 /// build_vector must have the same constant integer value.
6230 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
6231   // Ignore bit_converts.
6232   while (Op.getOpcode() == ISD::BITCAST)
6233     Op = Op.getOperand(0);
6234   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
6235   APInt SplatBits, SplatUndef;
6236   unsigned SplatBitSize;
6237   bool HasAnyUndefs;
6238   if (!BVN ||
6239       !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
6240                             ElementBits) ||
6241       SplatBitSize > ElementBits)
6242     return false;
6243   Cnt = SplatBits.getSExtValue();
6244   return true;
6245 }
6246 
6247 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
6248 /// operand of a vector shift left operation.  That value must be in the range:
6249 ///   0 <= Value < ElementBits for a left shift; or
6250 ///   0 <= Value <= ElementBits for a long left shift.
6251 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
6252   assert(VT.isVector() && "vector shift count is not a vector type");
6253   int64_t ElementBits = VT.getScalarSizeInBits();
6254   if (!getVShiftImm(Op, ElementBits, Cnt))
6255     return false;
6256   return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
6257 }
6258 
6259 /// isVShiftRImm - Check if this is a valid build_vector for the immediate
6260 /// operand of a vector shift right operation.  For a shift opcode, the value
6261 /// is positive, but for an intrinsic the value count must be negative. The
6262 /// absolute value must be in the range:
6263 ///   1 <= |Value| <= ElementBits for a right shift; or
6264 ///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
6265 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
6266                          int64_t &Cnt) {
6267   assert(VT.isVector() && "vector shift count is not a vector type");
6268   int64_t ElementBits = VT.getScalarSizeInBits();
6269   if (!getVShiftImm(Op, ElementBits, Cnt))
6270     return false;
6271   if (!isIntrinsic)
6272     return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
6273   if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) {
6274     Cnt = -Cnt;
6275     return true;
6276   }
6277   return false;
6278 }
6279 
6280 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
6281                           const ARMSubtarget *ST) {
6282   EVT VT = N->getValueType(0);
6283   SDLoc dl(N);
6284   int64_t Cnt;
6285 
6286   if (!VT.isVector())
6287     return SDValue();
6288 
6289   // We essentially have two forms here. Shift by an immediate and shift by a
6290   // vector register (there are also shift by a gpr, but that is just handled
6291   // with a tablegen pattern). We cannot easily match shift by an immediate in
6292   // tablegen so we do that here and generate a VSHLIMM/VSHRsIMM/VSHRuIMM.
6293   // For shifting by a vector, we don't have VSHR, only VSHL (which can be
6294   // signed or unsigned, and a negative shift indicates a shift right).
6295   if (N->getOpcode() == ISD::SHL) {
6296     if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
6297       return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0),
6298                          DAG.getConstant(Cnt, dl, MVT::i32));
6299     return DAG.getNode(ARMISD::VSHLu, dl, VT, N->getOperand(0),
6300                        N->getOperand(1));
6301   }
6302 
6303   assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
6304          "unexpected vector shift opcode");
6305 
6306   if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
6307     unsigned VShiftOpc =
6308         (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM);
6309     return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
6310                        DAG.getConstant(Cnt, dl, MVT::i32));
6311   }
6312 
6313   // Other right shifts we don't have operations for (we use a shift left by a
6314   // negative number).
6315   EVT ShiftVT = N->getOperand(1).getValueType();
6316   SDValue NegatedCount = DAG.getNode(
6317       ISD::SUB, dl, ShiftVT, getZeroVector(ShiftVT, DAG, dl), N->getOperand(1));
6318   unsigned VShiftOpc =
6319       (N->getOpcode() == ISD::SRA ? ARMISD::VSHLs : ARMISD::VSHLu);
6320   return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), NegatedCount);
6321 }
6322 
6323 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
6324                                 const ARMSubtarget *ST) {
6325   EVT VT = N->getValueType(0);
6326   SDLoc dl(N);
6327 
6328   // We can get here for a node like i32 = ISD::SHL i32, i64
6329   if (VT != MVT::i64)
6330     return SDValue();
6331 
6332   assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA ||
6333           N->getOpcode() == ISD::SHL) &&
6334          "Unknown shift to lower!");
6335 
6336   unsigned ShOpc = N->getOpcode();
6337   if (ST->hasMVEIntegerOps()) {
6338     SDValue ShAmt = N->getOperand(1);
6339     unsigned ShPartsOpc = ARMISD::LSLL;
6340     ConstantSDNode *Con = dyn_cast<ConstantSDNode>(ShAmt);
6341 
6342     // If the shift amount is greater than 32 or has a greater bitwidth than 64
6343     // then do the default optimisation
6344     if (ShAmt->getValueType(0).getSizeInBits() > 64 ||
6345         (Con && (Con->getZExtValue() == 0 || Con->getZExtValue() >= 32)))
6346       return SDValue();
6347 
6348     // Extract the lower 32 bits of the shift amount if it's not an i32
6349     if (ShAmt->getValueType(0) != MVT::i32)
6350       ShAmt = DAG.getZExtOrTrunc(ShAmt, dl, MVT::i32);
6351 
6352     if (ShOpc == ISD::SRL) {
6353       if (!Con)
6354         // There is no t2LSRLr instruction so negate and perform an lsll if the
6355         // shift amount is in a register, emulating a right shift.
6356         ShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
6357                             DAG.getConstant(0, dl, MVT::i32), ShAmt);
6358       else
6359         // Else generate an lsrl on the immediate shift amount
6360         ShPartsOpc = ARMISD::LSRL;
6361     } else if (ShOpc == ISD::SRA)
6362       ShPartsOpc = ARMISD::ASRL;
6363 
6364     // Lower 32 bits of the destination/source
6365     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
6366                              DAG.getConstant(0, dl, MVT::i32));
6367     // Upper 32 bits of the destination/source
6368     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
6369                              DAG.getConstant(1, dl, MVT::i32));
6370 
6371     // Generate the shift operation as computed above
6372     Lo = DAG.getNode(ShPartsOpc, dl, DAG.getVTList(MVT::i32, MVT::i32), Lo, Hi,
6373                      ShAmt);
6374     // The upper 32 bits come from the second return value of lsll
6375     Hi = SDValue(Lo.getNode(), 1);
6376     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6377   }
6378 
6379   // We only lower SRA, SRL of 1 here, all others use generic lowering.
6380   if (!isOneConstant(N->getOperand(1)) || N->getOpcode() == ISD::SHL)
6381     return SDValue();
6382 
6383   // If we are in thumb mode, we don't have RRX.
6384   if (ST->isThumb1Only())
6385     return SDValue();
6386 
6387   // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
6388   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
6389                            DAG.getConstant(0, dl, MVT::i32));
6390   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
6391                            DAG.getConstant(1, dl, MVT::i32));
6392 
6393   // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
6394   // captures the result into a carry flag.
6395   unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
6396   Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);
6397 
6398   // The low part is an ARMISD::RRX operand, which shifts the carry in.
6399   Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
6400 
6401   // Merge the pieces into a single i64 value.
6402  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6403 }
6404 
6405 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG,
6406                            const ARMSubtarget *ST) {
6407   bool Invert = false;
6408   bool Swap = false;
6409   unsigned Opc = ARMCC::AL;
6410 
6411   SDValue Op0 = Op.getOperand(0);
6412   SDValue Op1 = Op.getOperand(1);
6413   SDValue CC = Op.getOperand(2);
6414   EVT VT = Op.getValueType();
6415   ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
6416   SDLoc dl(Op);
6417 
6418   EVT CmpVT;
6419   if (ST->hasNEON())
6420     CmpVT = Op0.getValueType().changeVectorElementTypeToInteger();
6421   else {
6422     assert(ST->hasMVEIntegerOps() &&
6423            "No hardware support for integer vector comparison!");
6424 
6425     if (Op.getValueType().getVectorElementType() != MVT::i1)
6426       return SDValue();
6427 
6428     // Make sure we expand floating point setcc to scalar if we do not have
6429     // mve.fp, so that we can handle them from there.
6430     if (Op0.getValueType().isFloatingPoint() && !ST->hasMVEFloatOps())
6431       return SDValue();
6432 
6433     CmpVT = VT;
6434   }
6435 
6436   if (Op0.getValueType().getVectorElementType() == MVT::i64 &&
6437       (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) {
6438     // Special-case integer 64-bit equality comparisons. They aren't legal,
6439     // but they can be lowered with a few vector instructions.
6440     unsigned CmpElements = CmpVT.getVectorNumElements() * 2;
6441     EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements);
6442     SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0);
6443     SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1);
6444     SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1,
6445                               DAG.getCondCode(ISD::SETEQ));
6446     SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp);
6447     SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed);
6448     Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged);
6449     if (SetCCOpcode == ISD::SETNE)
6450       Merged = DAG.getNOT(dl, Merged, CmpVT);
6451     Merged = DAG.getSExtOrTrunc(Merged, dl, VT);
6452     return Merged;
6453   }
6454 
6455   if (CmpVT.getVectorElementType() == MVT::i64)
6456     // 64-bit comparisons are not legal in general.
6457     return SDValue();
6458 
6459   if (Op1.getValueType().isFloatingPoint()) {
6460     switch (SetCCOpcode) {
6461     default: llvm_unreachable("Illegal FP comparison");
6462     case ISD::SETUNE:
6463     case ISD::SETNE:
6464       if (ST->hasMVEFloatOps()) {
6465         Opc = ARMCC::NE; break;
6466       } else {
6467         Invert = true; LLVM_FALLTHROUGH;
6468       }
6469     case ISD::SETOEQ:
6470     case ISD::SETEQ:  Opc = ARMCC::EQ; break;
6471     case ISD::SETOLT:
6472     case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
6473     case ISD::SETOGT:
6474     case ISD::SETGT:  Opc = ARMCC::GT; break;
6475     case ISD::SETOLE:
6476     case ISD::SETLE:  Swap = true; LLVM_FALLTHROUGH;
6477     case ISD::SETOGE:
6478     case ISD::SETGE: Opc = ARMCC::GE; break;
6479     case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH;
6480     case ISD::SETULE: Invert = true; Opc = ARMCC::GT; break;
6481     case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH;
6482     case ISD::SETULT: Invert = true; Opc = ARMCC::GE; break;
6483     case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH;
6484     case ISD::SETONE: {
6485       // Expand this to (OLT | OGT).
6486       SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0,
6487                                    DAG.getConstant(ARMCC::GT, dl, MVT::i32));
6488       SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1,
6489                                    DAG.getConstant(ARMCC::GT, dl, MVT::i32));
6490       SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1);
6491       if (Invert)
6492         Result = DAG.getNOT(dl, Result, VT);
6493       return Result;
6494     }
6495     case ISD::SETUO: Invert = true; LLVM_FALLTHROUGH;
6496     case ISD::SETO: {
6497       // Expand this to (OLT | OGE).
6498       SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0,
6499                                    DAG.getConstant(ARMCC::GT, dl, MVT::i32));
6500       SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1,
6501                                    DAG.getConstant(ARMCC::GE, dl, MVT::i32));
6502       SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1);
6503       if (Invert)
6504         Result = DAG.getNOT(dl, Result, VT);
6505       return Result;
6506     }
6507     }
6508   } else {
6509     // Integer comparisons.
6510     switch (SetCCOpcode) {
6511     default: llvm_unreachable("Illegal integer comparison");
6512     case ISD::SETNE:
6513       if (ST->hasMVEIntegerOps()) {
6514         Opc = ARMCC::NE; break;
6515       } else {
6516         Invert = true; LLVM_FALLTHROUGH;
6517       }
6518     case ISD::SETEQ:  Opc = ARMCC::EQ; break;
6519     case ISD::SETLT:  Swap = true; LLVM_FALLTHROUGH;
6520     case ISD::SETGT:  Opc = ARMCC::GT; break;
6521     case ISD::SETLE:  Swap = true; LLVM_FALLTHROUGH;
6522     case ISD::SETGE:  Opc = ARMCC::GE; break;
6523     case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
6524     case ISD::SETUGT: Opc = ARMCC::HI; break;
6525     case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
6526     case ISD::SETUGE: Opc = ARMCC::HS; break;
6527     }
6528 
6529     // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
6530     if (ST->hasNEON() && Opc == ARMCC::EQ) {
6531       SDValue AndOp;
6532       if (ISD::isBuildVectorAllZeros(Op1.getNode()))
6533         AndOp = Op0;
6534       else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
6535         AndOp = Op1;
6536 
6537       // Ignore bitconvert.
6538       if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
6539         AndOp = AndOp.getOperand(0);
6540 
6541       if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
6542         Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0));
6543         Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1));
6544         SDValue Result = DAG.getNode(ARMISD::VTST, dl, CmpVT, Op0, Op1);
6545         if (!Invert)
6546           Result = DAG.getNOT(dl, Result, VT);
6547         return Result;
6548       }
6549     }
6550   }
6551 
6552   if (Swap)
6553     std::swap(Op0, Op1);
6554 
6555   // If one of the operands is a constant vector zero, attempt to fold the
6556   // comparison to a specialized compare-against-zero form.
6557   SDValue SingleOp;
6558   if (ISD::isBuildVectorAllZeros(Op1.getNode()))
6559     SingleOp = Op0;
6560   else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
6561     if (Opc == ARMCC::GE)
6562       Opc = ARMCC::LE;
6563     else if (Opc == ARMCC::GT)
6564       Opc = ARMCC::LT;
6565     SingleOp = Op1;
6566   }
6567 
6568   SDValue Result;
6569   if (SingleOp.getNode()) {
6570     Result = DAG.getNode(ARMISD::VCMPZ, dl, CmpVT, SingleOp,
6571                          DAG.getConstant(Opc, dl, MVT::i32));
6572   } else {
6573     Result = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1,
6574                          DAG.getConstant(Opc, dl, MVT::i32));
6575   }
6576 
6577   Result = DAG.getSExtOrTrunc(Result, dl, VT);
6578 
6579   if (Invert)
6580     Result = DAG.getNOT(dl, Result, VT);
6581 
6582   return Result;
6583 }
6584 
6585 static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) {
6586   SDValue LHS = Op.getOperand(0);
6587   SDValue RHS = Op.getOperand(1);
6588   SDValue Carry = Op.getOperand(2);
6589   SDValue Cond = Op.getOperand(3);
6590   SDLoc DL(Op);
6591 
6592   assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
6593 
6594   // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we
6595   // have to invert the carry first.
6596   Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
6597                       DAG.getConstant(1, DL, MVT::i32), Carry);
6598   // This converts the boolean value carry into the carry flag.
6599   Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);
6600 
6601   SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
6602   SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry);
6603 
6604   SDValue FVal = DAG.getConstant(0, DL, MVT::i32);
6605   SDValue TVal = DAG.getConstant(1, DL, MVT::i32);
6606   SDValue ARMcc = DAG.getConstant(
6607       IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32);
6608   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
6609   SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR,
6610                                    Cmp.getValue(1), SDValue());
6611   return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc,
6612                      CCR, Chain.getValue(1));
6613 }
6614 
6615 /// isVMOVModifiedImm - Check if the specified splat value corresponds to a
6616 /// valid vector constant for a NEON or MVE instruction with a "modified
6617 /// immediate" operand (e.g., VMOV).  If so, return the encoded value.
6618 static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
6619                                  unsigned SplatBitSize, SelectionDAG &DAG,
6620                                  const SDLoc &dl, EVT &VT, EVT VectorVT,
6621                                  VMOVModImmType type) {
6622   unsigned OpCmode, Imm;
6623   bool is128Bits = VectorVT.is128BitVector();
6624 
6625   // SplatBitSize is set to the smallest size that splats the vector, so a
6626   // zero vector will always have SplatBitSize == 8.  However, NEON modified
6627   // immediate instructions others than VMOV do not support the 8-bit encoding
6628   // of a zero vector, and the default encoding of zero is supposed to be the
6629   // 32-bit version.
6630   if (SplatBits == 0)
6631     SplatBitSize = 32;
6632 
6633   switch (SplatBitSize) {
6634   case 8:
6635     if (type != VMOVModImm)
6636       return SDValue();
6637     // Any 1-byte value is OK.  Op=0, Cmode=1110.
6638     assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
6639     OpCmode = 0xe;
6640     Imm = SplatBits;
6641     VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
6642     break;
6643 
6644   case 16:
6645     // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
6646     VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
6647     if ((SplatBits & ~0xff) == 0) {
6648       // Value = 0x00nn: Op=x, Cmode=100x.
6649       OpCmode = 0x8;
6650       Imm = SplatBits;
6651       break;
6652     }
6653     if ((SplatBits & ~0xff00) == 0) {
6654       // Value = 0xnn00: Op=x, Cmode=101x.
6655       OpCmode = 0xa;
6656       Imm = SplatBits >> 8;
6657       break;
6658     }
6659     return SDValue();
6660 
6661   case 32:
6662     // NEON's 32-bit VMOV supports splat values where:
6663     // * only one byte is nonzero, or
6664     // * the least significant byte is 0xff and the second byte is nonzero, or
6665     // * the least significant 2 bytes are 0xff and the third is nonzero.
6666     VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
6667     if ((SplatBits & ~0xff) == 0) {
6668       // Value = 0x000000nn: Op=x, Cmode=000x.
6669       OpCmode = 0;
6670       Imm = SplatBits;
6671       break;
6672     }
6673     if ((SplatBits & ~0xff00) == 0) {
6674       // Value = 0x0000nn00: Op=x, Cmode=001x.
6675       OpCmode = 0x2;
6676       Imm = SplatBits >> 8;
6677       break;
6678     }
6679     if ((SplatBits & ~0xff0000) == 0) {
6680       // Value = 0x00nn0000: Op=x, Cmode=010x.
6681       OpCmode = 0x4;
6682       Imm = SplatBits >> 16;
6683       break;
6684     }
6685     if ((SplatBits & ~0xff000000) == 0) {
6686       // Value = 0xnn000000: Op=x, Cmode=011x.
6687       OpCmode = 0x6;
6688       Imm = SplatBits >> 24;
6689       break;
6690     }
6691 
6692     // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
6693     if (type == OtherModImm) return SDValue();
6694 
6695     if ((SplatBits & ~0xffff) == 0 &&
6696         ((SplatBits | SplatUndef) & 0xff) == 0xff) {
6697       // Value = 0x0000nnff: Op=x, Cmode=1100.
6698       OpCmode = 0xc;
6699       Imm = SplatBits >> 8;
6700       break;
6701     }
6702 
6703     // cmode == 0b1101 is not supported for MVE VMVN
6704     if (type == MVEVMVNModImm)
6705       return SDValue();
6706 
6707     if ((SplatBits & ~0xffffff) == 0 &&
6708         ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
6709       // Value = 0x00nnffff: Op=x, Cmode=1101.
6710       OpCmode = 0xd;
6711       Imm = SplatBits >> 16;
6712       break;
6713     }
6714 
6715     // Note: there are a few 32-bit splat values (specifically: 00ffff00,
6716     // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
6717     // VMOV.I32.  A (very) minor optimization would be to replicate the value
6718     // and fall through here to test for a valid 64-bit splat.  But, then the
6719     // caller would also need to check and handle the change in size.
6720     return SDValue();
6721 
6722   case 64: {
6723     if (type != VMOVModImm)
6724       return SDValue();
6725     // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
6726     uint64_t BitMask = 0xff;
6727     uint64_t Val = 0;
6728     unsigned ImmMask = 1;
6729     Imm = 0;
6730     for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
6731       if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
6732         Val |= BitMask;
6733         Imm |= ImmMask;
6734       } else if ((SplatBits & BitMask) != 0) {
6735         return SDValue();
6736       }
6737       BitMask <<= 8;
6738       ImmMask <<= 1;
6739     }
6740 
6741     if (DAG.getDataLayout().isBigEndian()) {
6742       // Reverse the order of elements within the vector.
6743       unsigned BytesPerElem = VectorVT.getScalarSizeInBits() / 8;
6744       unsigned Mask = (1 << BytesPerElem) - 1;
6745       unsigned NumElems = 8 / BytesPerElem;
6746       unsigned NewImm = 0;
6747       for (unsigned ElemNum = 0; ElemNum < NumElems; ++ElemNum) {
6748         unsigned Elem = ((Imm >> ElemNum * BytesPerElem) & Mask);
6749         NewImm |= Elem << (NumElems - ElemNum - 1) * BytesPerElem;
6750       }
6751       Imm = NewImm;
6752     }
6753 
6754     // Op=1, Cmode=1110.
6755     OpCmode = 0x1e;
6756     VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
6757     break;
6758   }
6759 
6760   default:
6761     llvm_unreachable("unexpected size for isVMOVModifiedImm");
6762   }
6763 
6764   unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode, Imm);
6765   return DAG.getTargetConstant(EncodedVal, dl, MVT::i32);
6766 }
6767 
6768 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
6769                                            const ARMSubtarget *ST) const {
6770   EVT VT = Op.getValueType();
6771   bool IsDouble = (VT == MVT::f64);
6772   ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);
6773   const APFloat &FPVal = CFP->getValueAPF();
6774 
6775   // Prevent floating-point constants from using literal loads
6776   // when execute-only is enabled.
6777   if (ST->genExecuteOnly()) {
6778     // If we can represent the constant as an immediate, don't lower it
6779     if (isFPImmLegal(FPVal, VT))
6780       return Op;
6781     // Otherwise, construct as integer, and move to float register
6782     APInt INTVal = FPVal.bitcastToAPInt();
6783     SDLoc DL(CFP);
6784     switch (VT.getSimpleVT().SimpleTy) {
6785       default:
6786         llvm_unreachable("Unknown floating point type!");
6787         break;
6788       case MVT::f64: {
6789         SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32);
6790         SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32);
6791         return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi);
6792       }
6793       case MVT::f32:
6794           return DAG.getNode(ARMISD::VMOVSR, DL, VT,
6795               DAG.getConstant(INTVal, DL, MVT::i32));
6796     }
6797   }
6798 
6799   if (!ST->hasVFP3Base())
6800     return SDValue();
6801 
6802   // Use the default (constant pool) lowering for double constants when we have
6803   // an SP-only FPU
6804   if (IsDouble && !Subtarget->hasFP64())
6805     return SDValue();
6806 
6807   // Try splatting with a VMOV.f32...
6808   int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);
6809 
6810   if (ImmVal != -1) {
6811     if (IsDouble || !ST->useNEONForSinglePrecisionFP()) {
6812       // We have code in place to select a valid ConstantFP already, no need to
6813       // do any mangling.
6814       return Op;
6815     }
6816 
6817     // It's a float and we are trying to use NEON operations where
6818     // possible. Lower it to a splat followed by an extract.
6819     SDLoc DL(Op);
6820     SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32);
6821     SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
6822                                       NewVal);
6823     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
6824                        DAG.getConstant(0, DL, MVT::i32));
6825   }
6826 
6827   // The rest of our options are NEON only, make sure that's allowed before
6828   // proceeding..
6829   if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP()))
6830     return SDValue();
6831 
6832   EVT VMovVT;
6833   uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();
6834 
6835   // It wouldn't really be worth bothering for doubles except for one very
6836   // important value, which does happen to match: 0.0. So make sure we don't do
6837   // anything stupid.
6838   if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
6839     return SDValue();
6840 
6841   // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
6842   SDValue NewVal = isVMOVModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op),
6843                                      VMovVT, VT, VMOVModImm);
6844   if (NewVal != SDValue()) {
6845     SDLoc DL(Op);
6846     SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
6847                                       NewVal);
6848     if (IsDouble)
6849       return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
6850 
6851     // It's a float: cast and extract a vector element.
6852     SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
6853                                        VecConstant);
6854     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
6855                        DAG.getConstant(0, DL, MVT::i32));
6856   }
6857 
6858   // Finally, try a VMVN.i32
6859   NewVal = isVMOVModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT,
6860                              VT, VMVNModImm);
6861   if (NewVal != SDValue()) {
6862     SDLoc DL(Op);
6863     SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);
6864 
6865     if (IsDouble)
6866       return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
6867 
6868     // It's a float: cast and extract a vector element.
6869     SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
6870                                        VecConstant);
6871     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
6872                        DAG.getConstant(0, DL, MVT::i32));
6873   }
6874 
6875   return SDValue();
6876 }
6877 
6878 // check if an VEXT instruction can handle the shuffle mask when the
6879 // vector sources of the shuffle are the same.
6880 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
6881   unsigned NumElts = VT.getVectorNumElements();
6882 
6883   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
6884   if (M[0] < 0)
6885     return false;
6886 
6887   Imm = M[0];
6888 
6889   // If this is a VEXT shuffle, the immediate value is the index of the first
6890   // element.  The other shuffle indices must be the successive elements after
6891   // the first one.
6892   unsigned ExpectedElt = Imm;
6893   for (unsigned i = 1; i < NumElts; ++i) {
6894     // Increment the expected index.  If it wraps around, just follow it
6895     // back to index zero and keep going.
6896     ++ExpectedElt;
6897     if (ExpectedElt == NumElts)
6898       ExpectedElt = 0;
6899 
6900     if (M[i] < 0) continue; // ignore UNDEF indices
6901     if (ExpectedElt != static_cast<unsigned>(M[i]))
6902       return false;
6903   }
6904 
6905   return true;
6906 }
6907 
6908 static bool isVEXTMask(ArrayRef<int> M, EVT VT,
6909                        bool &ReverseVEXT, unsigned &Imm) {
6910   unsigned NumElts = VT.getVectorNumElements();
6911   ReverseVEXT = false;
6912 
6913   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
6914   if (M[0] < 0)
6915     return false;
6916 
6917   Imm = M[0];
6918 
6919   // If this is a VEXT shuffle, the immediate value is the index of the first
6920   // element.  The other shuffle indices must be the successive elements after
6921   // the first one.
6922   unsigned ExpectedElt = Imm;
6923   for (unsigned i = 1; i < NumElts; ++i) {
6924     // Increment the expected index.  If it wraps around, it may still be
6925     // a VEXT but the source vectors must be swapped.
6926     ExpectedElt += 1;
6927     if (ExpectedElt == NumElts * 2) {
6928       ExpectedElt = 0;
6929       ReverseVEXT = true;
6930     }
6931 
6932     if (M[i] < 0) continue; // ignore UNDEF indices
6933     if (ExpectedElt != static_cast<unsigned>(M[i]))
6934       return false;
6935   }
6936 
6937   // Adjust the index value if the source operands will be swapped.
6938   if (ReverseVEXT)
6939     Imm -= NumElts;
6940 
6941   return true;
6942 }
6943 
6944 /// isVREVMask - Check if a vector shuffle corresponds to a VREV
6945 /// instruction with the specified blocksize.  (The order of the elements
6946 /// within each block of the vector is reversed.)
6947 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
6948   assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
6949          "Only possible block sizes for VREV are: 16, 32, 64");
6950 
6951   unsigned EltSz = VT.getScalarSizeInBits();
6952   if (EltSz == 64)
6953     return false;
6954 
6955   unsigned NumElts = VT.getVectorNumElements();
6956   unsigned BlockElts = M[0] + 1;
6957   // If the first shuffle index is UNDEF, be optimistic.
6958   if (M[0] < 0)
6959     BlockElts = BlockSize / EltSz;
6960 
6961   if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
6962     return false;
6963 
6964   for (unsigned i = 0; i < NumElts; ++i) {
6965     if (M[i] < 0) continue; // ignore UNDEF indices
6966     if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
6967       return false;
6968   }
6969 
6970   return true;
6971 }
6972 
6973 static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
6974   // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
6975   // range, then 0 is placed into the resulting vector. So pretty much any mask
6976   // of 8 elements can work here.
6977   return VT == MVT::v8i8 && M.size() == 8;
6978 }
6979 
6980 static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask,
6981                                unsigned Index) {
6982   if (Mask.size() == Elements * 2)
6983     return Index / Elements;
6984   return Mask[Index] == 0 ? 0 : 1;
6985 }
6986 
6987 // Checks whether the shuffle mask represents a vector transpose (VTRN) by
6988 // checking that pairs of elements in the shuffle mask represent the same index
6989 // in each vector, incrementing the expected index by 2 at each step.
6990 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6]
6991 //  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g}
6992 //  v2={e,f,g,h}
6993 // WhichResult gives the offset for each element in the mask based on which
6994 // of the two results it belongs to.
6995 //
6996 // The transpose can be represented either as:
6997 // result1 = shufflevector v1, v2, result1_shuffle_mask
6998 // result2 = shufflevector v1, v2, result2_shuffle_mask
6999 // where v1/v2 and the shuffle masks have the same number of elements
7000 // (here WhichResult (see below) indicates which result is being checked)
7001 //
7002 // or as:
7003 // results = shufflevector v1, v2, shuffle_mask
7004 // where both results are returned in one vector and the shuffle mask has twice
7005 // as many elements as v1/v2 (here WhichResult will always be 0 if true) here we
7006 // want to check the low half and high half of the shuffle mask as if it were
7007 // the other case
7008 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
7009   unsigned EltSz = VT.getScalarSizeInBits();
7010   if (EltSz == 64)
7011     return false;
7012 
7013   unsigned NumElts = VT.getVectorNumElements();
7014   if (M.size() != NumElts && M.size() != NumElts*2)
7015     return false;
7016 
7017   // If the mask is twice as long as the input vector then we need to check the
7018   // upper and lower parts of the mask with a matching value for WhichResult
7019   // FIXME: A mask with only even values will be rejected in case the first
7020   // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only
7021   // M[0] is used to determine WhichResult
7022   for (unsigned i = 0; i < M.size(); i += NumElts) {
7023     WhichResult = SelectPairHalf(NumElts, M, i);
7024     for (unsigned j = 0; j < NumElts; j += 2) {
7025       if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
7026           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult))
7027         return false;
7028     }
7029   }
7030 
7031   if (M.size() == NumElts*2)
7032     WhichResult = 0;
7033 
7034   return true;
7035 }
7036 
7037 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
7038 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
7039 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
7040 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
7041   unsigned EltSz = VT.getScalarSizeInBits();
7042   if (EltSz == 64)
7043     return false;
7044 
7045   unsigned NumElts = VT.getVectorNumElements();
7046   if (M.size() != NumElts && M.size() != NumElts*2)
7047     return false;
7048 
7049   for (unsigned i = 0; i < M.size(); i += NumElts) {
7050     WhichResult = SelectPairHalf(NumElts, M, i);
7051     for (unsigned j = 0; j < NumElts; j += 2) {
7052       if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
7053           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult))
7054         return false;
7055     }
7056   }
7057 
7058   if (M.size() == NumElts*2)
7059     WhichResult = 0;
7060 
7061   return true;
7062 }
7063 
7064 // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking
7065 // that the mask elements are either all even and in steps of size 2 or all odd
7066 // and in steps of size 2.
7067 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6]
7068 //  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g}
7069 //  v2={e,f,g,h}
7070 // Requires similar checks to that of isVTRNMask with
7071 // respect the how results are returned.
7072 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
7073   unsigned EltSz = VT.getScalarSizeInBits();
7074   if (EltSz == 64)
7075     return false;
7076 
7077   unsigned NumElts = VT.getVectorNumElements();
7078   if (M.size() != NumElts && M.size() != NumElts*2)
7079     return false;
7080 
7081   for (unsigned i = 0; i < M.size(); i += NumElts) {
7082     WhichResult = SelectPairHalf(NumElts, M, i);
7083     for (unsigned j = 0; j < NumElts; ++j) {
7084       if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult)
7085         return false;
7086     }
7087   }
7088 
7089   if (M.size() == NumElts*2)
7090     WhichResult = 0;
7091 
7092   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
7093   if (VT.is64BitVector() && EltSz == 32)
7094     return false;
7095 
7096   return true;
7097 }
7098 
7099 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
7100 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
7101 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>,
7102 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
7103   unsigned EltSz = VT.getScalarSizeInBits();
7104   if (EltSz == 64)
7105     return false;
7106 
7107   unsigned NumElts = VT.getVectorNumElements();
7108   if (M.size() != NumElts && M.size() != NumElts*2)
7109     return false;
7110 
7111   unsigned Half = NumElts / 2;
7112   for (unsigned i = 0; i < M.size(); i += NumElts) {
7113     WhichResult = SelectPairHalf(NumElts, M, i);
7114     for (unsigned j = 0; j < NumElts; j += Half) {
7115       unsigned Idx = WhichResult;
7116       for (unsigned k = 0; k < Half; ++k) {
7117         int MIdx = M[i + j + k];
7118         if (MIdx >= 0 && (unsigned) MIdx != Idx)
7119           return false;
7120         Idx += 2;
7121       }
7122     }
7123   }
7124 
7125   if (M.size() == NumElts*2)
7126     WhichResult = 0;
7127 
7128   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
7129   if (VT.is64BitVector() && EltSz == 32)
7130     return false;
7131 
7132   return true;
7133 }
7134 
7135 // Checks whether the shuffle mask represents a vector zip (VZIP) by checking
7136 // that pairs of elements of the shufflemask represent the same index in each
7137 // vector incrementing sequentially through the vectors.
7138 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5]
7139 //  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f}
7140 //  v2={e,f,g,h}
7141 // Requires similar checks to that of isVTRNMask with respect the how results
7142 // are returned.
7143 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
7144   unsigned EltSz = VT.getScalarSizeInBits();
7145   if (EltSz == 64)
7146     return false;
7147 
7148   unsigned NumElts = VT.getVectorNumElements();
7149   if (M.size() != NumElts && M.size() != NumElts*2)
7150     return false;
7151 
7152   for (unsigned i = 0; i < M.size(); i += NumElts) {
7153     WhichResult = SelectPairHalf(NumElts, M, i);
7154     unsigned Idx = WhichResult * NumElts / 2;
7155     for (unsigned j = 0; j < NumElts; j += 2) {
7156       if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
7157           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts))
7158         return false;
7159       Idx += 1;
7160     }
7161   }
7162 
7163   if (M.size() == NumElts*2)
7164     WhichResult = 0;
7165 
7166   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
7167   if (VT.is64BitVector() && EltSz == 32)
7168     return false;
7169 
7170   return true;
7171 }
7172 
7173 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
7174 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
7175 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
7176 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
7177   unsigned EltSz = VT.getScalarSizeInBits();
7178   if (EltSz == 64)
7179     return false;
7180 
7181   unsigned NumElts = VT.getVectorNumElements();
7182   if (M.size() != NumElts && M.size() != NumElts*2)
7183     return false;
7184 
7185   for (unsigned i = 0; i < M.size(); i += NumElts) {
7186     WhichResult = SelectPairHalf(NumElts, M, i);
7187     unsigned Idx = WhichResult * NumElts / 2;
7188     for (unsigned j = 0; j < NumElts; j += 2) {
7189       if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
7190           (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx))
7191         return false;
7192       Idx += 1;
7193     }
7194   }
7195 
7196   if (M.size() == NumElts*2)
7197     WhichResult = 0;
7198 
7199   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
7200   if (VT.is64BitVector() && EltSz == 32)
7201     return false;
7202 
7203   return true;
7204 }
7205 
7206 /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN),
7207 /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't.
7208 static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT,
7209                                            unsigned &WhichResult,
7210                                            bool &isV_UNDEF) {
7211   isV_UNDEF = false;
7212   if (isVTRNMask(ShuffleMask, VT, WhichResult))
7213     return ARMISD::VTRN;
7214   if (isVUZPMask(ShuffleMask, VT, WhichResult))
7215     return ARMISD::VUZP;
7216   if (isVZIPMask(ShuffleMask, VT, WhichResult))
7217     return ARMISD::VZIP;
7218 
7219   isV_UNDEF = true;
7220   if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
7221     return ARMISD::VTRN;
7222   if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
7223     return ARMISD::VUZP;
7224   if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
7225     return ARMISD::VZIP;
7226 
7227   return 0;
7228 }
7229 
7230 /// \return true if this is a reverse operation on an vector.
7231 static bool isReverseMask(ArrayRef<int> M, EVT VT) {
7232   unsigned NumElts = VT.getVectorNumElements();
7233   // Make sure the mask has the right size.
7234   if (NumElts != M.size())
7235       return false;
7236 
7237   // Look for <15, ..., 3, -1, 1, 0>.
7238   for (unsigned i = 0; i != NumElts; ++i)
7239     if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
7240       return false;
7241 
7242   return true;
7243 }
7244 
7245 static bool isVMOVNMask(ArrayRef<int> M, EVT VT, bool Top) {
7246   unsigned NumElts = VT.getVectorNumElements();
7247   // Make sure the mask has the right size.
7248   if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8))
7249       return false;
7250 
7251   // If Top
7252   //   Look for <0, N, 2, N+2, 4, N+4, ..>.
7253   //   This inserts Input2 into Input1
7254   // else if not Top
7255   //   Look for <0, N+1, 2, N+3, 4, N+5, ..>
7256   //   This inserts Input1 into Input2
7257   unsigned Offset = Top ? 0 : 1;
7258   for (unsigned i = 0; i < NumElts; i+=2) {
7259     if (M[i] >= 0 && M[i] != (int)i)
7260       return false;
7261     if (M[i+1] >= 0 && M[i+1] != (int)(NumElts + i + Offset))
7262       return false;
7263   }
7264 
7265   return true;
7266 }
7267 
7268 // Reconstruct an MVE VCVT from a BuildVector of scalar fptrunc, all extracted
7269 // from a pair of inputs. For example:
7270 // BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0),
7271 //             FP_ROUND(EXTRACT_ELT(Y, 0),
7272 //             FP_ROUND(EXTRACT_ELT(X, 1),
7273 //             FP_ROUND(EXTRACT_ELT(Y, 1), ...)
7274 static SDValue LowerBuildVectorOfFPTrunc(SDValue BV, SelectionDAG &DAG,
7275                                          const ARMSubtarget *ST) {
7276   assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
7277   if (!ST->hasMVEFloatOps())
7278     return SDValue();
7279 
7280   SDLoc dl(BV);
7281   EVT VT = BV.getValueType();
7282   if (VT != MVT::v8f16)
7283     return SDValue();
7284 
7285   // We are looking for a buildvector of fptrunc elements, where all the
7286   // elements are interleavingly extracted from two sources. Check the first two
7287   // items are valid enough and extract some info from them (they are checked
7288   // properly in the loop below).
7289   if (BV.getOperand(0).getOpcode() != ISD::FP_ROUND ||
7290       BV.getOperand(0).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7291       BV.getOperand(0).getOperand(0).getConstantOperandVal(1) != 0)
7292     return SDValue();
7293   if (BV.getOperand(1).getOpcode() != ISD::FP_ROUND ||
7294       BV.getOperand(1).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7295       BV.getOperand(1).getOperand(0).getConstantOperandVal(1) != 0)
7296     return SDValue();
7297   SDValue Op0 = BV.getOperand(0).getOperand(0).getOperand(0);
7298   SDValue Op1 = BV.getOperand(1).getOperand(0).getOperand(0);
7299   if (Op0.getValueType() != MVT::v4f32 || Op1.getValueType() != MVT::v4f32)
7300     return SDValue();
7301 
7302   // Check all the values in the BuildVector line up with our expectations.
7303   for (unsigned i = 1; i < 4; i++) {
7304     auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) {
7305       return Trunc.getOpcode() == ISD::FP_ROUND &&
7306              Trunc.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7307              Trunc.getOperand(0).getOperand(0) == Op &&
7308              Trunc.getOperand(0).getConstantOperandVal(1) == Idx;
7309     };
7310     if (!Check(BV.getOperand(i * 2 + 0), Op0, i))
7311       return SDValue();
7312     if (!Check(BV.getOperand(i * 2 + 1), Op1, i))
7313       return SDValue();
7314   }
7315 
7316   SDValue N1 = DAG.getNode(ARMISD::VCVTN, dl, VT, DAG.getUNDEF(VT), Op0,
7317                            DAG.getConstant(0, dl, MVT::i32));
7318   return DAG.getNode(ARMISD::VCVTN, dl, VT, N1, Op1,
7319                      DAG.getConstant(1, dl, MVT::i32));
7320 }
7321 
7322 // Reconstruct an MVE VCVT from a BuildVector of scalar fpext, all extracted
7323 // from a single input on alternating lanes. For example:
7324 // BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0),
7325 //             FP_ROUND(EXTRACT_ELT(X, 2),
7326 //             FP_ROUND(EXTRACT_ELT(X, 4), ...)
7327 static SDValue LowerBuildVectorOfFPExt(SDValue BV, SelectionDAG &DAG,
7328                                        const ARMSubtarget *ST) {
7329   assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
7330   if (!ST->hasMVEFloatOps())
7331     return SDValue();
7332 
7333   SDLoc dl(BV);
7334   EVT VT = BV.getValueType();
7335   if (VT != MVT::v4f32)
7336     return SDValue();
7337 
7338   // We are looking for a buildvector of fptext elements, where all the
7339   // elements are alternating lanes from a single source. For example <0,2,4,6>
7340   // or <1,3,5,7>. Check the first two items are valid enough and extract some
7341   // info from them (they are checked properly in the loop below).
7342   if (BV.getOperand(0).getOpcode() != ISD::FP_EXTEND ||
7343       BV.getOperand(0).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
7344     return SDValue();
7345   SDValue Op0 = BV.getOperand(0).getOperand(0).getOperand(0);
7346   int Offset = BV.getOperand(0).getOperand(0).getConstantOperandVal(1);
7347   if (Op0.getValueType() != MVT::v8f16 || (Offset != 0 && Offset != 1))
7348     return SDValue();
7349 
7350   // Check all the values in the BuildVector line up with our expectations.
7351   for (unsigned i = 1; i < 4; i++) {
7352     auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) {
7353       return Trunc.getOpcode() == ISD::FP_EXTEND &&
7354              Trunc.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7355              Trunc.getOperand(0).getOperand(0) == Op &&
7356              Trunc.getOperand(0).getConstantOperandVal(1) == Idx;
7357     };
7358     if (!Check(BV.getOperand(i), Op0, 2 * i + Offset))
7359       return SDValue();
7360   }
7361 
7362   return DAG.getNode(ARMISD::VCVTL, dl, VT, Op0,
7363                      DAG.getConstant(Offset, dl, MVT::i32));
7364 }
7365 
7366 // If N is an integer constant that can be moved into a register in one
7367 // instruction, return an SDValue of such a constant (will become a MOV
7368 // instruction).  Otherwise return null.
7369 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
7370                                      const ARMSubtarget *ST, const SDLoc &dl) {
7371   uint64_t Val;
7372   if (!isa<ConstantSDNode>(N))
7373     return SDValue();
7374   Val = cast<ConstantSDNode>(N)->getZExtValue();
7375 
7376   if (ST->isThumb1Only()) {
7377     if (Val <= 255 || ~Val <= 255)
7378       return DAG.getConstant(Val, dl, MVT::i32);
7379   } else {
7380     if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
7381       return DAG.getConstant(Val, dl, MVT::i32);
7382   }
7383   return SDValue();
7384 }
7385 
7386 static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
7387                                     const ARMSubtarget *ST) {
7388   SDLoc dl(Op);
7389   EVT VT = Op.getValueType();
7390 
7391   assert(ST->hasMVEIntegerOps() && "LowerBUILD_VECTOR_i1 called without MVE!");
7392 
7393   unsigned NumElts = VT.getVectorNumElements();
7394   unsigned BoolMask;
7395   unsigned BitsPerBool;
7396   if (NumElts == 4) {
7397     BitsPerBool = 4;
7398     BoolMask = 0xf;
7399   } else if (NumElts == 8) {
7400     BitsPerBool = 2;
7401     BoolMask = 0x3;
7402   } else if (NumElts == 16) {
7403     BitsPerBool = 1;
7404     BoolMask = 0x1;
7405   } else
7406     return SDValue();
7407 
7408   // If this is a single value copied into all lanes (a splat), we can just sign
7409   // extend that single value
7410   SDValue FirstOp = Op.getOperand(0);
7411   if (!isa<ConstantSDNode>(FirstOp) &&
7412       std::all_of(std::next(Op->op_begin()), Op->op_end(),
7413                   [&FirstOp](SDUse &U) {
7414                     return U.get().isUndef() || U.get() == FirstOp;
7415                   })) {
7416     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32, FirstOp,
7417                               DAG.getValueType(MVT::i1));
7418     return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), Ext);
7419   }
7420 
7421   // First create base with bits set where known
7422   unsigned Bits32 = 0;
7423   for (unsigned i = 0; i < NumElts; ++i) {
7424     SDValue V = Op.getOperand(i);
7425     if (!isa<ConstantSDNode>(V) && !V.isUndef())
7426       continue;
7427     bool BitSet = V.isUndef() ? false : cast<ConstantSDNode>(V)->getZExtValue();
7428     if (BitSet)
7429       Bits32 |= BoolMask << (i * BitsPerBool);
7430   }
7431 
7432   // Add in unknown nodes
7433   SDValue Base = DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT,
7434                              DAG.getConstant(Bits32, dl, MVT::i32));
7435   for (unsigned i = 0; i < NumElts; ++i) {
7436     SDValue V = Op.getOperand(i);
7437     if (isa<ConstantSDNode>(V) || V.isUndef())
7438       continue;
7439     Base = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Base, V,
7440                        DAG.getConstant(i, dl, MVT::i32));
7441   }
7442 
7443   return Base;
7444 }
7445 
7446 // If this is a case we can't handle, return null and let the default
7447 // expansion code take care of it.
7448 SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
7449                                              const ARMSubtarget *ST) const {
7450   BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
7451   SDLoc dl(Op);
7452   EVT VT = Op.getValueType();
7453 
7454   if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
7455     return LowerBUILD_VECTOR_i1(Op, DAG, ST);
7456 
7457   APInt SplatBits, SplatUndef;
7458   unsigned SplatBitSize;
7459   bool HasAnyUndefs;
7460   if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
7461     if (SplatUndef.isAllOnesValue())
7462       return DAG.getUNDEF(VT);
7463 
7464     if ((ST->hasNEON() && SplatBitSize <= 64) ||
7465         (ST->hasMVEIntegerOps() && SplatBitSize <= 64)) {
7466       // Check if an immediate VMOV works.
7467       EVT VmovVT;
7468       SDValue Val =
7469           isVMOVModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
7470                             SplatBitSize, DAG, dl, VmovVT, VT, VMOVModImm);
7471 
7472       if (Val.getNode()) {
7473         SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
7474         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
7475       }
7476 
7477       // Try an immediate VMVN.
7478       uint64_t NegatedImm = (~SplatBits).getZExtValue();
7479       Val = isVMOVModifiedImm(
7480           NegatedImm, SplatUndef.getZExtValue(), SplatBitSize, DAG, dl, VmovVT,
7481           VT, ST->hasMVEIntegerOps() ? MVEVMVNModImm : VMVNModImm);
7482       if (Val.getNode()) {
7483         SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
7484         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
7485       }
7486 
7487       // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
7488       if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
7489         int ImmVal = ARM_AM::getFP32Imm(SplatBits);
7490         if (ImmVal != -1) {
7491           SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32);
7492           return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
7493         }
7494       }
7495     }
7496   }
7497 
7498   // Scan through the operands to see if only one value is used.
7499   //
7500   // As an optimisation, even if more than one value is used it may be more
7501   // profitable to splat with one value then change some lanes.
7502   //
7503   // Heuristically we decide to do this if the vector has a "dominant" value,
7504   // defined as splatted to more than half of the lanes.
7505   unsigned NumElts = VT.getVectorNumElements();
7506   bool isOnlyLowElement = true;
7507   bool usesOnlyOneValue = true;
7508   bool hasDominantValue = false;
7509   bool isConstant = true;
7510 
7511   // Map of the number of times a particular SDValue appears in the
7512   // element list.
7513   DenseMap<SDValue, unsigned> ValueCounts;
7514   SDValue Value;
7515   for (unsigned i = 0; i < NumElts; ++i) {
7516     SDValue V = Op.getOperand(i);
7517     if (V.isUndef())
7518       continue;
7519     if (i > 0)
7520       isOnlyLowElement = false;
7521     if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
7522       isConstant = false;
7523 
7524     ValueCounts.insert(std::make_pair(V, 0));
7525     unsigned &Count = ValueCounts[V];
7526 
7527     // Is this value dominant? (takes up more than half of the lanes)
7528     if (++Count > (NumElts / 2)) {
7529       hasDominantValue = true;
7530       Value = V;
7531     }
7532   }
7533   if (ValueCounts.size() != 1)
7534     usesOnlyOneValue = false;
7535   if (!Value.getNode() && !ValueCounts.empty())
7536     Value = ValueCounts.begin()->first;
7537 
7538   if (ValueCounts.empty())
7539     return DAG.getUNDEF(VT);
7540 
7541   // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
7542   // Keep going if we are hitting this case.
7543   if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
7544     return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
7545 
7546   unsigned EltSize = VT.getScalarSizeInBits();
7547 
7548   // Use VDUP for non-constant splats.  For f32 constant splats, reduce to
7549   // i32 and try again.
7550   if (hasDominantValue && EltSize <= 32) {
7551     if (!isConstant) {
7552       SDValue N;
7553 
7554       // If we are VDUPing a value that comes directly from a vector, that will
7555       // cause an unnecessary move to and from a GPR, where instead we could
7556       // just use VDUPLANE. We can only do this if the lane being extracted
7557       // is at a constant index, as the VDUP from lane instructions only have
7558       // constant-index forms.
7559       ConstantSDNode *constIndex;
7560       if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7561           (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) {
7562         // We need to create a new undef vector to use for the VDUPLANE if the
7563         // size of the vector from which we get the value is different than the
7564         // size of the vector that we need to create. We will insert the element
7565         // such that the register coalescer will remove unnecessary copies.
7566         if (VT != Value->getOperand(0).getValueType()) {
7567           unsigned index = constIndex->getAPIntValue().getLimitedValue() %
7568                              VT.getVectorNumElements();
7569           N =  DAG.getNode(ARMISD::VDUPLANE, dl, VT,
7570                  DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
7571                         Value, DAG.getConstant(index, dl, MVT::i32)),
7572                            DAG.getConstant(index, dl, MVT::i32));
7573         } else
7574           N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
7575                         Value->getOperand(0), Value->getOperand(1));
7576       } else
7577         N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);
7578 
7579       if (!usesOnlyOneValue) {
7580         // The dominant value was splatted as 'N', but we now have to insert
7581         // all differing elements.
7582         for (unsigned I = 0; I < NumElts; ++I) {
7583           if (Op.getOperand(I) == Value)
7584             continue;
7585           SmallVector<SDValue, 3> Ops;
7586           Ops.push_back(N);
7587           Ops.push_back(Op.getOperand(I));
7588           Ops.push_back(DAG.getConstant(I, dl, MVT::i32));
7589           N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
7590         }
7591       }
7592       return N;
7593     }
7594     if (VT.getVectorElementType().isFloatingPoint()) {
7595       SmallVector<SDValue, 8> Ops;
7596       MVT FVT = VT.getVectorElementType().getSimpleVT();
7597       assert(FVT == MVT::f32 || FVT == MVT::f16);
7598       MVT IVT = (FVT == MVT::f32) ? MVT::i32 : MVT::i16;
7599       for (unsigned i = 0; i < NumElts; ++i)
7600         Ops.push_back(DAG.getNode(ISD::BITCAST, dl, IVT,
7601                                   Op.getOperand(i)));
7602       EVT VecVT = EVT::getVectorVT(*DAG.getContext(), IVT, NumElts);
7603       SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
7604       Val = LowerBUILD_VECTOR(Val, DAG, ST);
7605       if (Val.getNode())
7606         return DAG.getNode(ISD::BITCAST, dl, VT, Val);
7607     }
7608     if (usesOnlyOneValue) {
7609       SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
7610       if (isConstant && Val.getNode())
7611         return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
7612     }
7613   }
7614 
7615   // If all elements are constants and the case above didn't get hit, fall back
7616   // to the default expansion, which will generate a load from the constant
7617   // pool.
7618   if (isConstant)
7619     return SDValue();
7620 
7621   // Reconstruct the BUILDVECTOR to one of the legal shuffles (such as vext and
7622   // vmovn). Empirical tests suggest this is rarely worth it for vectors of
7623   // length <= 2.
7624   if (NumElts >= 4)
7625     if (SDValue shuffle = ReconstructShuffle(Op, DAG))
7626       return shuffle;
7627 
7628   // Attempt to turn a buildvector of scalar fptrunc's or fpext's back into
7629   // VCVT's
7630   if (SDValue VCVT = LowerBuildVectorOfFPTrunc(Op, DAG, Subtarget))
7631     return VCVT;
7632   if (SDValue VCVT = LowerBuildVectorOfFPExt(Op, DAG, Subtarget))
7633     return VCVT;
7634 
7635   if (ST->hasNEON() && VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) {
7636     // If we haven't found an efficient lowering, try splitting a 128-bit vector
7637     // into two 64-bit vectors; we might discover a better way to lower it.
7638     SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts);
7639     EVT ExtVT = VT.getVectorElementType();
7640     EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2);
7641     SDValue Lower =
7642         DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElts / 2));
7643     if (Lower.getOpcode() == ISD::BUILD_VECTOR)
7644       Lower = LowerBUILD_VECTOR(Lower, DAG, ST);
7645     SDValue Upper = DAG.getBuildVector(
7646         HVT, dl, makeArrayRef(&Ops[NumElts / 2], NumElts / 2));
7647     if (Upper.getOpcode() == ISD::BUILD_VECTOR)
7648       Upper = LowerBUILD_VECTOR(Upper, DAG, ST);
7649     if (Lower && Upper)
7650       return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper);
7651   }
7652 
7653   // Vectors with 32- or 64-bit elements can be built by directly assigning
7654   // the subregisters.  Lower it to an ARMISD::BUILD_VECTOR so the operands
7655   // will be legalized.
7656   if (EltSize >= 32) {
7657     // Do the expansion with floating-point types, since that is what the VFP
7658     // registers are defined to use, and since i64 is not legal.
7659     EVT EltVT = EVT::getFloatingPointVT(EltSize);
7660     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
7661     SmallVector<SDValue, 8> Ops;
7662     for (unsigned i = 0; i < NumElts; ++i)
7663       Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
7664     SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
7665     return DAG.getNode(ISD::BITCAST, dl, VT, Val);
7666   }
7667 
7668   // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
7669   // know the default expansion would otherwise fall back on something even
7670   // worse. For a vector with one or two non-undef values, that's
7671   // scalar_to_vector for the elements followed by a shuffle (provided the
7672   // shuffle is valid for the target) and materialization element by element
7673   // on the stack followed by a load for everything else.
7674   if (!isConstant && !usesOnlyOneValue) {
7675     SDValue Vec = DAG.getUNDEF(VT);
7676     for (unsigned i = 0 ; i < NumElts; ++i) {
7677       SDValue V = Op.getOperand(i);
7678       if (V.isUndef())
7679         continue;
7680       SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
7681       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
7682     }
7683     return Vec;
7684   }
7685 
7686   return SDValue();
7687 }
7688 
7689 // Gather data to see if the operation can be modelled as a
7690 // shuffle in combination with VEXTs.
7691 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
7692                                               SelectionDAG &DAG) const {
7693   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
7694   SDLoc dl(Op);
7695   EVT VT = Op.getValueType();
7696   unsigned NumElts = VT.getVectorNumElements();
7697 
7698   struct ShuffleSourceInfo {
7699     SDValue Vec;
7700     unsigned MinElt = std::numeric_limits<unsigned>::max();
7701     unsigned MaxElt = 0;
7702 
7703     // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
7704     // be compatible with the shuffle we intend to construct. As a result
7705     // ShuffleVec will be some sliding window into the original Vec.
7706     SDValue ShuffleVec;
7707 
7708     // Code should guarantee that element i in Vec starts at element "WindowBase
7709     // + i * WindowScale in ShuffleVec".
7710     int WindowBase = 0;
7711     int WindowScale = 1;
7712 
7713     ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {}
7714 
7715     bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
7716   };
7717 
7718   // First gather all vectors used as an immediate source for this BUILD_VECTOR
7719   // node.
7720   SmallVector<ShuffleSourceInfo, 2> Sources;
7721   for (unsigned i = 0; i < NumElts; ++i) {
7722     SDValue V = Op.getOperand(i);
7723     if (V.isUndef())
7724       continue;
7725     else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
7726       // A shuffle can only come from building a vector from various
7727       // elements of other vectors.
7728       return SDValue();
7729     } else if (!isa<ConstantSDNode>(V.getOperand(1))) {
7730       // Furthermore, shuffles require a constant mask, whereas extractelts
7731       // accept variable indices.
7732       return SDValue();
7733     }
7734 
7735     // Add this element source to the list if it's not already there.
7736     SDValue SourceVec = V.getOperand(0);
7737     auto Source = llvm::find(Sources, SourceVec);
7738     if (Source == Sources.end())
7739       Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));
7740 
7741     // Update the minimum and maximum lane number seen.
7742     unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
7743     Source->MinElt = std::min(Source->MinElt, EltNo);
7744     Source->MaxElt = std::max(Source->MaxElt, EltNo);
7745   }
7746 
7747   // Currently only do something sane when at most two source vectors
7748   // are involved.
7749   if (Sources.size() > 2)
7750     return SDValue();
7751 
7752   // Find out the smallest element size among result and two sources, and use
7753   // it as element size to build the shuffle_vector.
7754   EVT SmallestEltTy = VT.getVectorElementType();
7755   for (auto &Source : Sources) {
7756     EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
7757     if (SrcEltTy.bitsLT(SmallestEltTy))
7758       SmallestEltTy = SrcEltTy;
7759   }
7760   unsigned ResMultiplier =
7761       VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
7762   NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
7763   EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
7764 
7765   // If the source vector is too wide or too narrow, we may nevertheless be able
7766   // to construct a compatible shuffle either by concatenating it with UNDEF or
7767   // extracting a suitable range of elements.
7768   for (auto &Src : Sources) {
7769     EVT SrcVT = Src.ShuffleVec.getValueType();
7770 
7771     if (SrcVT.getSizeInBits() == VT.getSizeInBits())
7772       continue;
7773 
7774     // This stage of the search produces a source with the same element type as
7775     // the original, but with a total width matching the BUILD_VECTOR output.
7776     EVT EltVT = SrcVT.getVectorElementType();
7777     unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits();
7778     EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
7779 
7780     if (SrcVT.getSizeInBits() < VT.getSizeInBits()) {
7781       if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits())
7782         return SDValue();
7783       // We can pad out the smaller vector for free, so if it's part of a
7784       // shuffle...
7785       Src.ShuffleVec =
7786           DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
7787                       DAG.getUNDEF(Src.ShuffleVec.getValueType()));
7788       continue;
7789     }
7790 
7791     if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits())
7792       return SDValue();
7793 
7794     if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
7795       // Span too large for a VEXT to cope
7796       return SDValue();
7797     }
7798 
7799     if (Src.MinElt >= NumSrcElts) {
7800       // The extraction can just take the second half
7801       Src.ShuffleVec =
7802           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
7803                       DAG.getConstant(NumSrcElts, dl, MVT::i32));
7804       Src.WindowBase = -NumSrcElts;
7805     } else if (Src.MaxElt < NumSrcElts) {
7806       // The extraction can just take the first half
7807       Src.ShuffleVec =
7808           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
7809                       DAG.getConstant(0, dl, MVT::i32));
7810     } else {
7811       // An actual VEXT is needed
7812       SDValue VEXTSrc1 =
7813           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
7814                       DAG.getConstant(0, dl, MVT::i32));
7815       SDValue VEXTSrc2 =
7816           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
7817                       DAG.getConstant(NumSrcElts, dl, MVT::i32));
7818 
7819       Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1,
7820                                    VEXTSrc2,
7821                                    DAG.getConstant(Src.MinElt, dl, MVT::i32));
7822       Src.WindowBase = -Src.MinElt;
7823     }
7824   }
7825 
7826   // Another possible incompatibility occurs from the vector element types. We
7827   // can fix this by bitcasting the source vectors to the same type we intend
7828   // for the shuffle.
7829   for (auto &Src : Sources) {
7830     EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
7831     if (SrcEltTy == SmallestEltTy)
7832       continue;
7833     assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
7834     Src.ShuffleVec = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, ShuffleVT, Src.ShuffleVec);
7835     Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits();
7836     Src.WindowBase *= Src.WindowScale;
7837   }
7838 
7839   // Final sanity check before we try to actually produce a shuffle.
7840   LLVM_DEBUG(for (auto Src
7841                   : Sources)
7842                  assert(Src.ShuffleVec.getValueType() == ShuffleVT););
7843 
7844   // The stars all align, our next step is to produce the mask for the shuffle.
7845   SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
7846   int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
7847   for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
7848     SDValue Entry = Op.getOperand(i);
7849     if (Entry.isUndef())
7850       continue;
7851 
7852     auto Src = llvm::find(Sources, Entry.getOperand(0));
7853     int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();
7854 
7855     // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
7856     // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
7857     // segment.
7858     EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
7859     int BitsDefined = std::min(OrigEltTy.getSizeInBits(),
7860                                VT.getScalarSizeInBits());
7861     int LanesDefined = BitsDefined / BitsPerShuffleLane;
7862 
7863     // This source is expected to fill ResMultiplier lanes of the final shuffle,
7864     // starting at the appropriate offset.
7865     int *LaneMask = &Mask[i * ResMultiplier];
7866 
7867     int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
7868     ExtractBase += NumElts * (Src - Sources.begin());
7869     for (int j = 0; j < LanesDefined; ++j)
7870       LaneMask[j] = ExtractBase + j;
7871   }
7872 
7873 
7874   // We can't handle more than two sources. This should have already
7875   // been checked before this point.
7876   assert(Sources.size() <= 2 && "Too many sources!");
7877 
7878   SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
7879   for (unsigned i = 0; i < Sources.size(); ++i)
7880     ShuffleOps[i] = Sources[i].ShuffleVec;
7881 
7882   SDValue Shuffle = buildLegalVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
7883                                             ShuffleOps[1], Mask, DAG);
7884   if (!Shuffle)
7885     return SDValue();
7886   return DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, Shuffle);
7887 }
7888 
7889 enum ShuffleOpCodes {
7890   OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
7891   OP_VREV,
7892   OP_VDUP0,
7893   OP_VDUP1,
7894   OP_VDUP2,
7895   OP_VDUP3,
7896   OP_VEXT1,
7897   OP_VEXT2,
7898   OP_VEXT3,
7899   OP_VUZPL, // VUZP, left result
7900   OP_VUZPR, // VUZP, right result
7901   OP_VZIPL, // VZIP, left result
7902   OP_VZIPR, // VZIP, right result
7903   OP_VTRNL, // VTRN, left result
7904   OP_VTRNR  // VTRN, right result
7905 };
7906 
7907 static bool isLegalMVEShuffleOp(unsigned PFEntry) {
7908   unsigned OpNum = (PFEntry >> 26) & 0x0F;
7909   switch (OpNum) {
7910   case OP_COPY:
7911   case OP_VREV:
7912   case OP_VDUP0:
7913   case OP_VDUP1:
7914   case OP_VDUP2:
7915   case OP_VDUP3:
7916     return true;
7917   }
7918   return false;
7919 }
7920 
7921 /// isShuffleMaskLegal - Targets can use this to indicate that they only
7922 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
7923 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
7924 /// are assumed to be legal.
7925 bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
7926   if (VT.getVectorNumElements() == 4 &&
7927       (VT.is128BitVector() || VT.is64BitVector())) {
7928     unsigned PFIndexes[4];
7929     for (unsigned i = 0; i != 4; ++i) {
7930       if (M[i] < 0)
7931         PFIndexes[i] = 8;
7932       else
7933         PFIndexes[i] = M[i];
7934     }
7935 
7936     // Compute the index in the perfect shuffle table.
7937     unsigned PFTableIndex =
7938       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
7939     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
7940     unsigned Cost = (PFEntry >> 30);
7941 
7942     if (Cost <= 4 && (Subtarget->hasNEON() || isLegalMVEShuffleOp(PFEntry)))
7943       return true;
7944   }
7945 
7946   bool ReverseVEXT, isV_UNDEF;
7947   unsigned Imm, WhichResult;
7948 
7949   unsigned EltSize = VT.getScalarSizeInBits();
7950   if (EltSize >= 32 ||
7951       ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
7952       ShuffleVectorInst::isIdentityMask(M) ||
7953       isVREVMask(M, VT, 64) ||
7954       isVREVMask(M, VT, 32) ||
7955       isVREVMask(M, VT, 16))
7956     return true;
7957   else if (Subtarget->hasNEON() &&
7958            (isVEXTMask(M, VT, ReverseVEXT, Imm) ||
7959             isVTBLMask(M, VT) ||
7960             isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF)))
7961     return true;
7962   else if (Subtarget->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) &&
7963            isReverseMask(M, VT))
7964     return true;
7965   else if (Subtarget->hasMVEIntegerOps() &&
7966            (isVMOVNMask(M, VT, 0) || isVMOVNMask(M, VT, 1)))
7967     return true;
7968   else
7969     return false;
7970 }
7971 
7972 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
7973 /// the specified operations to build the shuffle.
7974 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
7975                                       SDValue RHS, SelectionDAG &DAG,
7976                                       const SDLoc &dl) {
7977   unsigned OpNum = (PFEntry >> 26) & 0x0F;
7978   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
7979   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
7980 
7981   if (OpNum == OP_COPY) {
7982     if (LHSID == (1*9+2)*9+3) return LHS;
7983     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
7984     return RHS;
7985   }
7986 
7987   SDValue OpLHS, OpRHS;
7988   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
7989   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
7990   EVT VT = OpLHS.getValueType();
7991 
7992   switch (OpNum) {
7993   default: llvm_unreachable("Unknown shuffle opcode!");
7994   case OP_VREV:
7995     // VREV divides the vector in half and swaps within the half.
7996     if (VT.getVectorElementType() == MVT::i32 ||
7997         VT.getVectorElementType() == MVT::f32)
7998       return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
7999     // vrev <4 x i16> -> VREV32
8000     if (VT.getVectorElementType() == MVT::i16)
8001       return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
8002     // vrev <4 x i8> -> VREV16
8003     assert(VT.getVectorElementType() == MVT::i8);
8004     return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
8005   case OP_VDUP0:
8006   case OP_VDUP1:
8007   case OP_VDUP2:
8008   case OP_VDUP3:
8009     return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
8010                        OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32));
8011   case OP_VEXT1:
8012   case OP_VEXT2:
8013   case OP_VEXT3:
8014     return DAG.getNode(ARMISD::VEXT, dl, VT,
8015                        OpLHS, OpRHS,
8016                        DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32));
8017   case OP_VUZPL:
8018   case OP_VUZPR:
8019     return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
8020                        OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
8021   case OP_VZIPL:
8022   case OP_VZIPR:
8023     return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
8024                        OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
8025   case OP_VTRNL:
8026   case OP_VTRNR:
8027     return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
8028                        OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
8029   }
8030 }
8031 
8032 static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
8033                                        ArrayRef<int> ShuffleMask,
8034                                        SelectionDAG &DAG) {
8035   // Check to see if we can use the VTBL instruction.
8036   SDValue V1 = Op.getOperand(0);
8037   SDValue V2 = Op.getOperand(1);
8038   SDLoc DL(Op);
8039 
8040   SmallVector<SDValue, 8> VTBLMask;
8041   for (ArrayRef<int>::iterator
8042          I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
8043     VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32));
8044 
8045   if (V2.getNode()->isUndef())
8046     return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
8047                        DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
8048 
8049   return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
8050                      DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
8051 }
8052 
8053 static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
8054                                                       SelectionDAG &DAG) {
8055   SDLoc DL(Op);
8056   SDValue OpLHS = Op.getOperand(0);
8057   EVT VT = OpLHS.getValueType();
8058 
8059   assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
8060          "Expect an v8i16/v16i8 type");
8061   OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS);
8062   // For a v16i8 type: After the VREV, we have got <8, ...15, 8, ..., 0>. Now,
8063   // extract the first 8 bytes into the top double word and the last 8 bytes
8064   // into the bottom double word. The v8i16 case is similar.
8065   unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
8066   return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS,
8067                      DAG.getConstant(ExtractNum, DL, MVT::i32));
8068 }
8069 
8070 static EVT getVectorTyFromPredicateVector(EVT VT) {
8071   switch (VT.getSimpleVT().SimpleTy) {
8072   case MVT::v4i1:
8073     return MVT::v4i32;
8074   case MVT::v8i1:
8075     return MVT::v8i16;
8076   case MVT::v16i1:
8077     return MVT::v16i8;
8078   default:
8079     llvm_unreachable("Unexpected vector predicate type");
8080   }
8081 }
8082 
8083 static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT,
8084                                     SelectionDAG &DAG) {
8085   // Converting from boolean predicates to integers involves creating a vector
8086   // of all ones or all zeroes and selecting the lanes based upon the real
8087   // predicate.
8088   SDValue AllOnes =
8089       DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff), dl, MVT::i32);
8090   AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllOnes);
8091 
8092   SDValue AllZeroes =
8093       DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0x0), dl, MVT::i32);
8094   AllZeroes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllZeroes);
8095 
8096   // Get full vector type from predicate type
8097   EVT NewVT = getVectorTyFromPredicateVector(VT);
8098 
8099   SDValue RecastV1;
8100   // If the real predicate is an v8i1 or v4i1 (not v16i1) then we need to recast
8101   // this to a v16i1. This cannot be done with an ordinary bitcast because the
8102   // sizes are not the same. We have to use a MVE specific PREDICATE_CAST node,
8103   // since we know in hardware the sizes are really the same.
8104   if (VT != MVT::v16i1)
8105     RecastV1 = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Pred);
8106   else
8107     RecastV1 = Pred;
8108 
8109   // Select either all ones or zeroes depending upon the real predicate bits.
8110   SDValue PredAsVector =
8111       DAG.getNode(ISD::VSELECT, dl, MVT::v16i8, RecastV1, AllOnes, AllZeroes);
8112 
8113   // Recast our new predicate-as-integer v16i8 vector into something
8114   // appropriate for the shuffle, i.e. v4i32 for a real v4i1 predicate.
8115   return DAG.getNode(ISD::BITCAST, dl, NewVT, PredAsVector);
8116 }
8117 
8118 static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG,
8119                                       const ARMSubtarget *ST) {
8120   EVT VT = Op.getValueType();
8121   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
8122   ArrayRef<int> ShuffleMask = SVN->getMask();
8123 
8124   assert(ST->hasMVEIntegerOps() &&
8125          "No support for vector shuffle of boolean predicates");
8126 
8127   SDValue V1 = Op.getOperand(0);
8128   SDLoc dl(Op);
8129   if (isReverseMask(ShuffleMask, VT)) {
8130     SDValue cast = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, V1);
8131     SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, cast);
8132     SDValue srl = DAG.getNode(ISD::SRL, dl, MVT::i32, rbit,
8133                               DAG.getConstant(16, dl, MVT::i32));
8134     return DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, srl);
8135   }
8136 
8137   // Until we can come up with optimised cases for every single vector
8138   // shuffle in existence we have chosen the least painful strategy. This is
8139   // to essentially promote the boolean predicate to a 8-bit integer, where
8140   // each predicate represents a byte. Then we fall back on a normal integer
8141   // vector shuffle and convert the result back into a predicate vector. In
8142   // many cases the generated code might be even better than scalar code
8143   // operating on bits. Just imagine trying to shuffle 8 arbitrary 2-bit
8144   // fields in a register into 8 other arbitrary 2-bit fields!
8145   SDValue PredAsVector = PromoteMVEPredVector(dl, V1, VT, DAG);
8146   EVT NewVT = PredAsVector.getValueType();
8147 
8148   // Do the shuffle!
8149   SDValue Shuffled = DAG.getVectorShuffle(NewVT, dl, PredAsVector,
8150                                           DAG.getUNDEF(NewVT), ShuffleMask);
8151 
8152   // Now return the result of comparing the shuffled vector with zero,
8153   // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
8154   return DAG.getNode(ARMISD::VCMPZ, dl, VT, Shuffled,
8155                      DAG.getConstant(ARMCC::NE, dl, MVT::i32));
8156 }
8157 
8158 static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op,
8159                                             ArrayRef<int> ShuffleMask,
8160                                             SelectionDAG &DAG) {
8161   // Attempt to lower the vector shuffle using as many whole register movs as
8162   // possible. This is useful for types smaller than 32bits, which would
8163   // often otherwise become a series for grp movs.
8164   SDLoc dl(Op);
8165   EVT VT = Op.getValueType();
8166   if (VT.getScalarSizeInBits() >= 32)
8167     return SDValue();
8168 
8169   assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) &&
8170          "Unexpected vector type");
8171   int NumElts = VT.getVectorNumElements();
8172   int QuarterSize = NumElts / 4;
8173   // The four final parts of the vector, as i32's
8174   SDValue Parts[4];
8175 
8176   // Look for full lane vmovs like <0,1,2,3> or <u,5,6,7> etc, (but not
8177   // <u,u,u,u>), returning the vmov lane index
8178   auto getMovIdx = [](ArrayRef<int> ShuffleMask, int Start, int Length) {
8179     // Detect which mov lane this would be from the first non-undef element.
8180     int MovIdx = -1;
8181     for (int i = 0; i < Length; i++) {
8182       if (ShuffleMask[Start + i] >= 0) {
8183         if (ShuffleMask[Start + i] % Length != i)
8184           return -1;
8185         MovIdx = ShuffleMask[Start + i] / Length;
8186         break;
8187       }
8188     }
8189     // If all items are undef, leave this for other combines
8190     if (MovIdx == -1)
8191       return -1;
8192     // Check the remaining values are the correct part of the same mov
8193     for (int i = 1; i < Length; i++) {
8194       if (ShuffleMask[Start + i] >= 0 &&
8195           (ShuffleMask[Start + i] / Length != MovIdx ||
8196            ShuffleMask[Start + i] % Length != i))
8197         return -1;
8198     }
8199     return MovIdx;
8200   };
8201 
8202   for (int Part = 0; Part < 4; ++Part) {
8203     // Does this part look like a mov
8204     int Elt = getMovIdx(ShuffleMask, Part * QuarterSize, QuarterSize);
8205     if (Elt != -1) {
8206       SDValue Input = Op->getOperand(0);
8207       if (Elt >= 4) {
8208         Input = Op->getOperand(1);
8209         Elt -= 4;
8210       }
8211       SDValue BitCast = DAG.getBitcast(MVT::v4i32, Input);
8212       Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, BitCast,
8213                                 DAG.getConstant(Elt, dl, MVT::i32));
8214     }
8215   }
8216 
8217   // Nothing interesting found, just return
8218   if (!Parts[0] && !Parts[1] && !Parts[2] && !Parts[3])
8219     return SDValue();
8220 
8221   // The other parts need to be built with the old shuffle vector, cast to a
8222   // v4i32 and extract_vector_elts
8223   if (!Parts[0] || !Parts[1] || !Parts[2] || !Parts[3]) {
8224     SmallVector<int, 16> NewShuffleMask;
8225     for (int Part = 0; Part < 4; ++Part)
8226       for (int i = 0; i < QuarterSize; i++)
8227         NewShuffleMask.push_back(
8228             Parts[Part] ? -1 : ShuffleMask[Part * QuarterSize + i]);
8229     SDValue NewShuffle = DAG.getVectorShuffle(
8230         VT, dl, Op->getOperand(0), Op->getOperand(1), NewShuffleMask);
8231     SDValue BitCast = DAG.getBitcast(MVT::v4i32, NewShuffle);
8232 
8233     for (int Part = 0; Part < 4; ++Part)
8234       if (!Parts[Part])
8235         Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
8236                                   BitCast, DAG.getConstant(Part, dl, MVT::i32));
8237   }
8238   // Build a vector out of the various parts and bitcast it back to the original
8239   // type.
8240   SDValue NewVec = DAG.getBuildVector(MVT::v4i32, dl, Parts);
8241   return DAG.getBitcast(VT, NewVec);
8242 }
8243 
8244 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
8245                                    const ARMSubtarget *ST) {
8246   SDValue V1 = Op.getOperand(0);
8247   SDValue V2 = Op.getOperand(1);
8248   SDLoc dl(Op);
8249   EVT VT = Op.getValueType();
8250   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
8251   unsigned EltSize = VT.getScalarSizeInBits();
8252 
8253   if (ST->hasMVEIntegerOps() && EltSize == 1)
8254     return LowerVECTOR_SHUFFLE_i1(Op, DAG, ST);
8255 
8256   // Convert shuffles that are directly supported on NEON to target-specific
8257   // DAG nodes, instead of keeping them as shuffles and matching them again
8258   // during code selection.  This is more efficient and avoids the possibility
8259   // of inconsistencies between legalization and selection.
8260   // FIXME: floating-point vectors should be canonicalized to integer vectors
8261   // of the same time so that they get CSEd properly.
8262   ArrayRef<int> ShuffleMask = SVN->getMask();
8263 
8264   if (EltSize <= 32) {
8265     if (SVN->isSplat()) {
8266       int Lane = SVN->getSplatIndex();
8267       // If this is undef splat, generate it via "just" vdup, if possible.
8268       if (Lane == -1) Lane = 0;
8269 
8270       // Test if V1 is a SCALAR_TO_VECTOR.
8271       if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
8272         return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
8273       }
8274       // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
8275       // (and probably will turn into a SCALAR_TO_VECTOR once legalization
8276       // reaches it).
8277       if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
8278           !isa<ConstantSDNode>(V1.getOperand(0))) {
8279         bool IsScalarToVector = true;
8280         for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
8281           if (!V1.getOperand(i).isUndef()) {
8282             IsScalarToVector = false;
8283             break;
8284           }
8285         if (IsScalarToVector)
8286           return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
8287       }
8288       return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
8289                          DAG.getConstant(Lane, dl, MVT::i32));
8290     }
8291 
8292     bool ReverseVEXT = false;
8293     unsigned Imm = 0;
8294     if (ST->hasNEON() && isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
8295       if (ReverseVEXT)
8296         std::swap(V1, V2);
8297       return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
8298                          DAG.getConstant(Imm, dl, MVT::i32));
8299     }
8300 
8301     if (isVREVMask(ShuffleMask, VT, 64))
8302       return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
8303     if (isVREVMask(ShuffleMask, VT, 32))
8304       return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
8305     if (isVREVMask(ShuffleMask, VT, 16))
8306       return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
8307 
8308     if (ST->hasNEON() && V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
8309       return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
8310                          DAG.getConstant(Imm, dl, MVT::i32));
8311     }
8312 
8313     // Check for Neon shuffles that modify both input vectors in place.
8314     // If both results are used, i.e., if there are two shuffles with the same
8315     // source operands and with masks corresponding to both results of one of
8316     // these operations, DAG memoization will ensure that a single node is
8317     // used for both shuffles.
8318     unsigned WhichResult = 0;
8319     bool isV_UNDEF = false;
8320     if (ST->hasNEON()) {
8321       if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
8322               ShuffleMask, VT, WhichResult, isV_UNDEF)) {
8323         if (isV_UNDEF)
8324           V2 = V1;
8325         return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2)
8326             .getValue(WhichResult);
8327       }
8328     }
8329     if (ST->hasMVEIntegerOps()) {
8330       if (isVMOVNMask(ShuffleMask, VT, 0))
8331         return DAG.getNode(ARMISD::VMOVN, dl, VT, V2, V1,
8332                            DAG.getConstant(0, dl, MVT::i32));
8333       if (isVMOVNMask(ShuffleMask, VT, 1))
8334         return DAG.getNode(ARMISD::VMOVN, dl, VT, V1, V2,
8335                            DAG.getConstant(1, dl, MVT::i32));
8336     }
8337 
8338     // Also check for these shuffles through CONCAT_VECTORS: we canonicalize
8339     // shuffles that produce a result larger than their operands with:
8340     //   shuffle(concat(v1, undef), concat(v2, undef))
8341     // ->
8342     //   shuffle(concat(v1, v2), undef)
8343     // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine).
8344     //
8345     // This is useful in the general case, but there are special cases where
8346     // native shuffles produce larger results: the two-result ops.
8347     //
8348     // Look through the concat when lowering them:
8349     //   shuffle(concat(v1, v2), undef)
8350     // ->
8351     //   concat(VZIP(v1, v2):0, :1)
8352     //
8353     if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) {
8354       SDValue SubV1 = V1->getOperand(0);
8355       SDValue SubV2 = V1->getOperand(1);
8356       EVT SubVT = SubV1.getValueType();
8357 
8358       // We expect these to have been canonicalized to -1.
8359       assert(llvm::all_of(ShuffleMask, [&](int i) {
8360         return i < (int)VT.getVectorNumElements();
8361       }) && "Unexpected shuffle index into UNDEF operand!");
8362 
8363       if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
8364               ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
8365         if (isV_UNDEF)
8366           SubV2 = SubV1;
8367         assert((WhichResult == 0) &&
8368                "In-place shuffle of concat can only have one result!");
8369         SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT),
8370                                   SubV1, SubV2);
8371         return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0),
8372                            Res.getValue(1));
8373       }
8374     }
8375   }
8376 
8377   // If the shuffle is not directly supported and it has 4 elements, use
8378   // the PerfectShuffle-generated table to synthesize it from other shuffles.
8379   unsigned NumElts = VT.getVectorNumElements();
8380   if (NumElts == 4) {
8381     unsigned PFIndexes[4];
8382     for (unsigned i = 0; i != 4; ++i) {
8383       if (ShuffleMask[i] < 0)
8384         PFIndexes[i] = 8;
8385       else
8386         PFIndexes[i] = ShuffleMask[i];
8387     }
8388 
8389     // Compute the index in the perfect shuffle table.
8390     unsigned PFTableIndex =
8391       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
8392     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
8393     unsigned Cost = (PFEntry >> 30);
8394 
8395     if (Cost <= 4) {
8396       if (ST->hasNEON())
8397         return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
8398       else if (isLegalMVEShuffleOp(PFEntry)) {
8399         unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
8400         unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
8401         unsigned PFEntryLHS = PerfectShuffleTable[LHSID];
8402         unsigned PFEntryRHS = PerfectShuffleTable[RHSID];
8403         if (isLegalMVEShuffleOp(PFEntryLHS) && isLegalMVEShuffleOp(PFEntryRHS))
8404           return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
8405       }
8406     }
8407   }
8408 
8409   // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
8410   if (EltSize >= 32) {
8411     // Do the expansion with floating-point types, since that is what the VFP
8412     // registers are defined to use, and since i64 is not legal.
8413     EVT EltVT = EVT::getFloatingPointVT(EltSize);
8414     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
8415     V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
8416     V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
8417     SmallVector<SDValue, 8> Ops;
8418     for (unsigned i = 0; i < NumElts; ++i) {
8419       if (ShuffleMask[i] < 0)
8420         Ops.push_back(DAG.getUNDEF(EltVT));
8421       else
8422         Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
8423                                   ShuffleMask[i] < (int)NumElts ? V1 : V2,
8424                                   DAG.getConstant(ShuffleMask[i] & (NumElts-1),
8425                                                   dl, MVT::i32)));
8426     }
8427     SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
8428     return DAG.getNode(ISD::BITCAST, dl, VT, Val);
8429   }
8430 
8431   if (ST->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT))
8432     return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG);
8433 
8434   if (ST->hasNEON() && VT == MVT::v8i8)
8435     if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG))
8436       return NewOp;
8437 
8438   if (ST->hasMVEIntegerOps())
8439     if (SDValue NewOp = LowerVECTOR_SHUFFLEUsingMovs(Op, ShuffleMask, DAG))
8440       return NewOp;
8441 
8442   return SDValue();
8443 }
8444 
8445 static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
8446                                          const ARMSubtarget *ST) {
8447   EVT VecVT = Op.getOperand(0).getValueType();
8448   SDLoc dl(Op);
8449 
8450   assert(ST->hasMVEIntegerOps() &&
8451          "LowerINSERT_VECTOR_ELT_i1 called without MVE!");
8452 
8453   SDValue Conv =
8454       DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0));
8455   unsigned Lane = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
8456   unsigned LaneWidth =
8457       getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8;
8458   unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth;
8459   SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32,
8460                             Op.getOperand(1), DAG.getValueType(MVT::i1));
8461   SDValue BFI = DAG.getNode(ARMISD::BFI, dl, MVT::i32, Conv, Ext,
8462                             DAG.getConstant(~Mask, dl, MVT::i32));
8463   return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), BFI);
8464 }
8465 
8466 SDValue ARMTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
8467                                                   SelectionDAG &DAG) const {
8468   // INSERT_VECTOR_ELT is legal only for immediate indexes.
8469   SDValue Lane = Op.getOperand(2);
8470   if (!isa<ConstantSDNode>(Lane))
8471     return SDValue();
8472 
8473   SDValue Elt = Op.getOperand(1);
8474   EVT EltVT = Elt.getValueType();
8475 
8476   if (Subtarget->hasMVEIntegerOps() &&
8477       Op.getValueType().getScalarSizeInBits() == 1)
8478     return LowerINSERT_VECTOR_ELT_i1(Op, DAG, Subtarget);
8479 
8480   if (getTypeAction(*DAG.getContext(), EltVT) ==
8481       TargetLowering::TypePromoteFloat) {
8482     // INSERT_VECTOR_ELT doesn't want f16 operands promoting to f32,
8483     // but the type system will try to do that if we don't intervene.
8484     // Reinterpret any such vector-element insertion as one with the
8485     // corresponding integer types.
8486 
8487     SDLoc dl(Op);
8488 
8489     EVT IEltVT = MVT::getIntegerVT(EltVT.getScalarSizeInBits());
8490     assert(getTypeAction(*DAG.getContext(), IEltVT) !=
8491            TargetLowering::TypePromoteFloat);
8492 
8493     SDValue VecIn = Op.getOperand(0);
8494     EVT VecVT = VecIn.getValueType();
8495     EVT IVecVT = EVT::getVectorVT(*DAG.getContext(), IEltVT,
8496                                   VecVT.getVectorNumElements());
8497 
8498     SDValue IElt = DAG.getNode(ISD::BITCAST, dl, IEltVT, Elt);
8499     SDValue IVecIn = DAG.getNode(ISD::BITCAST, dl, IVecVT, VecIn);
8500     SDValue IVecOut = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, IVecVT,
8501                                   IVecIn, IElt, Lane);
8502     return DAG.getNode(ISD::BITCAST, dl, VecVT, IVecOut);
8503   }
8504 
8505   return Op;
8506 }
8507 
8508 static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
8509                                           const ARMSubtarget *ST) {
8510   EVT VecVT = Op.getOperand(0).getValueType();
8511   SDLoc dl(Op);
8512 
8513   assert(ST->hasMVEIntegerOps() &&
8514          "LowerINSERT_VECTOR_ELT_i1 called without MVE!");
8515 
8516   SDValue Conv =
8517       DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0));
8518   unsigned Lane = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
8519   unsigned LaneWidth =
8520       getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8;
8521   SDValue Shift = DAG.getNode(ISD::SRL, dl, MVT::i32, Conv,
8522                               DAG.getConstant(Lane * LaneWidth, dl, MVT::i32));
8523   return Shift;
8524 }
8525 
8526 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG,
8527                                        const ARMSubtarget *ST) {
8528   // EXTRACT_VECTOR_ELT is legal only for immediate indexes.
8529   SDValue Lane = Op.getOperand(1);
8530   if (!isa<ConstantSDNode>(Lane))
8531     return SDValue();
8532 
8533   SDValue Vec = Op.getOperand(0);
8534   EVT VT = Vec.getValueType();
8535 
8536   if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
8537     return LowerEXTRACT_VECTOR_ELT_i1(Op, DAG, ST);
8538 
8539   if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) {
8540     SDLoc dl(Op);
8541     return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
8542   }
8543 
8544   return Op;
8545 }
8546 
8547 static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG,
8548                                       const ARMSubtarget *ST) {
8549   SDValue V1 = Op.getOperand(0);
8550   SDValue V2 = Op.getOperand(1);
8551   SDLoc dl(Op);
8552   EVT VT = Op.getValueType();
8553   EVT Op1VT = V1.getValueType();
8554   EVT Op2VT = V2.getValueType();
8555   unsigned NumElts = VT.getVectorNumElements();
8556 
8557   assert(Op1VT == Op2VT && "Operand types don't match!");
8558   assert(VT.getScalarSizeInBits() == 1 &&
8559          "Unexpected custom CONCAT_VECTORS lowering");
8560   assert(ST->hasMVEIntegerOps() &&
8561          "CONCAT_VECTORS lowering only supported for MVE");
8562 
8563   SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG);
8564   SDValue NewV2 = PromoteMVEPredVector(dl, V2, Op2VT, DAG);
8565 
8566   // We now have Op1 + Op2 promoted to vectors of integers, where v8i1 gets
8567   // promoted to v8i16, etc.
8568 
8569   MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT();
8570 
8571   // Extract the vector elements from Op1 and Op2 one by one and truncate them
8572   // to be the right size for the destination. For example, if Op1 is v4i1 then
8573   // the promoted vector is v4i32. The result of concatentation gives a v8i1,
8574   // which when promoted is v8i16. That means each i32 element from Op1 needs
8575   // truncating to i16 and inserting in the result.
8576   EVT ConcatVT = MVT::getVectorVT(ElType, NumElts);
8577   SDValue ConVec = DAG.getNode(ISD::UNDEF, dl, ConcatVT);
8578   auto ExractInto = [&DAG, &dl](SDValue NewV, SDValue ConVec, unsigned &j) {
8579     EVT NewVT = NewV.getValueType();
8580     EVT ConcatVT = ConVec.getValueType();
8581     for (unsigned i = 0, e = NewVT.getVectorNumElements(); i < e; i++, j++) {
8582       SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV,
8583                                 DAG.getIntPtrConstant(i, dl));
8584       ConVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ConcatVT, ConVec, Elt,
8585                            DAG.getConstant(j, dl, MVT::i32));
8586     }
8587     return ConVec;
8588   };
8589   unsigned j = 0;
8590   ConVec = ExractInto(NewV1, ConVec, j);
8591   ConVec = ExractInto(NewV2, ConVec, j);
8592 
8593   // Now return the result of comparing the subvector with zero,
8594   // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
8595   return DAG.getNode(ARMISD::VCMPZ, dl, VT, ConVec,
8596                      DAG.getConstant(ARMCC::NE, dl, MVT::i32));
8597 }
8598 
8599 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
8600                                    const ARMSubtarget *ST) {
8601   EVT VT = Op->getValueType(0);
8602   if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
8603     return LowerCONCAT_VECTORS_i1(Op, DAG, ST);
8604 
8605   // The only time a CONCAT_VECTORS operation can have legal types is when
8606   // two 64-bit vectors are concatenated to a 128-bit vector.
8607   assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
8608          "unexpected CONCAT_VECTORS");
8609   SDLoc dl(Op);
8610   SDValue Val = DAG.getUNDEF(MVT::v2f64);
8611   SDValue Op0 = Op.getOperand(0);
8612   SDValue Op1 = Op.getOperand(1);
8613   if (!Op0.isUndef())
8614     Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
8615                       DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
8616                       DAG.getIntPtrConstant(0, dl));
8617   if (!Op1.isUndef())
8618     Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
8619                       DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
8620                       DAG.getIntPtrConstant(1, dl));
8621   return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
8622 }
8623 
8624 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG,
8625                                       const ARMSubtarget *ST) {
8626   SDValue V1 = Op.getOperand(0);
8627   SDValue V2 = Op.getOperand(1);
8628   SDLoc dl(Op);
8629   EVT VT = Op.getValueType();
8630   EVT Op1VT = V1.getValueType();
8631   unsigned NumElts = VT.getVectorNumElements();
8632   unsigned Index = cast<ConstantSDNode>(V2)->getZExtValue();
8633 
8634   assert(VT.getScalarSizeInBits() == 1 &&
8635          "Unexpected custom EXTRACT_SUBVECTOR lowering");
8636   assert(ST->hasMVEIntegerOps() &&
8637          "EXTRACT_SUBVECTOR lowering only supported for MVE");
8638 
8639   SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG);
8640 
8641   // We now have Op1 promoted to a vector of integers, where v8i1 gets
8642   // promoted to v8i16, etc.
8643 
8644   MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT();
8645 
8646   EVT SubVT = MVT::getVectorVT(ElType, NumElts);
8647   SDValue SubVec = DAG.getNode(ISD::UNDEF, dl, SubVT);
8648   for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j++) {
8649     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV1,
8650                               DAG.getIntPtrConstant(i, dl));
8651     SubVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubVT, SubVec, Elt,
8652                          DAG.getConstant(j, dl, MVT::i32));
8653   }
8654 
8655   // Now return the result of comparing the subvector with zero,
8656   // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
8657   return DAG.getNode(ARMISD::VCMPZ, dl, VT, SubVec,
8658                      DAG.getConstant(ARMCC::NE, dl, MVT::i32));
8659 }
8660 
8661 /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
8662 /// element has been zero/sign-extended, depending on the isSigned parameter,
8663 /// from an integer type half its size.
8664 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
8665                                    bool isSigned) {
8666   // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
8667   EVT VT = N->getValueType(0);
8668   if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
8669     SDNode *BVN = N->getOperand(0).getNode();
8670     if (BVN->getValueType(0) != MVT::v4i32 ||
8671         BVN->getOpcode() != ISD::BUILD_VECTOR)
8672       return false;
8673     unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
8674     unsigned HiElt = 1 - LoElt;
8675     ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt));
8676     ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt));
8677     ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2));
8678     ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2));
8679     if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
8680       return false;
8681     if (isSigned) {
8682       if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
8683           Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
8684         return true;
8685     } else {
8686       if (Hi0->isNullValue() && Hi1->isNullValue())
8687         return true;
8688     }
8689     return false;
8690   }
8691 
8692   if (N->getOpcode() != ISD::BUILD_VECTOR)
8693     return false;
8694 
8695   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
8696     SDNode *Elt = N->getOperand(i).getNode();
8697     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
8698       unsigned EltSize = VT.getScalarSizeInBits();
8699       unsigned HalfSize = EltSize / 2;
8700       if (isSigned) {
8701         if (!isIntN(HalfSize, C->getSExtValue()))
8702           return false;
8703       } else {
8704         if (!isUIntN(HalfSize, C->getZExtValue()))
8705           return false;
8706       }
8707       continue;
8708     }
8709     return false;
8710   }
8711 
8712   return true;
8713 }
8714 
8715 /// isSignExtended - Check if a node is a vector value that is sign-extended
8716 /// or a constant BUILD_VECTOR with sign-extended elements.
8717 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
8718   if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
8719     return true;
8720   if (isExtendedBUILD_VECTOR(N, DAG, true))
8721     return true;
8722   return false;
8723 }
8724 
8725 /// isZeroExtended - Check if a node is a vector value that is zero-extended
8726 /// or a constant BUILD_VECTOR with zero-extended elements.
8727 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
8728   if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
8729     return true;
8730   if (isExtendedBUILD_VECTOR(N, DAG, false))
8731     return true;
8732   return false;
8733 }
8734 
8735 static EVT getExtensionTo64Bits(const EVT &OrigVT) {
8736   if (OrigVT.getSizeInBits() >= 64)
8737     return OrigVT;
8738 
8739   assert(OrigVT.isSimple() && "Expecting a simple value type");
8740 
8741   MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
8742   switch (OrigSimpleTy) {
8743   default: llvm_unreachable("Unexpected Vector Type");
8744   case MVT::v2i8:
8745   case MVT::v2i16:
8746      return MVT::v2i32;
8747   case MVT::v4i8:
8748     return  MVT::v4i16;
8749   }
8750 }
8751 
8752 /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total
8753 /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL.
8754 /// We insert the required extension here to get the vector to fill a D register.
8755 static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
8756                                             const EVT &OrigTy,
8757                                             const EVT &ExtTy,
8758                                             unsigned ExtOpcode) {
8759   // The vector originally had a size of OrigTy. It was then extended to ExtTy.
8760   // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
8761   // 64-bits we need to insert a new extension so that it will be 64-bits.
8762   assert(ExtTy.is128BitVector() && "Unexpected extension size");
8763   if (OrigTy.getSizeInBits() >= 64)
8764     return N;
8765 
8766   // Must extend size to at least 64 bits to be used as an operand for VMULL.
8767   EVT NewVT = getExtensionTo64Bits(OrigTy);
8768 
8769   return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
8770 }
8771 
8772 /// SkipLoadExtensionForVMULL - return a load of the original vector size that
8773 /// does not do any sign/zero extension. If the original vector is less
8774 /// than 64 bits, an appropriate extension will be added after the load to
8775 /// reach a total size of 64 bits. We have to add the extension separately
8776 /// because ARM does not have a sign/zero extending load for vectors.
8777 static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {
8778   EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT());
8779 
8780   // The load already has the right type.
8781   if (ExtendedTy == LD->getMemoryVT())
8782     return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
8783                        LD->getBasePtr(), LD->getPointerInfo(),
8784                        LD->getAlignment(), LD->getMemOperand()->getFlags());
8785 
8786   // We need to create a zextload/sextload. We cannot just create a load
8787   // followed by a zext/zext node because LowerMUL is also run during normal
8788   // operation legalization where we can't create illegal types.
8789   return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
8790                         LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
8791                         LD->getMemoryVT(), LD->getAlignment(),
8792                         LD->getMemOperand()->getFlags());
8793 }
8794 
8795 /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
8796 /// extending load, or BUILD_VECTOR with extended elements, return the
8797 /// unextended value. The unextended vector should be 64 bits so that it can
8798 /// be used as an operand to a VMULL instruction. If the original vector size
8799 /// before extension is less than 64 bits we add a an extension to resize
8800 /// the vector to 64 bits.
8801 static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
8802   if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
8803     return AddRequiredExtensionForVMULL(N->getOperand(0), DAG,
8804                                         N->getOperand(0)->getValueType(0),
8805                                         N->getValueType(0),
8806                                         N->getOpcode());
8807 
8808   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
8809     assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) &&
8810            "Expected extending load");
8811 
8812     SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG);
8813     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), newLoad.getValue(1));
8814     unsigned Opcode = ISD::isSEXTLoad(LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
8815     SDValue extLoad =
8816         DAG.getNode(Opcode, SDLoc(newLoad), LD->getValueType(0), newLoad);
8817     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), extLoad);
8818 
8819     return newLoad;
8820   }
8821 
8822   // Otherwise, the value must be a BUILD_VECTOR.  For v2i64, it will
8823   // have been legalized as a BITCAST from v4i32.
8824   if (N->getOpcode() == ISD::BITCAST) {
8825     SDNode *BVN = N->getOperand(0).getNode();
8826     assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
8827            BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
8828     unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
8829     return DAG.getBuildVector(
8830         MVT::v2i32, SDLoc(N),
8831         {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)});
8832   }
8833   // Construct a new BUILD_VECTOR with elements truncated to half the size.
8834   assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
8835   EVT VT = N->getValueType(0);
8836   unsigned EltSize = VT.getScalarSizeInBits() / 2;
8837   unsigned NumElts = VT.getVectorNumElements();
8838   MVT TruncVT = MVT::getIntegerVT(EltSize);
8839   SmallVector<SDValue, 8> Ops;
8840   SDLoc dl(N);
8841   for (unsigned i = 0; i != NumElts; ++i) {
8842     ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
8843     const APInt &CInt = C->getAPIntValue();
8844     // Element types smaller than 32 bits are not legal, so use i32 elements.
8845     // The values are implicitly truncated so sext vs. zext doesn't matter.
8846     Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
8847   }
8848   return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
8849 }
8850 
8851 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
8852   unsigned Opcode = N->getOpcode();
8853   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
8854     SDNode *N0 = N->getOperand(0).getNode();
8855     SDNode *N1 = N->getOperand(1).getNode();
8856     return N0->hasOneUse() && N1->hasOneUse() &&
8857       isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
8858   }
8859   return false;
8860 }
8861 
8862 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
8863   unsigned Opcode = N->getOpcode();
8864   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
8865     SDNode *N0 = N->getOperand(0).getNode();
8866     SDNode *N1 = N->getOperand(1).getNode();
8867     return N0->hasOneUse() && N1->hasOneUse() &&
8868       isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
8869   }
8870   return false;
8871 }
8872 
8873 static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
8874   // Multiplications are only custom-lowered for 128-bit vectors so that
8875   // VMULL can be detected.  Otherwise v2i64 multiplications are not legal.
8876   EVT VT = Op.getValueType();
8877   assert(VT.is128BitVector() && VT.isInteger() &&
8878          "unexpected type for custom-lowering ISD::MUL");
8879   SDNode *N0 = Op.getOperand(0).getNode();
8880   SDNode *N1 = Op.getOperand(1).getNode();
8881   unsigned NewOpc = 0;
8882   bool isMLA = false;
8883   bool isN0SExt = isSignExtended(N0, DAG);
8884   bool isN1SExt = isSignExtended(N1, DAG);
8885   if (isN0SExt && isN1SExt)
8886     NewOpc = ARMISD::VMULLs;
8887   else {
8888     bool isN0ZExt = isZeroExtended(N0, DAG);
8889     bool isN1ZExt = isZeroExtended(N1, DAG);
8890     if (isN0ZExt && isN1ZExt)
8891       NewOpc = ARMISD::VMULLu;
8892     else if (isN1SExt || isN1ZExt) {
8893       // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
8894       // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
8895       if (isN1SExt && isAddSubSExt(N0, DAG)) {
8896         NewOpc = ARMISD::VMULLs;
8897         isMLA = true;
8898       } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
8899         NewOpc = ARMISD::VMULLu;
8900         isMLA = true;
8901       } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
8902         std::swap(N0, N1);
8903         NewOpc = ARMISD::VMULLu;
8904         isMLA = true;
8905       }
8906     }
8907 
8908     if (!NewOpc) {
8909       if (VT == MVT::v2i64)
8910         // Fall through to expand this.  It is not legal.
8911         return SDValue();
8912       else
8913         // Other vector multiplications are legal.
8914         return Op;
8915     }
8916   }
8917 
8918   // Legalize to a VMULL instruction.
8919   SDLoc DL(Op);
8920   SDValue Op0;
8921   SDValue Op1 = SkipExtensionForVMULL(N1, DAG);
8922   if (!isMLA) {
8923     Op0 = SkipExtensionForVMULL(N0, DAG);
8924     assert(Op0.getValueType().is64BitVector() &&
8925            Op1.getValueType().is64BitVector() &&
8926            "unexpected types for extended operands to VMULL");
8927     return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
8928   }
8929 
8930   // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during
8931   // isel lowering to take advantage of no-stall back to back vmul + vmla.
8932   //   vmull q0, d4, d6
8933   //   vmlal q0, d5, d6
8934   // is faster than
8935   //   vaddl q0, d4, d5
8936   //   vmovl q1, d6
8937   //   vmul  q0, q0, q1
8938   SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG);
8939   SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG);
8940   EVT Op1VT = Op1.getValueType();
8941   return DAG.getNode(N0->getOpcode(), DL, VT,
8942                      DAG.getNode(NewOpc, DL, VT,
8943                                DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
8944                      DAG.getNode(NewOpc, DL, VT,
8945                                DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
8946 }
8947 
8948 static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl,
8949                               SelectionDAG &DAG) {
8950   // TODO: Should this propagate fast-math-flags?
8951 
8952   // Convert to float
8953   // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
8954   // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
8955   X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
8956   Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
8957   X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
8958   Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
8959   // Get reciprocal estimate.
8960   // float4 recip = vrecpeq_f32(yf);
8961   Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
8962                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
8963                    Y);
8964   // Because char has a smaller range than uchar, we can actually get away
8965   // without any newton steps.  This requires that we use a weird bias
8966   // of 0xb000, however (again, this has been exhaustively tested).
8967   // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
8968   X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
8969   X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
8970   Y = DAG.getConstant(0xb000, dl, MVT::v4i32);
8971   X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
8972   X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
8973   // Convert back to short.
8974   X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
8975   X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
8976   return X;
8977 }
8978 
8979 static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl,
8980                                SelectionDAG &DAG) {
8981   // TODO: Should this propagate fast-math-flags?
8982 
8983   SDValue N2;
8984   // Convert to float.
8985   // float4 yf = vcvt_f32_s32(vmovl_s16(y));
8986   // float4 xf = vcvt_f32_s32(vmovl_s16(x));
8987   N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
8988   N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
8989   N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
8990   N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
8991 
8992   // Use reciprocal estimate and one refinement step.
8993   // float4 recip = vrecpeq_f32(yf);
8994   // recip *= vrecpsq_f32(yf, recip);
8995   N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
8996                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
8997                    N1);
8998   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
8999                    DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
9000                    N1, N2);
9001   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
9002   // Because short has a smaller range than ushort, we can actually get away
9003   // with only a single newton step.  This requires that we use a weird bias
9004   // of 89, however (again, this has been exhaustively tested).
9005   // float4 result = as_float4(as_int4(xf*recip) + 0x89);
9006   N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
9007   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
9008   N1 = DAG.getConstant(0x89, dl, MVT::v4i32);
9009   N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
9010   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
9011   // Convert back to integer and return.
9012   // return vmovn_s32(vcvt_s32_f32(result));
9013   N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
9014   N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
9015   return N0;
9016 }
9017 
9018 static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG,
9019                          const ARMSubtarget *ST) {
9020   EVT VT = Op.getValueType();
9021   assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
9022          "unexpected type for custom-lowering ISD::SDIV");
9023 
9024   SDLoc dl(Op);
9025   SDValue N0 = Op.getOperand(0);
9026   SDValue N1 = Op.getOperand(1);
9027   SDValue N2, N3;
9028 
9029   if (VT == MVT::v8i8) {
9030     N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
9031     N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);
9032 
9033     N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
9034                      DAG.getIntPtrConstant(4, dl));
9035     N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
9036                      DAG.getIntPtrConstant(4, dl));
9037     N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
9038                      DAG.getIntPtrConstant(0, dl));
9039     N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
9040                      DAG.getIntPtrConstant(0, dl));
9041 
9042     N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
9043     N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16
9044 
9045     N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
9046     N0 = LowerCONCAT_VECTORS(N0, DAG, ST);
9047 
9048     N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
9049     return N0;
9050   }
9051   return LowerSDIV_v4i16(N0, N1, dl, DAG);
9052 }
9053 
9054 static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG,
9055                          const ARMSubtarget *ST) {
9056   // TODO: Should this propagate fast-math-flags?
9057   EVT VT = Op.getValueType();
9058   assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
9059          "unexpected type for custom-lowering ISD::UDIV");
9060 
9061   SDLoc dl(Op);
9062   SDValue N0 = Op.getOperand(0);
9063   SDValue N1 = Op.getOperand(1);
9064   SDValue N2, N3;
9065 
9066   if (VT == MVT::v8i8) {
9067     N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
9068     N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);
9069 
9070     N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
9071                      DAG.getIntPtrConstant(4, dl));
9072     N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
9073                      DAG.getIntPtrConstant(4, dl));
9074     N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
9075                      DAG.getIntPtrConstant(0, dl));
9076     N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
9077                      DAG.getIntPtrConstant(0, dl));
9078 
9079     N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
9080     N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16
9081 
9082     N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
9083     N0 = LowerCONCAT_VECTORS(N0, DAG, ST);
9084 
9085     N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
9086                      DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl,
9087                                      MVT::i32),
9088                      N0);
9089     return N0;
9090   }
9091 
9092   // v4i16 sdiv ... Convert to float.
9093   // float4 yf = vcvt_f32_s32(vmovl_u16(y));
9094   // float4 xf = vcvt_f32_s32(vmovl_u16(x));
9095   N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
9096   N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
9097   N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
9098   SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
9099 
9100   // Use reciprocal estimate and two refinement steps.
9101   // float4 recip = vrecpeq_f32(yf);
9102   // recip *= vrecpsq_f32(yf, recip);
9103   // recip *= vrecpsq_f32(yf, recip);
9104   N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
9105                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
9106                    BN1);
9107   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
9108                    DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
9109                    BN1, N2);
9110   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
9111   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
9112                    DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
9113                    BN1, N2);
9114   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
9115   // Simply multiplying by the reciprocal estimate can leave us a few ulps
9116   // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
9117   // and that it will never cause us to return an answer too large).
9118   // float4 result = as_float4(as_int4(xf*recip) + 2);
9119   N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
9120   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
9121   N1 = DAG.getConstant(2, dl, MVT::v4i32);
9122   N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
9123   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
9124   // Convert back to integer and return.
9125   // return vmovn_u32(vcvt_s32_f32(result));
9126   N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
9127   N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
9128   return N0;
9129 }
9130 
9131 static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
9132   SDNode *N = Op.getNode();
9133   EVT VT = N->getValueType(0);
9134   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
9135 
9136   SDValue Carry = Op.getOperand(2);
9137 
9138   SDLoc DL(Op);
9139 
9140   SDValue Result;
9141   if (Op.getOpcode() == ISD::ADDCARRY) {
9142     // This converts the boolean value carry into the carry flag.
9143     Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);
9144 
9145     // Do the addition proper using the carry flag we wanted.
9146     Result = DAG.getNode(ARMISD::ADDE, DL, VTs, Op.getOperand(0),
9147                          Op.getOperand(1), Carry);
9148 
9149     // Now convert the carry flag into a boolean value.
9150     Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG);
9151   } else {
9152     // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we
9153     // have to invert the carry first.
9154     Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
9155                         DAG.getConstant(1, DL, MVT::i32), Carry);
9156     // This converts the boolean value carry into the carry flag.
9157     Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);
9158 
9159     // Do the subtraction proper using the carry flag we wanted.
9160     Result = DAG.getNode(ARMISD::SUBE, DL, VTs, Op.getOperand(0),
9161                          Op.getOperand(1), Carry);
9162 
9163     // Now convert the carry flag into a boolean value.
9164     Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG);
9165     // But the carry returned by ARMISD::SUBE is not a borrow as expected
9166     // by ISD::SUBCARRY, so compute 1 - C.
9167     Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
9168                         DAG.getConstant(1, DL, MVT::i32), Carry);
9169   }
9170 
9171   // Return both values.
9172   return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Carry);
9173 }
9174 
9175 SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
9176   assert(Subtarget->isTargetDarwin());
9177 
9178   // For iOS, we want to call an alternative entry point: __sincos_stret,
9179   // return values are passed via sret.
9180   SDLoc dl(Op);
9181   SDValue Arg = Op.getOperand(0);
9182   EVT ArgVT = Arg.getValueType();
9183   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
9184   auto PtrVT = getPointerTy(DAG.getDataLayout());
9185 
9186   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9187   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9188 
9189   // Pair of floats / doubles used to pass the result.
9190   Type *RetTy = StructType::get(ArgTy, ArgTy);
9191   auto &DL = DAG.getDataLayout();
9192 
9193   ArgListTy Args;
9194   bool ShouldUseSRet = Subtarget->isAPCS_ABI();
9195   SDValue SRet;
9196   if (ShouldUseSRet) {
9197     // Create stack object for sret.
9198     const uint64_t ByteSize = DL.getTypeAllocSize(RetTy);
9199     const Align StackAlign = DL.getPrefTypeAlign(RetTy);
9200     int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
9201     SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL));
9202 
9203     ArgListEntry Entry;
9204     Entry.Node = SRet;
9205     Entry.Ty = RetTy->getPointerTo();
9206     Entry.IsSExt = false;
9207     Entry.IsZExt = false;
9208     Entry.IsSRet = true;
9209     Args.push_back(Entry);
9210     RetTy = Type::getVoidTy(*DAG.getContext());
9211   }
9212 
9213   ArgListEntry Entry;
9214   Entry.Node = Arg;
9215   Entry.Ty = ArgTy;
9216   Entry.IsSExt = false;
9217   Entry.IsZExt = false;
9218   Args.push_back(Entry);
9219 
9220   RTLIB::Libcall LC =
9221       (ArgVT == MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
9222   const char *LibcallName = getLibcallName(LC);
9223   CallingConv::ID CC = getLibcallCallingConv(LC);
9224   SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL));
9225 
9226   TargetLowering::CallLoweringInfo CLI(DAG);
9227   CLI.setDebugLoc(dl)
9228       .setChain(DAG.getEntryNode())
9229       .setCallee(CC, RetTy, Callee, std::move(Args))
9230       .setDiscardResult(ShouldUseSRet);
9231   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
9232 
9233   if (!ShouldUseSRet)
9234     return CallResult.first;
9235 
9236   SDValue LoadSin =
9237       DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo());
9238 
9239   // Address of cos field.
9240   SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet,
9241                             DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl));
9242   SDValue LoadCos =
9243       DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo());
9244 
9245   SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
9246   return DAG.getNode(ISD::MERGE_VALUES, dl, Tys,
9247                      LoadSin.getValue(0), LoadCos.getValue(0));
9248 }
9249 
9250 SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG,
9251                                                   bool Signed,
9252                                                   SDValue &Chain) const {
9253   EVT VT = Op.getValueType();
9254   assert((VT == MVT::i32 || VT == MVT::i64) &&
9255          "unexpected type for custom lowering DIV");
9256   SDLoc dl(Op);
9257 
9258   const auto &DL = DAG.getDataLayout();
9259   const auto &TLI = DAG.getTargetLoweringInfo();
9260 
9261   const char *Name = nullptr;
9262   if (Signed)
9263     Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64";
9264   else
9265     Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64";
9266 
9267   SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL));
9268 
9269   ARMTargetLowering::ArgListTy Args;
9270 
9271   for (auto AI : {1, 0}) {
9272     ArgListEntry Arg;
9273     Arg.Node = Op.getOperand(AI);
9274     Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext());
9275     Args.push_back(Arg);
9276   }
9277 
9278   CallLoweringInfo CLI(DAG);
9279   CLI.setDebugLoc(dl)
9280     .setChain(Chain)
9281     .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()),
9282                ES, std::move(Args));
9283 
9284   return LowerCallTo(CLI).first;
9285 }
9286 
9287 // This is a code size optimisation: return the original SDIV node to
9288 // DAGCombiner when we don't want to expand SDIV into a sequence of
9289 // instructions, and an empty node otherwise which will cause the
9290 // SDIV to be expanded in DAGCombine.
9291 SDValue
9292 ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
9293                                  SelectionDAG &DAG,
9294                                  SmallVectorImpl<SDNode *> &Created) const {
9295   // TODO: Support SREM
9296   if (N->getOpcode() != ISD::SDIV)
9297     return SDValue();
9298 
9299   const auto &ST = static_cast<const ARMSubtarget&>(DAG.getSubtarget());
9300   const bool MinSize = ST.hasMinSize();
9301   const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode()
9302                                       : ST.hasDivideInARMMode();
9303 
9304   // Don't touch vector types; rewriting this may lead to scalarizing
9305   // the int divs.
9306   if (N->getOperand(0).getValueType().isVector())
9307     return SDValue();
9308 
9309   // Bail if MinSize is not set, and also for both ARM and Thumb mode we need
9310   // hwdiv support for this to be really profitable.
9311   if (!(MinSize && HasDivide))
9312     return SDValue();
9313 
9314   // ARM mode is a bit simpler than Thumb: we can handle large power
9315   // of 2 immediates with 1 mov instruction; no further checks required,
9316   // just return the sdiv node.
9317   if (!ST.isThumb())
9318     return SDValue(N, 0);
9319 
9320   // In Thumb mode, immediates larger than 128 need a wide 4-byte MOV,
9321   // and thus lose the code size benefits of a MOVS that requires only 2.
9322   // TargetTransformInfo and 'getIntImmCodeSizeCost' could be helpful here,
9323   // but as it's doing exactly this, it's not worth the trouble to get TTI.
9324   if (Divisor.sgt(128))
9325     return SDValue();
9326 
9327   return SDValue(N, 0);
9328 }
9329 
9330 SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG,
9331                                             bool Signed) const {
9332   assert(Op.getValueType() == MVT::i32 &&
9333          "unexpected type for custom lowering DIV");
9334   SDLoc dl(Op);
9335 
9336   SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other,
9337                                DAG.getEntryNode(), Op.getOperand(1));
9338 
9339   return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
9340 }
9341 
9342 static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) {
9343   SDLoc DL(N);
9344   SDValue Op = N->getOperand(1);
9345   if (N->getValueType(0) == MVT::i32)
9346     return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op);
9347   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
9348                            DAG.getConstant(0, DL, MVT::i32));
9349   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
9350                            DAG.getConstant(1, DL, MVT::i32));
9351   return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain,
9352                      DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi));
9353 }
9354 
9355 void ARMTargetLowering::ExpandDIV_Windows(
9356     SDValue Op, SelectionDAG &DAG, bool Signed,
9357     SmallVectorImpl<SDValue> &Results) const {
9358   const auto &DL = DAG.getDataLayout();
9359   const auto &TLI = DAG.getTargetLoweringInfo();
9360 
9361   assert(Op.getValueType() == MVT::i64 &&
9362          "unexpected type for custom lowering DIV");
9363   SDLoc dl(Op);
9364 
9365   SDValue DBZCHK = WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode());
9366 
9367   SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
9368 
9369   SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result);
9370   SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result,
9371                               DAG.getConstant(32, dl, TLI.getPointerTy(DL)));
9372   Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper);
9373 
9374   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lower, Upper));
9375 }
9376 
9377 static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) {
9378   LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
9379   EVT MemVT = LD->getMemoryVT();
9380   assert((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || MemVT == MVT::v16i1) &&
9381          "Expected a predicate type!");
9382   assert(MemVT == Op.getValueType());
9383   assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
9384          "Expected a non-extending load");
9385   assert(LD->isUnindexed() && "Expected a unindexed load");
9386 
9387   // The basic MVE VLDR on a v4i1/v8i1 actually loads the entire 16bit
9388   // predicate, with the "v4i1" bits spread out over the 16 bits loaded. We
9389   // need to make sure that 8/4 bits are actually loaded into the correct
9390   // place, which means loading the value and then shuffling the values into
9391   // the bottom bits of the predicate.
9392   // Equally, VLDR for an v16i1 will actually load 32bits (so will be incorrect
9393   // for BE).
9394 
9395   SDLoc dl(Op);
9396   SDValue Load = DAG.getExtLoad(
9397       ISD::EXTLOAD, dl, MVT::i32, LD->getChain(), LD->getBasePtr(),
9398       EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()),
9399       LD->getMemOperand());
9400   SDValue Pred = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Load);
9401   if (MemVT != MVT::v16i1)
9402     Pred = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MemVT, Pred,
9403                        DAG.getConstant(0, dl, MVT::i32));
9404   return DAG.getMergeValues({Pred, Load.getValue(1)}, dl);
9405 }
9406 
9407 void ARMTargetLowering::LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
9408                                   SelectionDAG &DAG) const {
9409   LoadSDNode *LD = cast<LoadSDNode>(N);
9410   EVT MemVT = LD->getMemoryVT();
9411   assert(LD->isUnindexed() && "Loads should be unindexed at this point.");
9412 
9413   if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() &&
9414       !Subtarget->isThumb1Only() && LD->isVolatile()) {
9415     SDLoc dl(N);
9416     SDValue Result = DAG.getMemIntrinsicNode(
9417         ARMISD::LDRD, dl, DAG.getVTList({MVT::i32, MVT::i32, MVT::Other}),
9418         {LD->getChain(), LD->getBasePtr()}, MemVT, LD->getMemOperand());
9419     SDValue Lo = Result.getValue(DAG.getDataLayout().isLittleEndian() ? 0 : 1);
9420     SDValue Hi = Result.getValue(DAG.getDataLayout().isLittleEndian() ? 1 : 0);
9421     SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
9422     Results.append({Pair, Result.getValue(2)});
9423   }
9424 }
9425 
9426 static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) {
9427   StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
9428   EVT MemVT = ST->getMemoryVT();
9429   assert((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || MemVT == MVT::v16i1) &&
9430          "Expected a predicate type!");
9431   assert(MemVT == ST->getValue().getValueType());
9432   assert(!ST->isTruncatingStore() && "Expected a non-extending store");
9433   assert(ST->isUnindexed() && "Expected a unindexed store");
9434 
9435   // Only store the v4i1 or v8i1 worth of bits, via a buildvector with top bits
9436   // unset and a scalar store.
9437   SDLoc dl(Op);
9438   SDValue Build = ST->getValue();
9439   if (MemVT != MVT::v16i1) {
9440     SmallVector<SDValue, 16> Ops;
9441     for (unsigned I = 0; I < MemVT.getVectorNumElements(); I++)
9442       Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Build,
9443                                 DAG.getConstant(I, dl, MVT::i32)));
9444     for (unsigned I = MemVT.getVectorNumElements(); I < 16; I++)
9445       Ops.push_back(DAG.getUNDEF(MVT::i32));
9446     Build = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i1, Ops);
9447   }
9448   SDValue GRP = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Build);
9449   return DAG.getTruncStore(
9450       ST->getChain(), dl, GRP, ST->getBasePtr(),
9451       EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()),
9452       ST->getMemOperand());
9453 }
9454 
9455 static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG,
9456                           const ARMSubtarget *Subtarget) {
9457   StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
9458   EVT MemVT = ST->getMemoryVT();
9459   assert(ST->isUnindexed() && "Stores should be unindexed at this point.");
9460 
9461   if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() &&
9462       !Subtarget->isThumb1Only() && ST->isVolatile()) {
9463     SDNode *N = Op.getNode();
9464     SDLoc dl(N);
9465 
9466     SDValue Lo = DAG.getNode(
9467         ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(),
9468         DAG.getTargetConstant(DAG.getDataLayout().isLittleEndian() ? 0 : 1, dl,
9469                               MVT::i32));
9470     SDValue Hi = DAG.getNode(
9471         ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(),
9472         DAG.getTargetConstant(DAG.getDataLayout().isLittleEndian() ? 1 : 0, dl,
9473                               MVT::i32));
9474 
9475     return DAG.getMemIntrinsicNode(ARMISD::STRD, dl, DAG.getVTList(MVT::Other),
9476                                    {ST->getChain(), Lo, Hi, ST->getBasePtr()},
9477                                    MemVT, ST->getMemOperand());
9478   } else if (Subtarget->hasMVEIntegerOps() &&
9479              ((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 ||
9480                MemVT == MVT::v16i1))) {
9481     return LowerPredicateStore(Op, DAG);
9482   }
9483 
9484   return SDValue();
9485 }
9486 
9487 static bool isZeroVector(SDValue N) {
9488   return (ISD::isBuildVectorAllZeros(N.getNode()) ||
9489           (N->getOpcode() == ARMISD::VMOVIMM &&
9490            isNullConstant(N->getOperand(0))));
9491 }
9492 
9493 static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) {
9494   MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
9495   MVT VT = Op.getSimpleValueType();
9496   SDValue Mask = N->getMask();
9497   SDValue PassThru = N->getPassThru();
9498   SDLoc dl(Op);
9499 
9500   if (isZeroVector(PassThru))
9501     return Op;
9502 
9503   // MVE Masked loads use zero as the passthru value. Here we convert undef to
9504   // zero too, and other values are lowered to a select.
9505   SDValue ZeroVec = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
9506                                 DAG.getTargetConstant(0, dl, MVT::i32));
9507   SDValue NewLoad = DAG.getMaskedLoad(
9508       VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask, ZeroVec,
9509       N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
9510       N->getExtensionType(), N->isExpandingLoad());
9511   SDValue Combo = NewLoad;
9512   bool PassThruIsCastZero = (PassThru.getOpcode() == ISD::BITCAST ||
9513                              PassThru.getOpcode() == ARMISD::VECTOR_REG_CAST) &&
9514                             isZeroVector(PassThru->getOperand(0));
9515   if (!PassThru.isUndef() && !PassThruIsCastZero)
9516     Combo = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
9517   return DAG.getMergeValues({Combo, NewLoad.getValue(1)}, dl);
9518 }
9519 
9520 static SDValue LowerVecReduce(SDValue Op, SelectionDAG &DAG,
9521                               const ARMSubtarget *ST) {
9522   if (!ST->hasMVEIntegerOps())
9523     return SDValue();
9524 
9525   SDLoc dl(Op);
9526   unsigned BaseOpcode = 0;
9527   switch (Op->getOpcode()) {
9528   default: llvm_unreachable("Expected VECREDUCE opcode");
9529   case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break;
9530   case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break;
9531   case ISD::VECREDUCE_MUL:  BaseOpcode = ISD::MUL; break;
9532   case ISD::VECREDUCE_AND:  BaseOpcode = ISD::AND; break;
9533   case ISD::VECREDUCE_OR:   BaseOpcode = ISD::OR; break;
9534   case ISD::VECREDUCE_XOR:  BaseOpcode = ISD::XOR; break;
9535   case ISD::VECREDUCE_FMAX: BaseOpcode = ISD::FMAXNUM; break;
9536   case ISD::VECREDUCE_FMIN: BaseOpcode = ISD::FMINNUM; break;
9537   }
9538 
9539   SDValue Op0 = Op->getOperand(0);
9540   EVT VT = Op0.getValueType();
9541   EVT EltVT = VT.getVectorElementType();
9542   unsigned NumElts = VT.getVectorNumElements();
9543   unsigned NumActiveLanes = NumElts;
9544 
9545   assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 ||
9546           NumActiveLanes == 2) &&
9547          "Only expected a power 2 vector size");
9548 
9549   // Use Mul(X, Rev(X)) until 4 items remain. Going down to 4 vector elements
9550   // allows us to easily extract vector elements from the lanes.
9551   while (NumActiveLanes > 4) {
9552     unsigned RevOpcode = NumActiveLanes == 16 ? ARMISD::VREV16 : ARMISD::VREV32;
9553     SDValue Rev = DAG.getNode(RevOpcode, dl, VT, Op0);
9554     Op0 = DAG.getNode(BaseOpcode, dl, VT, Op0, Rev);
9555     NumActiveLanes /= 2;
9556   }
9557 
9558   SDValue Res;
9559   if (NumActiveLanes == 4) {
9560     // The remaining 4 elements are summed sequentially
9561     SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
9562                               DAG.getConstant(0 * NumElts / 4, dl, MVT::i32));
9563     SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
9564                               DAG.getConstant(1 * NumElts / 4, dl, MVT::i32));
9565     SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
9566                               DAG.getConstant(2 * NumElts / 4, dl, MVT::i32));
9567     SDValue Ext3 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
9568                               DAG.getConstant(3 * NumElts / 4, dl, MVT::i32));
9569     SDValue Res0 = DAG.getNode(BaseOpcode, dl, EltVT, Ext0, Ext1, Op->getFlags());
9570     SDValue Res1 = DAG.getNode(BaseOpcode, dl, EltVT, Ext2, Ext3, Op->getFlags());
9571     Res = DAG.getNode(BaseOpcode, dl, EltVT, Res0, Res1, Op->getFlags());
9572   } else {
9573     SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
9574                               DAG.getConstant(0, dl, MVT::i32));
9575     SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
9576                               DAG.getConstant(1, dl, MVT::i32));
9577     Res = DAG.getNode(BaseOpcode, dl, EltVT, Ext0, Ext1, Op->getFlags());
9578   }
9579 
9580   // Result type may be wider than element type.
9581   if (EltVT != Op->getValueType(0))
9582     Res = DAG.getNode(ISD::ANY_EXTEND, dl, Op->getValueType(0), Res);
9583   return Res;
9584 }
9585 
9586 static SDValue LowerVecReduceF(SDValue Op, SelectionDAG &DAG,
9587                                const ARMSubtarget *ST) {
9588   if (!ST->hasMVEFloatOps())
9589     return SDValue();
9590   return LowerVecReduce(Op, DAG, ST);
9591 }
9592 
9593 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
9594   if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
9595     // Acquire/Release load/store is not legal for targets without a dmb or
9596     // equivalent available.
9597     return SDValue();
9598 
9599   // Monotonic load/store is legal for all targets.
9600   return Op;
9601 }
9602 
9603 static void ReplaceREADCYCLECOUNTER(SDNode *N,
9604                                     SmallVectorImpl<SDValue> &Results,
9605                                     SelectionDAG &DAG,
9606                                     const ARMSubtarget *Subtarget) {
9607   SDLoc DL(N);
9608   // Under Power Management extensions, the cycle-count is:
9609   //    mrc p15, #0, <Rt>, c9, c13, #0
9610   SDValue Ops[] = { N->getOperand(0), // Chain
9611                     DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32),
9612                     DAG.getTargetConstant(15, DL, MVT::i32),
9613                     DAG.getTargetConstant(0, DL, MVT::i32),
9614                     DAG.getTargetConstant(9, DL, MVT::i32),
9615                     DAG.getTargetConstant(13, DL, MVT::i32),
9616                     DAG.getTargetConstant(0, DL, MVT::i32)
9617   };
9618 
9619   SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
9620                                  DAG.getVTList(MVT::i32, MVT::Other), Ops);
9621   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32,
9622                                 DAG.getConstant(0, DL, MVT::i32)));
9623   Results.push_back(Cycles32.getValue(1));
9624 }
9625 
9626 static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
9627   SDLoc dl(V.getNode());
9628   SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32);
9629   SDValue VHi = DAG.getAnyExtOrTrunc(
9630       DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)),
9631       dl, MVT::i32);
9632   bool isBigEndian = DAG.getDataLayout().isBigEndian();
9633   if (isBigEndian)
9634     std::swap (VLo, VHi);
9635   SDValue RegClass =
9636       DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
9637   SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32);
9638   SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32);
9639   const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
9640   return SDValue(
9641       DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
9642 }
9643 
9644 static void ReplaceCMP_SWAP_64Results(SDNode *N,
9645                                        SmallVectorImpl<SDValue> & Results,
9646                                        SelectionDAG &DAG) {
9647   assert(N->getValueType(0) == MVT::i64 &&
9648          "AtomicCmpSwap on types less than 64 should be legal");
9649   SDValue Ops[] = {N->getOperand(1),
9650                    createGPRPairNode(DAG, N->getOperand(2)),
9651                    createGPRPairNode(DAG, N->getOperand(3)),
9652                    N->getOperand(0)};
9653   SDNode *CmpSwap = DAG.getMachineNode(
9654       ARM::CMP_SWAP_64, SDLoc(N),
9655       DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops);
9656 
9657   MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
9658   DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});
9659 
9660   bool isBigEndian = DAG.getDataLayout().isBigEndian();
9661 
9662   SDValue Lo =
9663       DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_1 : ARM::gsub_0,
9664                                  SDLoc(N), MVT::i32, SDValue(CmpSwap, 0));
9665   SDValue Hi =
9666       DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_0 : ARM::gsub_1,
9667                                  SDLoc(N), MVT::i32, SDValue(CmpSwap, 0));
9668   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i64, Lo, Hi));
9669   Results.push_back(SDValue(CmpSwap, 2));
9670 }
9671 
9672 SDValue ARMTargetLowering::LowerFSETCC(SDValue Op, SelectionDAG &DAG) const {
9673   SDLoc dl(Op);
9674   EVT VT = Op.getValueType();
9675   SDValue Chain = Op.getOperand(0);
9676   SDValue LHS = Op.getOperand(1);
9677   SDValue RHS = Op.getOperand(2);
9678   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get();
9679   bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
9680 
9681   // If we don't have instructions of this float type then soften to a libcall
9682   // and use SETCC instead.
9683   if (isUnsupportedFloatingType(LHS.getValueType())) {
9684     DAG.getTargetLoweringInfo().softenSetCCOperands(
9685       DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS, Chain, IsSignaling);
9686     if (!RHS.getNode()) {
9687       RHS = DAG.getConstant(0, dl, LHS.getValueType());
9688       CC = ISD::SETNE;
9689     }
9690     SDValue Result = DAG.getNode(ISD::SETCC, dl, VT, LHS, RHS,
9691                                  DAG.getCondCode(CC));
9692     return DAG.getMergeValues({Result, Chain}, dl);
9693   }
9694 
9695   ARMCC::CondCodes CondCode, CondCode2;
9696   FPCCToARMCC(CC, CondCode, CondCode2);
9697 
9698   // FIXME: Chain is not handled correctly here. Currently the FPSCR is implicit
9699   // in CMPFP and CMPFPE, but instead it should be made explicit by these
9700   // instructions using a chain instead of glue. This would also fix the problem
9701   // here (and also in LowerSELECT_CC) where we generate two comparisons when
9702   // CondCode2 != AL.
9703   SDValue True = DAG.getConstant(1, dl, VT);
9704   SDValue False =  DAG.getConstant(0, dl, VT);
9705   SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
9706   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
9707   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling);
9708   SDValue Result = getCMOV(dl, VT, False, True, ARMcc, CCR, Cmp, DAG);
9709   if (CondCode2 != ARMCC::AL) {
9710     ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
9711     Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling);
9712     Result = getCMOV(dl, VT, Result, True, ARMcc, CCR, Cmp, DAG);
9713   }
9714   return DAG.getMergeValues({Result, Chain}, dl);
9715 }
9716 
9717 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
9718   LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump());
9719   switch (Op.getOpcode()) {
9720   default: llvm_unreachable("Don't know how to custom lower this!");
9721   case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG);
9722   case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
9723   case ISD::BlockAddress:  return LowerBlockAddress(Op, DAG);
9724   case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
9725   case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
9726   case ISD::SELECT:        return LowerSELECT(Op, DAG);
9727   case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG);
9728   case ISD::BRCOND:        return LowerBRCOND(Op, DAG);
9729   case ISD::BR_CC:         return LowerBR_CC(Op, DAG);
9730   case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
9731   case ISD::VASTART:       return LowerVASTART(Op, DAG);
9732   case ISD::ATOMIC_FENCE:  return LowerATOMIC_FENCE(Op, DAG, Subtarget);
9733   case ISD::PREFETCH:      return LowerPREFETCH(Op, DAG, Subtarget);
9734   case ISD::SINT_TO_FP:
9735   case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
9736   case ISD::STRICT_FP_TO_SINT:
9737   case ISD::STRICT_FP_TO_UINT:
9738   case ISD::FP_TO_SINT:
9739   case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
9740   case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
9741   case ISD::RETURNADDR:    return LowerRETURNADDR(Op, DAG);
9742   case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
9743   case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
9744   case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
9745   case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
9746   case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG, Subtarget);
9747   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
9748                                                                Subtarget);
9749   case ISD::BITCAST:       return ExpandBITCAST(Op.getNode(), DAG, Subtarget);
9750   case ISD::SHL:
9751   case ISD::SRL:
9752   case ISD::SRA:           return LowerShift(Op.getNode(), DAG, Subtarget);
9753   case ISD::SREM:          return LowerREM(Op.getNode(), DAG);
9754   case ISD::UREM:          return LowerREM(Op.getNode(), DAG);
9755   case ISD::SHL_PARTS:     return LowerShiftLeftParts(Op, DAG);
9756   case ISD::SRL_PARTS:
9757   case ISD::SRA_PARTS:     return LowerShiftRightParts(Op, DAG);
9758   case ISD::CTTZ:
9759   case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
9760   case ISD::CTPOP:         return LowerCTPOP(Op.getNode(), DAG, Subtarget);
9761   case ISD::SETCC:         return LowerVSETCC(Op, DAG, Subtarget);
9762   case ISD::SETCCCARRY:    return LowerSETCCCARRY(Op, DAG);
9763   case ISD::ConstantFP:    return LowerConstantFP(Op, DAG, Subtarget);
9764   case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG, Subtarget);
9765   case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
9766   case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG, Subtarget);
9767   case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
9768   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG, Subtarget);
9769   case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG, Subtarget);
9770   case ISD::FLT_ROUNDS_:   return LowerFLT_ROUNDS_(Op, DAG);
9771   case ISD::MUL:           return LowerMUL(Op, DAG);
9772   case ISD::SDIV:
9773     if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
9774       return LowerDIV_Windows(Op, DAG, /* Signed */ true);
9775     return LowerSDIV(Op, DAG, Subtarget);
9776   case ISD::UDIV:
9777     if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
9778       return LowerDIV_Windows(Op, DAG, /* Signed */ false);
9779     return LowerUDIV(Op, DAG, Subtarget);
9780   case ISD::ADDCARRY:
9781   case ISD::SUBCARRY:      return LowerADDSUBCARRY(Op, DAG);
9782   case ISD::SADDO:
9783   case ISD::SSUBO:
9784     return LowerSignedALUO(Op, DAG);
9785   case ISD::UADDO:
9786   case ISD::USUBO:
9787     return LowerUnsignedALUO(Op, DAG);
9788   case ISD::SADDSAT:
9789   case ISD::SSUBSAT:
9790     return LowerSADDSUBSAT(Op, DAG, Subtarget);
9791   case ISD::LOAD:
9792     return LowerPredicateLoad(Op, DAG);
9793   case ISD::STORE:
9794     return LowerSTORE(Op, DAG, Subtarget);
9795   case ISD::MLOAD:
9796     return LowerMLOAD(Op, DAG);
9797   case ISD::VECREDUCE_MUL:
9798   case ISD::VECREDUCE_AND:
9799   case ISD::VECREDUCE_OR:
9800   case ISD::VECREDUCE_XOR:
9801     return LowerVecReduce(Op, DAG, Subtarget);
9802   case ISD::VECREDUCE_FADD:
9803   case ISD::VECREDUCE_FMUL:
9804   case ISD::VECREDUCE_FMIN:
9805   case ISD::VECREDUCE_FMAX:
9806     return LowerVecReduceF(Op, DAG, Subtarget);
9807   case ISD::ATOMIC_LOAD:
9808   case ISD::ATOMIC_STORE:  return LowerAtomicLoadStore(Op, DAG);
9809   case ISD::FSINCOS:       return LowerFSINCOS(Op, DAG);
9810   case ISD::SDIVREM:
9811   case ISD::UDIVREM:       return LowerDivRem(Op, DAG);
9812   case ISD::DYNAMIC_STACKALLOC:
9813     if (Subtarget->isTargetWindows())
9814       return LowerDYNAMIC_STACKALLOC(Op, DAG);
9815     llvm_unreachable("Don't know how to custom lower this!");
9816   case ISD::STRICT_FP_ROUND:
9817   case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
9818   case ISD::STRICT_FP_EXTEND:
9819   case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
9820   case ISD::STRICT_FSETCC:
9821   case ISD::STRICT_FSETCCS: return LowerFSETCC(Op, DAG);
9822   case ARMISD::WIN__DBZCHK: return SDValue();
9823   }
9824 }
9825 
9826 static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
9827                                  SelectionDAG &DAG) {
9828   unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
9829   unsigned Opc = 0;
9830   if (IntNo == Intrinsic::arm_smlald)
9831     Opc = ARMISD::SMLALD;
9832   else if (IntNo == Intrinsic::arm_smlaldx)
9833     Opc = ARMISD::SMLALDX;
9834   else if (IntNo == Intrinsic::arm_smlsld)
9835     Opc = ARMISD::SMLSLD;
9836   else if (IntNo == Intrinsic::arm_smlsldx)
9837     Opc = ARMISD::SMLSLDX;
9838   else
9839     return;
9840 
9841   SDLoc dl(N);
9842   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
9843                            N->getOperand(3),
9844                            DAG.getConstant(0, dl, MVT::i32));
9845   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
9846                            N->getOperand(3),
9847                            DAG.getConstant(1, dl, MVT::i32));
9848 
9849   SDValue LongMul = DAG.getNode(Opc, dl,
9850                                 DAG.getVTList(MVT::i32, MVT::i32),
9851                                 N->getOperand(1), N->getOperand(2),
9852                                 Lo, Hi);
9853   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64,
9854                                 LongMul.getValue(0), LongMul.getValue(1)));
9855 }
9856 
9857 /// ReplaceNodeResults - Replace the results of node with an illegal result
9858 /// type with new values built out of custom code.
9859 void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
9860                                            SmallVectorImpl<SDValue> &Results,
9861                                            SelectionDAG &DAG) const {
9862   SDValue Res;
9863   switch (N->getOpcode()) {
9864   default:
9865     llvm_unreachable("Don't know how to custom expand this!");
9866   case ISD::READ_REGISTER:
9867     ExpandREAD_REGISTER(N, Results, DAG);
9868     break;
9869   case ISD::BITCAST:
9870     Res = ExpandBITCAST(N, DAG, Subtarget);
9871     break;
9872   case ISD::SRL:
9873   case ISD::SRA:
9874   case ISD::SHL:
9875     Res = Expand64BitShift(N, DAG, Subtarget);
9876     break;
9877   case ISD::SREM:
9878   case ISD::UREM:
9879     Res = LowerREM(N, DAG);
9880     break;
9881   case ISD::SDIVREM:
9882   case ISD::UDIVREM:
9883     Res = LowerDivRem(SDValue(N, 0), DAG);
9884     assert(Res.getNumOperands() == 2 && "DivRem needs two values");
9885     Results.push_back(Res.getValue(0));
9886     Results.push_back(Res.getValue(1));
9887     return;
9888   case ISD::SADDSAT:
9889   case ISD::SSUBSAT:
9890     Res = LowerSADDSUBSAT(SDValue(N, 0), DAG, Subtarget);
9891     break;
9892   case ISD::READCYCLECOUNTER:
9893     ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget);
9894     return;
9895   case ISD::UDIV:
9896   case ISD::SDIV:
9897     assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows");
9898     return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV,
9899                              Results);
9900   case ISD::ATOMIC_CMP_SWAP:
9901     ReplaceCMP_SWAP_64Results(N, Results, DAG);
9902     return;
9903   case ISD::INTRINSIC_WO_CHAIN:
9904     return ReplaceLongIntrinsic(N, Results, DAG);
9905   case ISD::ABS:
9906      lowerABS(N, Results, DAG);
9907      return ;
9908   case ISD::LOAD:
9909     LowerLOAD(N, Results, DAG);
9910     break;
9911   }
9912   if (Res.getNode())
9913     Results.push_back(Res);
9914 }
9915 
9916 //===----------------------------------------------------------------------===//
9917 //                           ARM Scheduler Hooks
9918 //===----------------------------------------------------------------------===//
9919 
9920 /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
9921 /// registers the function context.
9922 void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
9923                                                MachineBasicBlock *MBB,
9924                                                MachineBasicBlock *DispatchBB,
9925                                                int FI) const {
9926   assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
9927          "ROPI/RWPI not currently supported with SjLj");
9928   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
9929   DebugLoc dl = MI.getDebugLoc();
9930   MachineFunction *MF = MBB->getParent();
9931   MachineRegisterInfo *MRI = &MF->getRegInfo();
9932   MachineConstantPool *MCP = MF->getConstantPool();
9933   ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
9934   const Function &F = MF->getFunction();
9935 
9936   bool isThumb = Subtarget->isThumb();
9937   bool isThumb2 = Subtarget->isThumb2();
9938 
9939   unsigned PCLabelId = AFI->createPICLabelUId();
9940   unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
9941   ARMConstantPoolValue *CPV =
9942     ARMConstantPoolMBB::Create(F.getContext(), DispatchBB, PCLabelId, PCAdj);
9943   unsigned CPI = MCP->getConstantPoolIndex(CPV, Align(4));
9944 
9945   const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass
9946                                            : &ARM::GPRRegClass;
9947 
9948   // Grab constant pool and fixed stack memory operands.
9949   MachineMemOperand *CPMMO =
9950       MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
9951                                MachineMemOperand::MOLoad, 4, Align(4));
9952 
9953   MachineMemOperand *FIMMOSt =
9954       MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
9955                                MachineMemOperand::MOStore, 4, Align(4));
9956 
9957   // Load the address of the dispatch MBB into the jump buffer.
9958   if (isThumb2) {
9959     // Incoming value: jbuf
9960     //   ldr.n  r5, LCPI1_1
9961     //   orr    r5, r5, #1
9962     //   add    r5, pc
9963     //   str    r5, [$jbuf, #+4] ; &jbuf[1]
9964     Register NewVReg1 = MRI->createVirtualRegister(TRC);
9965     BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1)
9966         .addConstantPoolIndex(CPI)
9967         .addMemOperand(CPMMO)
9968         .add(predOps(ARMCC::AL));
9969     // Set the low bit because of thumb mode.
9970     Register NewVReg2 = MRI->createVirtualRegister(TRC);
9971     BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
9972         .addReg(NewVReg1, RegState::Kill)
9973         .addImm(0x01)
9974         .add(predOps(ARMCC::AL))
9975         .add(condCodeOp());
9976     Register NewVReg3 = MRI->createVirtualRegister(TRC);
9977     BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
9978       .addReg(NewVReg2, RegState::Kill)
9979       .addImm(PCLabelId);
9980     BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12))
9981         .addReg(NewVReg3, RegState::Kill)
9982         .addFrameIndex(FI)
9983         .addImm(36) // &jbuf[1] :: pc
9984         .addMemOperand(FIMMOSt)
9985         .add(predOps(ARMCC::AL));
9986   } else if (isThumb) {
9987     // Incoming value: jbuf
9988     //   ldr.n  r1, LCPI1_4
9989     //   add    r1, pc
9990     //   mov    r2, #1
9991     //   orrs   r1, r2
9992     //   add    r2, $jbuf, #+4 ; &jbuf[1]
9993     //   str    r1, [r2]
9994     Register NewVReg1 = MRI->createVirtualRegister(TRC);
9995     BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1)
9996         .addConstantPoolIndex(CPI)
9997         .addMemOperand(CPMMO)
9998         .add(predOps(ARMCC::AL));
9999     Register NewVReg2 = MRI->createVirtualRegister(TRC);
10000     BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2)
10001       .addReg(NewVReg1, RegState::Kill)
10002       .addImm(PCLabelId);
10003     // Set the low bit because of thumb mode.
10004     Register NewVReg3 = MRI->createVirtualRegister(TRC);
10005     BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3)
10006         .addReg(ARM::CPSR, RegState::Define)
10007         .addImm(1)
10008         .add(predOps(ARMCC::AL));
10009     Register NewVReg4 = MRI->createVirtualRegister(TRC);
10010     BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4)
10011         .addReg(ARM::CPSR, RegState::Define)
10012         .addReg(NewVReg2, RegState::Kill)
10013         .addReg(NewVReg3, RegState::Kill)
10014         .add(predOps(ARMCC::AL));
10015     Register NewVReg5 = MRI->createVirtualRegister(TRC);
10016     BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5)
10017             .addFrameIndex(FI)
10018             .addImm(36); // &jbuf[1] :: pc
10019     BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi))
10020         .addReg(NewVReg4, RegState::Kill)
10021         .addReg(NewVReg5, RegState::Kill)
10022         .addImm(0)
10023         .addMemOperand(FIMMOSt)
10024         .add(predOps(ARMCC::AL));
10025   } else {
10026     // Incoming value: jbuf
10027     //   ldr  r1, LCPI1_1
10028     //   add  r1, pc, r1
10029     //   str  r1, [$jbuf, #+4] ; &jbuf[1]
10030     Register NewVReg1 = MRI->createVirtualRegister(TRC);
10031     BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1)
10032         .addConstantPoolIndex(CPI)
10033         .addImm(0)
10034         .addMemOperand(CPMMO)
10035         .add(predOps(ARMCC::AL));
10036     Register NewVReg2 = MRI->createVirtualRegister(TRC);
10037     BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2)
10038         .addReg(NewVReg1, RegState::Kill)
10039         .addImm(PCLabelId)
10040         .add(predOps(ARMCC::AL));
10041     BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12))
10042         .addReg(NewVReg2, RegState::Kill)
10043         .addFrameIndex(FI)
10044         .addImm(36) // &jbuf[1] :: pc
10045         .addMemOperand(FIMMOSt)
10046         .add(predOps(ARMCC::AL));
10047   }
10048 }
10049 
10050 void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
10051                                               MachineBasicBlock *MBB) const {
10052   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
10053   DebugLoc dl = MI.getDebugLoc();
10054   MachineFunction *MF = MBB->getParent();
10055   MachineRegisterInfo *MRI = &MF->getRegInfo();
10056   MachineFrameInfo &MFI = MF->getFrameInfo();
10057   int FI = MFI.getFunctionContextIndex();
10058 
10059   const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass
10060                                                         : &ARM::GPRnopcRegClass;
10061 
10062   // Get a mapping of the call site numbers to all of the landing pads they're
10063   // associated with.
10064   DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad;
10065   unsigned MaxCSNum = 0;
10066   for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E;
10067        ++BB) {
10068     if (!BB->isEHPad()) continue;
10069 
10070     // FIXME: We should assert that the EH_LABEL is the first MI in the landing
10071     // pad.
10072     for (MachineBasicBlock::iterator
10073            II = BB->begin(), IE = BB->end(); II != IE; ++II) {
10074       if (!II->isEHLabel()) continue;
10075 
10076       MCSymbol *Sym = II->getOperand(0).getMCSymbol();
10077       if (!MF->hasCallSiteLandingPad(Sym)) continue;
10078 
10079       SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym);
10080       for (SmallVectorImpl<unsigned>::iterator
10081              CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end();
10082            CSI != CSE; ++CSI) {
10083         CallSiteNumToLPad[*CSI].push_back(&*BB);
10084         MaxCSNum = std::max(MaxCSNum, *CSI);
10085       }
10086       break;
10087     }
10088   }
10089 
10090   // Get an ordered list of the machine basic blocks for the jump table.
10091   std::vector<MachineBasicBlock*> LPadList;
10092   SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs;
10093   LPadList.reserve(CallSiteNumToLPad.size());
10094   for (unsigned I = 1; I <= MaxCSNum; ++I) {
10095     SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
10096     for (SmallVectorImpl<MachineBasicBlock*>::iterator
10097            II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
10098       LPadList.push_back(*II);
10099       InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
10100     }
10101   }
10102 
10103   assert(!LPadList.empty() &&
10104          "No landing pad destinations for the dispatch jump table!");
10105 
10106   // Create the jump table and associated information.
10107   MachineJumpTableInfo *JTI =
10108     MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
10109   unsigned MJTI = JTI->createJumpTableIndex(LPadList);
10110 
10111   // Create the MBBs for the dispatch code.
10112 
10113   // Shove the dispatch's address into the return slot in the function context.
10114   MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
10115   DispatchBB->setIsEHPad();
10116 
10117   MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
10118   unsigned trap_opcode;
10119   if (Subtarget->isThumb())
10120     trap_opcode = ARM::tTRAP;
10121   else
10122     trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;
10123 
10124   BuildMI(TrapBB, dl, TII->get(trap_opcode));
10125   DispatchBB->addSuccessor(TrapBB);
10126 
10127   MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
10128   DispatchBB->addSuccessor(DispContBB);
10129 
10130   // Insert and MBBs.
10131   MF->insert(MF->end(), DispatchBB);
10132   MF->insert(MF->end(), DispContBB);
10133   MF->insert(MF->end(), TrapBB);
10134 
10135   // Insert code into the entry block that creates and registers the function
10136   // context.
10137   SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);
10138 
10139   MachineMemOperand *FIMMOLd = MF->getMachineMemOperand(
10140       MachinePointerInfo::getFixedStack(*MF, FI),
10141       MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, Align(4));
10142 
10143   MachineInstrBuilder MIB;
10144   MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));
10145 
10146   const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
10147   const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
10148 
10149   // Add a register mask with no preserved registers.  This results in all
10150   // registers being marked as clobbered. This can't work if the dispatch block
10151   // is in a Thumb1 function and is linked with ARM code which uses the FP
10152   // registers, as there is no way to preserve the FP registers in Thumb1 mode.
10153   MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF));
10154 
10155   bool IsPositionIndependent = isPositionIndependent();
10156   unsigned NumLPads = LPadList.size();
10157   if (Subtarget->isThumb2()) {
10158     Register NewVReg1 = MRI->createVirtualRegister(TRC);
10159     BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1)
10160         .addFrameIndex(FI)
10161         .addImm(4)
10162         .addMemOperand(FIMMOLd)
10163         .add(predOps(ARMCC::AL));
10164 
10165     if (NumLPads < 256) {
10166       BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri))
10167           .addReg(NewVReg1)
10168           .addImm(LPadList.size())
10169           .add(predOps(ARMCC::AL));
10170     } else {
10171       Register VReg1 = MRI->createVirtualRegister(TRC);
10172       BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1)
10173           .addImm(NumLPads & 0xFFFF)
10174           .add(predOps(ARMCC::AL));
10175 
10176       unsigned VReg2 = VReg1;
10177       if ((NumLPads & 0xFFFF0000) != 0) {
10178         VReg2 = MRI->createVirtualRegister(TRC);
10179         BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2)
10180             .addReg(VReg1)
10181             .addImm(NumLPads >> 16)
10182             .add(predOps(ARMCC::AL));
10183       }
10184 
10185       BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr))
10186           .addReg(NewVReg1)
10187           .addReg(VReg2)
10188           .add(predOps(ARMCC::AL));
10189     }
10190 
10191     BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc))
10192       .addMBB(TrapBB)
10193       .addImm(ARMCC::HI)
10194       .addReg(ARM::CPSR);
10195 
10196     Register NewVReg3 = MRI->createVirtualRegister(TRC);
10197     BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT), NewVReg3)
10198         .addJumpTableIndex(MJTI)
10199         .add(predOps(ARMCC::AL));
10200 
10201     Register NewVReg4 = MRI->createVirtualRegister(TRC);
10202     BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
10203         .addReg(NewVReg3, RegState::Kill)
10204         .addReg(NewVReg1)
10205         .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
10206         .add(predOps(ARMCC::AL))
10207         .add(condCodeOp());
10208 
10209     BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
10210       .addReg(NewVReg4, RegState::Kill)
10211       .addReg(NewVReg1)
10212       .addJumpTableIndex(MJTI);
10213   } else if (Subtarget->isThumb()) {
10214     Register NewVReg1 = MRI->createVirtualRegister(TRC);
10215     BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1)
10216         .addFrameIndex(FI)
10217         .addImm(1)
10218         .addMemOperand(FIMMOLd)
10219         .add(predOps(ARMCC::AL));
10220 
10221     if (NumLPads < 256) {
10222       BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8))
10223           .addReg(NewVReg1)
10224           .addImm(NumLPads)
10225           .add(predOps(ARMCC::AL));
10226     } else {
10227       MachineConstantPool *ConstantPool = MF->getConstantPool();
10228       Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
10229       const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
10230 
10231       // MachineConstantPool wants an explicit alignment.
10232       Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
10233       unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment);
10234 
10235       Register VReg1 = MRI->createVirtualRegister(TRC);
10236       BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci))
10237           .addReg(VReg1, RegState::Define)
10238           .addConstantPoolIndex(Idx)
10239           .add(predOps(ARMCC::AL));
10240       BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr))
10241           .addReg(NewVReg1)
10242           .addReg(VReg1)
10243           .add(predOps(ARMCC::AL));
10244     }
10245 
10246     BuildMI(DispatchBB, dl, TII->get(ARM::tBcc))
10247       .addMBB(TrapBB)
10248       .addImm(ARMCC::HI)
10249       .addReg(ARM::CPSR);
10250 
10251     Register NewVReg2 = MRI->createVirtualRegister(TRC);
10252     BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2)
10253         .addReg(ARM::CPSR, RegState::Define)
10254         .addReg(NewVReg1)
10255         .addImm(2)
10256         .add(predOps(ARMCC::AL));
10257 
10258     Register NewVReg3 = MRI->createVirtualRegister(TRC);
10259     BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3)
10260         .addJumpTableIndex(MJTI)
10261         .add(predOps(ARMCC::AL));
10262 
10263     Register NewVReg4 = MRI->createVirtualRegister(TRC);
10264     BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4)
10265         .addReg(ARM::CPSR, RegState::Define)
10266         .addReg(NewVReg2, RegState::Kill)
10267         .addReg(NewVReg3)
10268         .add(predOps(ARMCC::AL));
10269 
10270     MachineMemOperand *JTMMOLd =
10271         MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF),
10272                                  MachineMemOperand::MOLoad, 4, Align(4));
10273 
10274     Register NewVReg5 = MRI->createVirtualRegister(TRC);
10275     BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5)
10276         .addReg(NewVReg4, RegState::Kill)
10277         .addImm(0)
10278         .addMemOperand(JTMMOLd)
10279         .add(predOps(ARMCC::AL));
10280 
10281     unsigned NewVReg6 = NewVReg5;
10282     if (IsPositionIndependent) {
10283       NewVReg6 = MRI->createVirtualRegister(TRC);
10284       BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6)
10285           .addReg(ARM::CPSR, RegState::Define)
10286           .addReg(NewVReg5, RegState::Kill)
10287           .addReg(NewVReg3)
10288           .add(predOps(ARMCC::AL));
10289     }
10290 
10291     BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr))
10292       .addReg(NewVReg6, RegState::Kill)
10293       .addJumpTableIndex(MJTI);
10294   } else {
10295     Register NewVReg1 = MRI->createVirtualRegister(TRC);
10296     BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1)
10297         .addFrameIndex(FI)
10298         .addImm(4)
10299         .addMemOperand(FIMMOLd)
10300         .add(predOps(ARMCC::AL));
10301 
10302     if (NumLPads < 256) {
10303       BuildMI(DispatchBB, dl, TII->get(ARM::CMPri))
10304           .addReg(NewVReg1)
10305           .addImm(NumLPads)
10306           .add(predOps(ARMCC::AL));
10307     } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) {
10308       Register VReg1 = MRI->createVirtualRegister(TRC);
10309       BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1)
10310           .addImm(NumLPads & 0xFFFF)
10311           .add(predOps(ARMCC::AL));
10312 
10313       unsigned VReg2 = VReg1;
10314       if ((NumLPads & 0xFFFF0000) != 0) {
10315         VReg2 = MRI->createVirtualRegister(TRC);
10316         BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2)
10317             .addReg(VReg1)
10318             .addImm(NumLPads >> 16)
10319             .add(predOps(ARMCC::AL));
10320       }
10321 
10322       BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
10323           .addReg(NewVReg1)
10324           .addReg(VReg2)
10325           .add(predOps(ARMCC::AL));
10326     } else {
10327       MachineConstantPool *ConstantPool = MF->getConstantPool();
10328       Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
10329       const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
10330 
10331       // MachineConstantPool wants an explicit alignment.
10332       Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
10333       unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment);
10334 
10335       Register VReg1 = MRI->createVirtualRegister(TRC);
10336       BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp))
10337           .addReg(VReg1, RegState::Define)
10338           .addConstantPoolIndex(Idx)
10339           .addImm(0)
10340           .add(predOps(ARMCC::AL));
10341       BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
10342           .addReg(NewVReg1)
10343           .addReg(VReg1, RegState::Kill)
10344           .add(predOps(ARMCC::AL));
10345     }
10346 
10347     BuildMI(DispatchBB, dl, TII->get(ARM::Bcc))
10348       .addMBB(TrapBB)
10349       .addImm(ARMCC::HI)
10350       .addReg(ARM::CPSR);
10351 
10352     Register NewVReg3 = MRI->createVirtualRegister(TRC);
10353     BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
10354         .addReg(NewVReg1)
10355         .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
10356         .add(predOps(ARMCC::AL))
10357         .add(condCodeOp());
10358     Register NewVReg4 = MRI->createVirtualRegister(TRC);
10359     BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
10360         .addJumpTableIndex(MJTI)
10361         .add(predOps(ARMCC::AL));
10362 
10363     MachineMemOperand *JTMMOLd =
10364         MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF),
10365                                  MachineMemOperand::MOLoad, 4, Align(4));
10366     Register NewVReg5 = MRI->createVirtualRegister(TRC);
10367     BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
10368         .addReg(NewVReg3, RegState::Kill)
10369         .addReg(NewVReg4)
10370         .addImm(0)
10371         .addMemOperand(JTMMOLd)
10372         .add(predOps(ARMCC::AL));
10373 
10374     if (IsPositionIndependent) {
10375       BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
10376         .addReg(NewVReg5, RegState::Kill)
10377         .addReg(NewVReg4)
10378         .addJumpTableIndex(MJTI);
10379     } else {
10380       BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr))
10381         .addReg(NewVReg5, RegState::Kill)
10382         .addJumpTableIndex(MJTI);
10383     }
10384   }
10385 
10386   // Add the jump table entries as successors to the MBB.
10387   SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs;
10388   for (std::vector<MachineBasicBlock*>::iterator
10389          I = LPadList.begin(), E = LPadList.end(); I != E; ++I) {
10390     MachineBasicBlock *CurMBB = *I;
10391     if (SeenMBBs.insert(CurMBB).second)
10392       DispContBB->addSuccessor(CurMBB);
10393   }
10394 
10395   // N.B. the order the invoke BBs are processed in doesn't matter here.
10396   const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF);
10397   SmallVector<MachineBasicBlock*, 64> MBBLPads;
10398   for (MachineBasicBlock *BB : InvokeBBs) {
10399 
10400     // Remove the landing pad successor from the invoke block and replace it
10401     // with the new dispatch block.
10402     SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(),
10403                                                   BB->succ_end());
10404     while (!Successors.empty()) {
10405       MachineBasicBlock *SMBB = Successors.pop_back_val();
10406       if (SMBB->isEHPad()) {
10407         BB->removeSuccessor(SMBB);
10408         MBBLPads.push_back(SMBB);
10409       }
10410     }
10411 
10412     BB->addSuccessor(DispatchBB, BranchProbability::getZero());
10413     BB->normalizeSuccProbs();
10414 
10415     // Find the invoke call and mark all of the callee-saved registers as
10416     // 'implicit defined' so that they're spilled. This prevents code from
10417     // moving instructions to before the EH block, where they will never be
10418     // executed.
10419     for (MachineBasicBlock::reverse_iterator
10420            II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
10421       if (!II->isCall()) continue;
10422 
10423       DenseMap<unsigned, bool> DefRegs;
10424       for (MachineInstr::mop_iterator
10425              OI = II->operands_begin(), OE = II->operands_end();
10426            OI != OE; ++OI) {
10427         if (!OI->isReg()) continue;
10428         DefRegs[OI->getReg()] = true;
10429       }
10430 
10431       MachineInstrBuilder MIB(*MF, &*II);
10432 
10433       for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
10434         unsigned Reg = SavedRegs[i];
10435         if (Subtarget->isThumb2() &&
10436             !ARM::tGPRRegClass.contains(Reg) &&
10437             !ARM::hGPRRegClass.contains(Reg))
10438           continue;
10439         if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
10440           continue;
10441         if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg))
10442           continue;
10443         if (!DefRegs[Reg])
10444           MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
10445       }
10446 
10447       break;
10448     }
10449   }
10450 
10451   // Mark all former landing pads as non-landing pads. The dispatch is the only
10452   // landing pad now.
10453   for (SmallVectorImpl<MachineBasicBlock*>::iterator
10454          I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I)
10455     (*I)->setIsEHPad(false);
10456 
10457   // The instruction is gone now.
10458   MI.eraseFromParent();
10459 }
10460 
10461 static
10462 MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
10463   for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
10464        E = MBB->succ_end(); I != E; ++I)
10465     if (*I != Succ)
10466       return *I;
10467   llvm_unreachable("Expecting a BB with two successors!");
10468 }
10469 
10470 /// Return the load opcode for a given load size. If load size >= 8,
10471 /// neon opcode will be returned.
10472 static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {
10473   if (LdSize >= 8)
10474     return LdSize == 16 ? ARM::VLD1q32wb_fixed
10475                         : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
10476   if (IsThumb1)
10477     return LdSize == 4 ? ARM::tLDRi
10478                        : LdSize == 2 ? ARM::tLDRHi
10479                                      : LdSize == 1 ? ARM::tLDRBi : 0;
10480   if (IsThumb2)
10481     return LdSize == 4 ? ARM::t2LDR_POST
10482                        : LdSize == 2 ? ARM::t2LDRH_POST
10483                                      : LdSize == 1 ? ARM::t2LDRB_POST : 0;
10484   return LdSize == 4 ? ARM::LDR_POST_IMM
10485                      : LdSize == 2 ? ARM::LDRH_POST
10486                                    : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
10487 }
10488 
10489 /// Return the store opcode for a given store size. If store size >= 8,
10490 /// neon opcode will be returned.
10491 static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
10492   if (StSize >= 8)
10493     return StSize == 16 ? ARM::VST1q32wb_fixed
10494                         : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
10495   if (IsThumb1)
10496     return StSize == 4 ? ARM::tSTRi
10497                        : StSize == 2 ? ARM::tSTRHi
10498                                      : StSize == 1 ? ARM::tSTRBi : 0;
10499   if (IsThumb2)
10500     return StSize == 4 ? ARM::t2STR_POST
10501                        : StSize == 2 ? ARM::t2STRH_POST
10502                                      : StSize == 1 ? ARM::t2STRB_POST : 0;
10503   return StSize == 4 ? ARM::STR_POST_IMM
10504                      : StSize == 2 ? ARM::STRH_POST
10505                                    : StSize == 1 ? ARM::STRB_POST_IMM : 0;
10506 }
10507 
10508 /// Emit a post-increment load operation with given size. The instructions
10509 /// will be added to BB at Pos.
10510 static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
10511                        const TargetInstrInfo *TII, const DebugLoc &dl,
10512                        unsigned LdSize, unsigned Data, unsigned AddrIn,
10513                        unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
10514   unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
10515   assert(LdOpc != 0 && "Should have a load opcode");
10516   if (LdSize >= 8) {
10517     BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
10518         .addReg(AddrOut, RegState::Define)
10519         .addReg(AddrIn)
10520         .addImm(0)
10521         .add(predOps(ARMCC::AL));
10522   } else if (IsThumb1) {
10523     // load + update AddrIn
10524     BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
10525         .addReg(AddrIn)
10526         .addImm(0)
10527         .add(predOps(ARMCC::AL));
10528     BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
10529         .add(t1CondCodeOp())
10530         .addReg(AddrIn)
10531         .addImm(LdSize)
10532         .add(predOps(ARMCC::AL));
10533   } else if (IsThumb2) {
10534     BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
10535         .addReg(AddrOut, RegState::Define)
10536         .addReg(AddrIn)
10537         .addImm(LdSize)
10538         .add(predOps(ARMCC::AL));
10539   } else { // arm
10540     BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
10541         .addReg(AddrOut, RegState::Define)
10542         .addReg(AddrIn)
10543         .addReg(0)
10544         .addImm(LdSize)
10545         .add(predOps(ARMCC::AL));
10546   }
10547 }
10548 
10549 /// Emit a post-increment store operation with given size. The instructions
10550 /// will be added to BB at Pos.
10551 static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
10552                        const TargetInstrInfo *TII, const DebugLoc &dl,
10553                        unsigned StSize, unsigned Data, unsigned AddrIn,
10554                        unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
10555   unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
10556   assert(StOpc != 0 && "Should have a store opcode");
10557   if (StSize >= 8) {
10558     BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
10559         .addReg(AddrIn)
10560         .addImm(0)
10561         .addReg(Data)
10562         .add(predOps(ARMCC::AL));
10563   } else if (IsThumb1) {
10564     // store + update AddrIn
10565     BuildMI(*BB, Pos, dl, TII->get(StOpc))
10566         .addReg(Data)
10567         .addReg(AddrIn)
10568         .addImm(0)
10569         .add(predOps(ARMCC::AL));
10570     BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
10571         .add(t1CondCodeOp())
10572         .addReg(AddrIn)
10573         .addImm(StSize)
10574         .add(predOps(ARMCC::AL));
10575   } else if (IsThumb2) {
10576     BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
10577         .addReg(Data)
10578         .addReg(AddrIn)
10579         .addImm(StSize)
10580         .add(predOps(ARMCC::AL));
10581   } else { // arm
10582     BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
10583         .addReg(Data)
10584         .addReg(AddrIn)
10585         .addReg(0)
10586         .addImm(StSize)
10587         .add(predOps(ARMCC::AL));
10588   }
10589 }
10590 
10591 MachineBasicBlock *
10592 ARMTargetLowering::EmitStructByval(MachineInstr &MI,
10593                                    MachineBasicBlock *BB) const {
10594   // This pseudo instruction has 3 operands: dst, src, size
10595   // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
10596   // Otherwise, we will generate unrolled scalar copies.
10597   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
10598   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10599   MachineFunction::iterator It = ++BB->getIterator();
10600 
10601   Register dest = MI.getOperand(0).getReg();
10602   Register src = MI.getOperand(1).getReg();
10603   unsigned SizeVal = MI.getOperand(2).getImm();
10604   unsigned Alignment = MI.getOperand(3).getImm();
10605   DebugLoc dl = MI.getDebugLoc();
10606 
10607   MachineFunction *MF = BB->getParent();
10608   MachineRegisterInfo &MRI = MF->getRegInfo();
10609   unsigned UnitSize = 0;
10610   const TargetRegisterClass *TRC = nullptr;
10611   const TargetRegisterClass *VecTRC = nullptr;
10612 
10613   bool IsThumb1 = Subtarget->isThumb1Only();
10614   bool IsThumb2 = Subtarget->isThumb2();
10615   bool IsThumb = Subtarget->isThumb();
10616 
10617   if (Alignment & 1) {
10618     UnitSize = 1;
10619   } else if (Alignment & 2) {
10620     UnitSize = 2;
10621   } else {
10622     // Check whether we can use NEON instructions.
10623     if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) &&
10624         Subtarget->hasNEON()) {
10625       if ((Alignment % 16 == 0) && SizeVal >= 16)
10626         UnitSize = 16;
10627       else if ((Alignment % 8 == 0) && SizeVal >= 8)
10628         UnitSize = 8;
10629     }
10630     // Can't use NEON instructions.
10631     if (UnitSize == 0)
10632       UnitSize = 4;
10633   }
10634 
10635   // Select the correct opcode and register class for unit size load/store
10636   bool IsNeon = UnitSize >= 8;
10637   TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
10638   if (IsNeon)
10639     VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
10640                             : UnitSize == 8 ? &ARM::DPRRegClass
10641                                             : nullptr;
10642 
10643   unsigned BytesLeft = SizeVal % UnitSize;
10644   unsigned LoopSize = SizeVal - BytesLeft;
10645 
10646   if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
10647     // Use LDR and STR to copy.
10648     // [scratch, srcOut] = LDR_POST(srcIn, UnitSize)
10649     // [destOut] = STR_POST(scratch, destIn, UnitSize)
10650     unsigned srcIn = src;
10651     unsigned destIn = dest;
10652     for (unsigned i = 0; i < LoopSize; i+=UnitSize) {
10653       Register srcOut = MRI.createVirtualRegister(TRC);
10654       Register destOut = MRI.createVirtualRegister(TRC);
10655       Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
10656       emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
10657                  IsThumb1, IsThumb2);
10658       emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
10659                  IsThumb1, IsThumb2);
10660       srcIn = srcOut;
10661       destIn = destOut;
10662     }
10663 
10664     // Handle the leftover bytes with LDRB and STRB.
10665     // [scratch, srcOut] = LDRB_POST(srcIn, 1)
10666     // [destOut] = STRB_POST(scratch, destIn, 1)
10667     for (unsigned i = 0; i < BytesLeft; i++) {
10668       Register srcOut = MRI.createVirtualRegister(TRC);
10669       Register destOut = MRI.createVirtualRegister(TRC);
10670       Register scratch = MRI.createVirtualRegister(TRC);
10671       emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
10672                  IsThumb1, IsThumb2);
10673       emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
10674                  IsThumb1, IsThumb2);
10675       srcIn = srcOut;
10676       destIn = destOut;
10677     }
10678     MI.eraseFromParent(); // The instruction is gone now.
10679     return BB;
10680   }
10681 
10682   // Expand the pseudo op to a loop.
10683   // thisMBB:
10684   //   ...
10685   //   movw varEnd, # --> with thumb2
10686   //   movt varEnd, #
10687   //   ldrcp varEnd, idx --> without thumb2
10688   //   fallthrough --> loopMBB
10689   // loopMBB:
10690   //   PHI varPhi, varEnd, varLoop
10691   //   PHI srcPhi, src, srcLoop
10692   //   PHI destPhi, dst, destLoop
10693   //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
10694   //   [destLoop] = STR_POST(scratch, destPhi, UnitSize)
10695   //   subs varLoop, varPhi, #UnitSize
10696   //   bne loopMBB
10697   //   fallthrough --> exitMBB
10698   // exitMBB:
10699   //   epilogue to handle left-over bytes
10700   //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
10701   //   [destOut] = STRB_POST(scratch, destLoop, 1)
10702   MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
10703   MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
10704   MF->insert(It, loopMBB);
10705   MF->insert(It, exitMBB);
10706 
10707   // Transfer the remainder of BB and its successor edges to exitMBB.
10708   exitMBB->splice(exitMBB->begin(), BB,
10709                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
10710   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10711 
10712   // Load an immediate to varEnd.
10713   Register varEnd = MRI.createVirtualRegister(TRC);
10714   if (Subtarget->useMovt()) {
10715     unsigned Vtmp = varEnd;
10716     if ((LoopSize & 0xFFFF0000) != 0)
10717       Vtmp = MRI.createVirtualRegister(TRC);
10718     BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp)
10719         .addImm(LoopSize & 0xFFFF)
10720         .add(predOps(ARMCC::AL));
10721 
10722     if ((LoopSize & 0xFFFF0000) != 0)
10723       BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd)
10724           .addReg(Vtmp)
10725           .addImm(LoopSize >> 16)
10726           .add(predOps(ARMCC::AL));
10727   } else {
10728     MachineConstantPool *ConstantPool = MF->getConstantPool();
10729     Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
10730     const Constant *C = ConstantInt::get(Int32Ty, LoopSize);
10731 
10732     // MachineConstantPool wants an explicit alignment.
10733     Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
10734     unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment);
10735     MachineMemOperand *CPMMO =
10736         MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
10737                                  MachineMemOperand::MOLoad, 4, Align(4));
10738 
10739     if (IsThumb)
10740       BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci))
10741           .addReg(varEnd, RegState::Define)
10742           .addConstantPoolIndex(Idx)
10743           .add(predOps(ARMCC::AL))
10744           .addMemOperand(CPMMO);
10745     else
10746       BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp))
10747           .addReg(varEnd, RegState::Define)
10748           .addConstantPoolIndex(Idx)
10749           .addImm(0)
10750           .add(predOps(ARMCC::AL))
10751           .addMemOperand(CPMMO);
10752   }
10753   BB->addSuccessor(loopMBB);
10754 
10755   // Generate the loop body:
10756   //   varPhi = PHI(varLoop, varEnd)
10757   //   srcPhi = PHI(srcLoop, src)
10758   //   destPhi = PHI(destLoop, dst)
10759   MachineBasicBlock *entryBB = BB;
10760   BB = loopMBB;
10761   Register varLoop = MRI.createVirtualRegister(TRC);
10762   Register varPhi = MRI.createVirtualRegister(TRC);
10763   Register srcLoop = MRI.createVirtualRegister(TRC);
10764   Register srcPhi = MRI.createVirtualRegister(TRC);
10765   Register destLoop = MRI.createVirtualRegister(TRC);
10766   Register destPhi = MRI.createVirtualRegister(TRC);
10767 
10768   BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
10769     .addReg(varLoop).addMBB(loopMBB)
10770     .addReg(varEnd).addMBB(entryBB);
10771   BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
10772     .addReg(srcLoop).addMBB(loopMBB)
10773     .addReg(src).addMBB(entryBB);
10774   BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
10775     .addReg(destLoop).addMBB(loopMBB)
10776     .addReg(dest).addMBB(entryBB);
10777 
10778   //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
10779   //   [destLoop] = STR_POST(scratch, destPhi, UnitSiz)
10780   Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
10781   emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
10782              IsThumb1, IsThumb2);
10783   emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
10784              IsThumb1, IsThumb2);
10785 
10786   // Decrement loop variable by UnitSize.
10787   if (IsThumb1) {
10788     BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop)
10789         .add(t1CondCodeOp())
10790         .addReg(varPhi)
10791         .addImm(UnitSize)
10792         .add(predOps(ARMCC::AL));
10793   } else {
10794     MachineInstrBuilder MIB =
10795         BuildMI(*BB, BB->end(), dl,
10796                 TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
10797     MIB.addReg(varPhi)
10798         .addImm(UnitSize)
10799         .add(predOps(ARMCC::AL))
10800         .add(condCodeOp());
10801     MIB->getOperand(5).setReg(ARM::CPSR);
10802     MIB->getOperand(5).setIsDef(true);
10803   }
10804   BuildMI(*BB, BB->end(), dl,
10805           TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
10806       .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
10807 
10808   // loopMBB can loop back to loopMBB or fall through to exitMBB.
10809   BB->addSuccessor(loopMBB);
10810   BB->addSuccessor(exitMBB);
10811 
10812   // Add epilogue to handle BytesLeft.
10813   BB = exitMBB;
10814   auto StartOfExit = exitMBB->begin();
10815 
10816   //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
10817   //   [destOut] = STRB_POST(scratch, destLoop, 1)
10818   unsigned srcIn = srcLoop;
10819   unsigned destIn = destLoop;
10820   for (unsigned i = 0; i < BytesLeft; i++) {
10821     Register srcOut = MRI.createVirtualRegister(TRC);
10822     Register destOut = MRI.createVirtualRegister(TRC);
10823     Register scratch = MRI.createVirtualRegister(TRC);
10824     emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
10825                IsThumb1, IsThumb2);
10826     emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
10827                IsThumb1, IsThumb2);
10828     srcIn = srcOut;
10829     destIn = destOut;
10830   }
10831 
10832   MI.eraseFromParent(); // The instruction is gone now.
10833   return BB;
10834 }
10835 
10836 MachineBasicBlock *
10837 ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
10838                                        MachineBasicBlock *MBB) const {
10839   const TargetMachine &TM = getTargetMachine();
10840   const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
10841   DebugLoc DL = MI.getDebugLoc();
10842 
10843   assert(Subtarget->isTargetWindows() &&
10844          "__chkstk is only supported on Windows");
10845   assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");
10846 
10847   // __chkstk takes the number of words to allocate on the stack in R4, and
10848   // returns the stack adjustment in number of bytes in R4.  This will not
10849   // clober any other registers (other than the obvious lr).
10850   //
10851   // Although, technically, IP should be considered a register which may be
10852   // clobbered, the call itself will not touch it.  Windows on ARM is a pure
10853   // thumb-2 environment, so there is no interworking required.  As a result, we
10854   // do not expect a veneer to be emitted by the linker, clobbering IP.
10855   //
10856   // Each module receives its own copy of __chkstk, so no import thunk is
10857   // required, again, ensuring that IP is not clobbered.
10858   //
10859   // Finally, although some linkers may theoretically provide a trampoline for
10860   // out of range calls (which is quite common due to a 32M range limitation of
10861   // branches for Thumb), we can generate the long-call version via
10862   // -mcmodel=large, alleviating the need for the trampoline which may clobber
10863   // IP.
10864 
10865   switch (TM.getCodeModel()) {
10866   case CodeModel::Tiny:
10867     llvm_unreachable("Tiny code model not available on ARM.");
10868   case CodeModel::Small:
10869   case CodeModel::Medium:
10870   case CodeModel::Kernel:
10871     BuildMI(*MBB, MI, DL, TII.get(ARM::tBL))
10872         .add(predOps(ARMCC::AL))
10873         .addExternalSymbol("__chkstk")
10874         .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
10875         .addReg(ARM::R4, RegState::Implicit | RegState::Define)
10876         .addReg(ARM::R12,
10877                 RegState::Implicit | RegState::Define | RegState::Dead)
10878         .addReg(ARM::CPSR,
10879                 RegState::Implicit | RegState::Define | RegState::Dead);
10880     break;
10881   case CodeModel::Large: {
10882     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
10883     Register Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass);
10884 
10885     BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg)
10886       .addExternalSymbol("__chkstk");
10887     BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr))
10888         .add(predOps(ARMCC::AL))
10889         .addReg(Reg, RegState::Kill)
10890         .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
10891         .addReg(ARM::R4, RegState::Implicit | RegState::Define)
10892         .addReg(ARM::R12,
10893                 RegState::Implicit | RegState::Define | RegState::Dead)
10894         .addReg(ARM::CPSR,
10895                 RegState::Implicit | RegState::Define | RegState::Dead);
10896     break;
10897   }
10898   }
10899 
10900   BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP)
10901       .addReg(ARM::SP, RegState::Kill)
10902       .addReg(ARM::R4, RegState::Kill)
10903       .setMIFlags(MachineInstr::FrameSetup)
10904       .add(predOps(ARMCC::AL))
10905       .add(condCodeOp());
10906 
10907   MI.eraseFromParent();
10908   return MBB;
10909 }
10910 
10911 MachineBasicBlock *
10912 ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI,
10913                                        MachineBasicBlock *MBB) const {
10914   DebugLoc DL = MI.getDebugLoc();
10915   MachineFunction *MF = MBB->getParent();
10916   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
10917 
10918   MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock();
10919   MF->insert(++MBB->getIterator(), ContBB);
10920   ContBB->splice(ContBB->begin(), MBB,
10921                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
10922   ContBB->transferSuccessorsAndUpdatePHIs(MBB);
10923   MBB->addSuccessor(ContBB);
10924 
10925   MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
10926   BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0));
10927   MF->push_back(TrapBB);
10928   MBB->addSuccessor(TrapBB);
10929 
10930   BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8))
10931       .addReg(MI.getOperand(0).getReg())
10932       .addImm(0)
10933       .add(predOps(ARMCC::AL));
10934   BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc))
10935       .addMBB(TrapBB)
10936       .addImm(ARMCC::EQ)
10937       .addReg(ARM::CPSR);
10938 
10939   MI.eraseFromParent();
10940   return ContBB;
10941 }
10942 
10943 // The CPSR operand of SelectItr might be missing a kill marker
10944 // because there were multiple uses of CPSR, and ISel didn't know
10945 // which to mark. Figure out whether SelectItr should have had a
10946 // kill marker, and set it if it should. Returns the correct kill
10947 // marker value.
10948 static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr,
10949                                    MachineBasicBlock* BB,
10950                                    const TargetRegisterInfo* TRI) {
10951   // Scan forward through BB for a use/def of CPSR.
10952   MachineBasicBlock::iterator miI(std::next(SelectItr));
10953   for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
10954     const MachineInstr& mi = *miI;
10955     if (mi.readsRegister(ARM::CPSR))
10956       return false;
10957     if (mi.definesRegister(ARM::CPSR))
10958       break; // Should have kill-flag - update below.
10959   }
10960 
10961   // If we hit the end of the block, check whether CPSR is live into a
10962   // successor.
10963   if (miI == BB->end()) {
10964     for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
10965                                           sEnd = BB->succ_end();
10966          sItr != sEnd; ++sItr) {
10967       MachineBasicBlock* succ = *sItr;
10968       if (succ->isLiveIn(ARM::CPSR))
10969         return false;
10970     }
10971   }
10972 
10973   // We found a def, or hit the end of the basic block and CPSR wasn't live
10974   // out. SelectMI should have a kill flag on CPSR.
10975   SelectItr->addRegisterKilled(ARM::CPSR, TRI);
10976   return true;
10977 }
10978 
10979 MachineBasicBlock *
10980 ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
10981                                                MachineBasicBlock *BB) const {
10982   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
10983   DebugLoc dl = MI.getDebugLoc();
10984   bool isThumb2 = Subtarget->isThumb2();
10985   switch (MI.getOpcode()) {
10986   default: {
10987     MI.print(errs());
10988     llvm_unreachable("Unexpected instr type to insert");
10989   }
10990 
10991   // Thumb1 post-indexed loads are really just single-register LDMs.
10992   case ARM::tLDR_postidx: {
10993     MachineOperand Def(MI.getOperand(1));
10994     BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD))
10995         .add(Def)  // Rn_wb
10996         .add(MI.getOperand(2))  // Rn
10997         .add(MI.getOperand(3))  // PredImm
10998         .add(MI.getOperand(4))  // PredReg
10999         .add(MI.getOperand(0))  // Rt
11000         .cloneMemRefs(MI);
11001     MI.eraseFromParent();
11002     return BB;
11003   }
11004 
11005   // The Thumb2 pre-indexed stores have the same MI operands, they just
11006   // define them differently in the .td files from the isel patterns, so
11007   // they need pseudos.
11008   case ARM::t2STR_preidx:
11009     MI.setDesc(TII->get(ARM::t2STR_PRE));
11010     return BB;
11011   case ARM::t2STRB_preidx:
11012     MI.setDesc(TII->get(ARM::t2STRB_PRE));
11013     return BB;
11014   case ARM::t2STRH_preidx:
11015     MI.setDesc(TII->get(ARM::t2STRH_PRE));
11016     return BB;
11017 
11018   case ARM::STRi_preidx:
11019   case ARM::STRBi_preidx: {
11020     unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM
11021                                                          : ARM::STRB_PRE_IMM;
11022     // Decode the offset.
11023     unsigned Offset = MI.getOperand(4).getImm();
11024     bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub;
11025     Offset = ARM_AM::getAM2Offset(Offset);
11026     if (isSub)
11027       Offset = -Offset;
11028 
11029     MachineMemOperand *MMO = *MI.memoperands_begin();
11030     BuildMI(*BB, MI, dl, TII->get(NewOpc))
11031         .add(MI.getOperand(0)) // Rn_wb
11032         .add(MI.getOperand(1)) // Rt
11033         .add(MI.getOperand(2)) // Rn
11034         .addImm(Offset)        // offset (skip GPR==zero_reg)
11035         .add(MI.getOperand(5)) // pred
11036         .add(MI.getOperand(6))
11037         .addMemOperand(MMO);
11038     MI.eraseFromParent();
11039     return BB;
11040   }
11041   case ARM::STRr_preidx:
11042   case ARM::STRBr_preidx:
11043   case ARM::STRH_preidx: {
11044     unsigned NewOpc;
11045     switch (MI.getOpcode()) {
11046     default: llvm_unreachable("unexpected opcode!");
11047     case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break;
11048     case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break;
11049     case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break;
11050     }
11051     MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc));
11052     for (unsigned i = 0; i < MI.getNumOperands(); ++i)
11053       MIB.add(MI.getOperand(i));
11054     MI.eraseFromParent();
11055     return BB;
11056   }
11057 
11058   case ARM::tMOVCCr_pseudo: {
11059     // To "insert" a SELECT_CC instruction, we actually have to insert the
11060     // diamond control-flow pattern.  The incoming instruction knows the
11061     // destination vreg to set, the condition code register to branch on, the
11062     // true/false values to select between, and a branch opcode to use.
11063     const BasicBlock *LLVM_BB = BB->getBasicBlock();
11064     MachineFunction::iterator It = ++BB->getIterator();
11065 
11066     //  thisMBB:
11067     //  ...
11068     //   TrueVal = ...
11069     //   cmpTY ccX, r1, r2
11070     //   bCC copy1MBB
11071     //   fallthrough --> copy0MBB
11072     MachineBasicBlock *thisMBB  = BB;
11073     MachineFunction *F = BB->getParent();
11074     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
11075     MachineBasicBlock *sinkMBB  = F->CreateMachineBasicBlock(LLVM_BB);
11076     F->insert(It, copy0MBB);
11077     F->insert(It, sinkMBB);
11078 
11079     // Check whether CPSR is live past the tMOVCCr_pseudo.
11080     const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
11081     if (!MI.killsRegister(ARM::CPSR) &&
11082         !checkAndUpdateCPSRKill(MI, thisMBB, TRI)) {
11083       copy0MBB->addLiveIn(ARM::CPSR);
11084       sinkMBB->addLiveIn(ARM::CPSR);
11085     }
11086 
11087     // Transfer the remainder of BB and its successor edges to sinkMBB.
11088     sinkMBB->splice(sinkMBB->begin(), BB,
11089                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11090     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11091 
11092     BB->addSuccessor(copy0MBB);
11093     BB->addSuccessor(sinkMBB);
11094 
11095     BuildMI(BB, dl, TII->get(ARM::tBcc))
11096         .addMBB(sinkMBB)
11097         .addImm(MI.getOperand(3).getImm())
11098         .addReg(MI.getOperand(4).getReg());
11099 
11100     //  copy0MBB:
11101     //   %FalseValue = ...
11102     //   # fallthrough to sinkMBB
11103     BB = copy0MBB;
11104 
11105     // Update machine-CFG edges
11106     BB->addSuccessor(sinkMBB);
11107 
11108     //  sinkMBB:
11109     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
11110     //  ...
11111     BB = sinkMBB;
11112     BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg())
11113         .addReg(MI.getOperand(1).getReg())
11114         .addMBB(copy0MBB)
11115         .addReg(MI.getOperand(2).getReg())
11116         .addMBB(thisMBB);
11117 
11118     MI.eraseFromParent(); // The pseudo instruction is gone now.
11119     return BB;
11120   }
11121 
11122   case ARM::BCCi64:
11123   case ARM::BCCZi64: {
11124     // If there is an unconditional branch to the other successor, remove it.
11125     BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end());
11126 
11127     // Compare both parts that make up the double comparison separately for
11128     // equality.
11129     bool RHSisZero = MI.getOpcode() == ARM::BCCZi64;
11130 
11131     Register LHS1 = MI.getOperand(1).getReg();
11132     Register LHS2 = MI.getOperand(2).getReg();
11133     if (RHSisZero) {
11134       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
11135           .addReg(LHS1)
11136           .addImm(0)
11137           .add(predOps(ARMCC::AL));
11138       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
11139         .addReg(LHS2).addImm(0)
11140         .addImm(ARMCC::EQ).addReg(ARM::CPSR);
11141     } else {
11142       Register RHS1 = MI.getOperand(3).getReg();
11143       Register RHS2 = MI.getOperand(4).getReg();
11144       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
11145           .addReg(LHS1)
11146           .addReg(RHS1)
11147           .add(predOps(ARMCC::AL));
11148       BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
11149         .addReg(LHS2).addReg(RHS2)
11150         .addImm(ARMCC::EQ).addReg(ARM::CPSR);
11151     }
11152 
11153     MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB();
11154     MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
11155     if (MI.getOperand(0).getImm() == ARMCC::NE)
11156       std::swap(destMBB, exitMBB);
11157 
11158     BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
11159       .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
11160     if (isThumb2)
11161       BuildMI(BB, dl, TII->get(ARM::t2B))
11162           .addMBB(exitMBB)
11163           .add(predOps(ARMCC::AL));
11164     else
11165       BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB);
11166 
11167     MI.eraseFromParent(); // The pseudo instruction is gone now.
11168     return BB;
11169   }
11170 
11171   case ARM::Int_eh_sjlj_setjmp:
11172   case ARM::Int_eh_sjlj_setjmp_nofp:
11173   case ARM::tInt_eh_sjlj_setjmp:
11174   case ARM::t2Int_eh_sjlj_setjmp:
11175   case ARM::t2Int_eh_sjlj_setjmp_nofp:
11176     return BB;
11177 
11178   case ARM::Int_eh_sjlj_setup_dispatch:
11179     EmitSjLjDispatchBlock(MI, BB);
11180     return BB;
11181 
11182   case ARM::ABS:
11183   case ARM::t2ABS: {
11184     // To insert an ABS instruction, we have to insert the
11185     // diamond control-flow pattern.  The incoming instruction knows the
11186     // source vreg to test against 0, the destination vreg to set,
11187     // the condition code register to branch on, the
11188     // true/false values to select between, and a branch opcode to use.
11189     // It transforms
11190     //     V1 = ABS V0
11191     // into
11192     //     V2 = MOVS V0
11193     //     BCC                      (branch to SinkBB if V0 >= 0)
11194     //     RSBBB: V3 = RSBri V2, 0  (compute ABS if V2 < 0)
11195     //     SinkBB: V1 = PHI(V2, V3)
11196     const BasicBlock *LLVM_BB = BB->getBasicBlock();
11197     MachineFunction::iterator BBI = ++BB->getIterator();
11198     MachineFunction *Fn = BB->getParent();
11199     MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB);
11200     MachineBasicBlock *SinkBB  = Fn->CreateMachineBasicBlock(LLVM_BB);
11201     Fn->insert(BBI, RSBBB);
11202     Fn->insert(BBI, SinkBB);
11203 
11204     Register ABSSrcReg = MI.getOperand(1).getReg();
11205     Register ABSDstReg = MI.getOperand(0).getReg();
11206     bool ABSSrcKIll = MI.getOperand(1).isKill();
11207     bool isThumb2 = Subtarget->isThumb2();
11208     MachineRegisterInfo &MRI = Fn->getRegInfo();
11209     // In Thumb mode S must not be specified if source register is the SP or
11210     // PC and if destination register is the SP, so restrict register class
11211     Register NewRsbDstReg = MRI.createVirtualRegister(
11212         isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass);
11213 
11214     // Transfer the remainder of BB and its successor edges to sinkMBB.
11215     SinkBB->splice(SinkBB->begin(), BB,
11216                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
11217     SinkBB->transferSuccessorsAndUpdatePHIs(BB);
11218 
11219     BB->addSuccessor(RSBBB);
11220     BB->addSuccessor(SinkBB);
11221 
11222     // fall through to SinkMBB
11223     RSBBB->addSuccessor(SinkBB);
11224 
11225     // insert a cmp at the end of BB
11226     BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
11227         .addReg(ABSSrcReg)
11228         .addImm(0)
11229         .add(predOps(ARMCC::AL));
11230 
11231     // insert a bcc with opposite CC to ARMCC::MI at the end of BB
11232     BuildMI(BB, dl,
11233       TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
11234       .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);
11235 
11236     // insert rsbri in RSBBB
11237     // Note: BCC and rsbri will be converted into predicated rsbmi
11238     // by if-conversion pass
11239     BuildMI(*RSBBB, RSBBB->begin(), dl,
11240             TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
11241         .addReg(ABSSrcReg, ABSSrcKIll ? RegState::Kill : 0)
11242         .addImm(0)
11243         .add(predOps(ARMCC::AL))
11244         .add(condCodeOp());
11245 
11246     // insert PHI in SinkBB,
11247     // reuse ABSDstReg to not change uses of ABS instruction
11248     BuildMI(*SinkBB, SinkBB->begin(), dl,
11249       TII->get(ARM::PHI), ABSDstReg)
11250       .addReg(NewRsbDstReg).addMBB(RSBBB)
11251       .addReg(ABSSrcReg).addMBB(BB);
11252 
11253     // remove ABS instruction
11254     MI.eraseFromParent();
11255 
11256     // return last added BB
11257     return SinkBB;
11258   }
11259   case ARM::COPY_STRUCT_BYVAL_I32:
11260     ++NumLoopByVals;
11261     return EmitStructByval(MI, BB);
11262   case ARM::WIN__CHKSTK:
11263     return EmitLowered__chkstk(MI, BB);
11264   case ARM::WIN__DBZCHK:
11265     return EmitLowered__dbzchk(MI, BB);
11266   }
11267 }
11268 
11269 /// Attaches vregs to MEMCPY that it will use as scratch registers
11270 /// when it is expanded into LDM/STM. This is done as a post-isel lowering
11271 /// instead of as a custom inserter because we need the use list from the SDNode.
11272 static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
11273                                     MachineInstr &MI, const SDNode *Node) {
11274   bool isThumb1 = Subtarget->isThumb1Only();
11275 
11276   DebugLoc DL = MI.getDebugLoc();
11277   MachineFunction *MF = MI.getParent()->getParent();
11278   MachineRegisterInfo &MRI = MF->getRegInfo();
11279   MachineInstrBuilder MIB(*MF, MI);
11280 
11281   // If the new dst/src is unused mark it as dead.
11282   if (!Node->hasAnyUseOfValue(0)) {
11283     MI.getOperand(0).setIsDead(true);
11284   }
11285   if (!Node->hasAnyUseOfValue(1)) {
11286     MI.getOperand(1).setIsDead(true);
11287   }
11288 
11289   // The MEMCPY both defines and kills the scratch registers.
11290   for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) {
11291     Register TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
11292                                                          : &ARM::GPRRegClass);
11293     MIB.addReg(TmpReg, RegState::Define|RegState::Dead);
11294   }
11295 }
11296 
11297 void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
11298                                                       SDNode *Node) const {
11299   if (MI.getOpcode() == ARM::MEMCPY) {
11300     attachMEMCPYScratchRegs(Subtarget, MI, Node);
11301     return;
11302   }
11303 
11304   const MCInstrDesc *MCID = &MI.getDesc();
11305   // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB,
11306   // RSC. Coming out of isel, they have an implicit CPSR def, but the optional
11307   // operand is still set to noreg. If needed, set the optional operand's
11308   // register to CPSR, and remove the redundant implicit def.
11309   //
11310   // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR).
11311 
11312   // Rename pseudo opcodes.
11313   unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
11314   unsigned ccOutIdx;
11315   if (NewOpc) {
11316     const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
11317     MCID = &TII->get(NewOpc);
11318 
11319     assert(MCID->getNumOperands() ==
11320            MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize()
11321         && "converted opcode should be the same except for cc_out"
11322            " (and, on Thumb1, pred)");
11323 
11324     MI.setDesc(*MCID);
11325 
11326     // Add the optional cc_out operand
11327     MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));
11328 
11329     // On Thumb1, move all input operands to the end, then add the predicate
11330     if (Subtarget->isThumb1Only()) {
11331       for (unsigned c = MCID->getNumOperands() - 4; c--;) {
11332         MI.addOperand(MI.getOperand(1));
11333         MI.RemoveOperand(1);
11334       }
11335 
11336       // Restore the ties
11337       for (unsigned i = MI.getNumOperands(); i--;) {
11338         const MachineOperand& op = MI.getOperand(i);
11339         if (op.isReg() && op.isUse()) {
11340           int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO);
11341           if (DefIdx != -1)
11342             MI.tieOperands(DefIdx, i);
11343         }
11344       }
11345 
11346       MI.addOperand(MachineOperand::CreateImm(ARMCC::AL));
11347       MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false));
11348       ccOutIdx = 1;
11349     } else
11350       ccOutIdx = MCID->getNumOperands() - 1;
11351   } else
11352     ccOutIdx = MCID->getNumOperands() - 1;
11353 
11354   // Any ARM instruction that sets the 's' bit should specify an optional
11355   // "cc_out" operand in the last operand position.
11356   if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
11357     assert(!NewOpc && "Optional cc_out operand required");
11358     return;
11359   }
11360   // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it
11361   // since we already have an optional CPSR def.
11362   bool definesCPSR = false;
11363   bool deadCPSR = false;
11364   for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e;
11365        ++i) {
11366     const MachineOperand &MO = MI.getOperand(i);
11367     if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
11368       definesCPSR = true;
11369       if (MO.isDead())
11370         deadCPSR = true;
11371       MI.RemoveOperand(i);
11372       break;
11373     }
11374   }
11375   if (!definesCPSR) {
11376     assert(!NewOpc && "Optional cc_out operand required");
11377     return;
11378   }
11379   assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
11380   if (deadCPSR) {
11381     assert(!MI.getOperand(ccOutIdx).getReg() &&
11382            "expect uninitialized optional cc_out operand");
11383     // Thumb1 instructions must have the S bit even if the CPSR is dead.
11384     if (!Subtarget->isThumb1Only())
11385       return;
11386   }
11387 
11388   // If this instruction was defined with an optional CPSR def and its dag node
11389   // had a live implicit CPSR def, then activate the optional CPSR def.
11390   MachineOperand &MO = MI.getOperand(ccOutIdx);
11391   MO.setReg(ARM::CPSR);
11392   MO.setIsDef(true);
11393 }
11394 
11395 //===----------------------------------------------------------------------===//
11396 //                           ARM Optimization Hooks
11397 //===----------------------------------------------------------------------===//
11398 
11399 // Helper function that checks if N is a null or all ones constant.
11400 static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
11401   return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
11402 }
11403 
11404 // Return true if N is conditionally 0 or all ones.
11405 // Detects these expressions where cc is an i1 value:
11406 //
11407 //   (select cc 0, y)   [AllOnes=0]
11408 //   (select cc y, 0)   [AllOnes=0]
11409 //   (zext cc)          [AllOnes=0]
11410 //   (sext cc)          [AllOnes=0/1]
11411 //   (select cc -1, y)  [AllOnes=1]
11412 //   (select cc y, -1)  [AllOnes=1]
11413 //
11414 // Invert is set when N is the null/all ones constant when CC is false.
11415 // OtherOp is set to the alternative value of N.
11416 static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
11417                                        SDValue &CC, bool &Invert,
11418                                        SDValue &OtherOp,
11419                                        SelectionDAG &DAG) {
11420   switch (N->getOpcode()) {
11421   default: return false;
11422   case ISD::SELECT: {
11423     CC = N->getOperand(0);
11424     SDValue N1 = N->getOperand(1);
11425     SDValue N2 = N->getOperand(2);
11426     if (isZeroOrAllOnes(N1, AllOnes)) {
11427       Invert = false;
11428       OtherOp = N2;
11429       return true;
11430     }
11431     if (isZeroOrAllOnes(N2, AllOnes)) {
11432       Invert = true;
11433       OtherOp = N1;
11434       return true;
11435     }
11436     return false;
11437   }
11438   case ISD::ZERO_EXTEND:
11439     // (zext cc) can never be the all ones value.
11440     if (AllOnes)
11441       return false;
11442     LLVM_FALLTHROUGH;
11443   case ISD::SIGN_EXTEND: {
11444     SDLoc dl(N);
11445     EVT VT = N->getValueType(0);
11446     CC = N->getOperand(0);
11447     if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC)
11448       return false;
11449     Invert = !AllOnes;
11450     if (AllOnes)
11451       // When looking for an AllOnes constant, N is an sext, and the 'other'
11452       // value is 0.
11453       OtherOp = DAG.getConstant(0, dl, VT);
11454     else if (N->getOpcode() == ISD::ZERO_EXTEND)
11455       // When looking for a 0 constant, N can be zext or sext.
11456       OtherOp = DAG.getConstant(1, dl, VT);
11457     else
11458       OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl,
11459                                 VT);
11460     return true;
11461   }
11462   }
11463 }
11464 
11465 // Combine a constant select operand into its use:
11466 //
11467 //   (add (select cc, 0, c), x)  -> (select cc, x, (add, x, c))
11468 //   (sub x, (select cc, 0, c))  -> (select cc, x, (sub, x, c))
11469 //   (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))  [AllOnes=1]
11470 //   (or  (select cc, 0, c), x)  -> (select cc, x, (or, x, c))
11471 //   (xor (select cc, 0, c), x)  -> (select cc, x, (xor, x, c))
11472 //
11473 // The transform is rejected if the select doesn't have a constant operand that
11474 // is null, or all ones when AllOnes is set.
11475 //
11476 // Also recognize sext/zext from i1:
11477 //
11478 //   (add (zext cc), x) -> (select cc (add x, 1), x)
11479 //   (add (sext cc), x) -> (select cc (add x, -1), x)
11480 //
11481 // These transformations eventually create predicated instructions.
11482 //
11483 // @param N       The node to transform.
11484 // @param Slct    The N operand that is a select.
11485 // @param OtherOp The other N operand (x above).
11486 // @param DCI     Context.
11487 // @param AllOnes Require the select constant to be all ones instead of null.
11488 // @returns The new node, or SDValue() on failure.
11489 static
11490 SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
11491                             TargetLowering::DAGCombinerInfo &DCI,
11492                             bool AllOnes = false) {
11493   SelectionDAG &DAG = DCI.DAG;
11494   EVT VT = N->getValueType(0);
11495   SDValue NonConstantVal;
11496   SDValue CCOp;
11497   bool SwapSelectOps;
11498   if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
11499                                   NonConstantVal, DAG))
11500     return SDValue();
11501 
11502   // Slct is now know to be the desired identity constant when CC is true.
11503   SDValue TrueVal = OtherOp;
11504   SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
11505                                  OtherOp, NonConstantVal);
11506   // Unless SwapSelectOps says CC should be false.
11507   if (SwapSelectOps)
11508     std::swap(TrueVal, FalseVal);
11509 
11510   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
11511                      CCOp, TrueVal, FalseVal);
11512 }
11513 
11514 // Attempt combineSelectAndUse on each operand of a commutative operator N.
11515 static
11516 SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
11517                                        TargetLowering::DAGCombinerInfo &DCI) {
11518   SDValue N0 = N->getOperand(0);
11519   SDValue N1 = N->getOperand(1);
11520   if (N0.getNode()->hasOneUse())
11521     if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
11522       return Result;
11523   if (N1.getNode()->hasOneUse())
11524     if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
11525       return Result;
11526   return SDValue();
11527 }
11528 
11529 static bool IsVUZPShuffleNode(SDNode *N) {
11530   // VUZP shuffle node.
11531   if (N->getOpcode() == ARMISD::VUZP)
11532     return true;
11533 
11534   // "VUZP" on i32 is an alias for VTRN.
11535   if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32)
11536     return true;
11537 
11538   return false;
11539 }
11540 
11541 static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1,
11542                                  TargetLowering::DAGCombinerInfo &DCI,
11543                                  const ARMSubtarget *Subtarget) {
11544   // Look for ADD(VUZP.0, VUZP.1).
11545   if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() ||
11546       N0 == N1)
11547    return SDValue();
11548 
11549   // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD.
11550   if (!N->getValueType(0).is64BitVector())
11551     return SDValue();
11552 
11553   // Generate vpadd.
11554   SelectionDAG &DAG = DCI.DAG;
11555   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11556   SDLoc dl(N);
11557   SDNode *Unzip = N0.getNode();
11558   EVT VT = N->getValueType(0);
11559 
11560   SmallVector<SDValue, 8> Ops;
11561   Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl,
11562                                 TLI.getPointerTy(DAG.getDataLayout())));
11563   Ops.push_back(Unzip->getOperand(0));
11564   Ops.push_back(Unzip->getOperand(1));
11565 
11566   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
11567 }
11568 
11569 static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1,
11570                                       TargetLowering::DAGCombinerInfo &DCI,
11571                                       const ARMSubtarget *Subtarget) {
11572   // Check for two extended operands.
11573   if (!(N0.getOpcode() == ISD::SIGN_EXTEND &&
11574         N1.getOpcode() == ISD::SIGN_EXTEND) &&
11575       !(N0.getOpcode() == ISD::ZERO_EXTEND &&
11576         N1.getOpcode() == ISD::ZERO_EXTEND))
11577     return SDValue();
11578 
11579   SDValue N00 = N0.getOperand(0);
11580   SDValue N10 = N1.getOperand(0);
11581 
11582   // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1))
11583   if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() ||
11584       N00 == N10)
11585     return SDValue();
11586 
11587   // We only recognize Q register paddl here; this can't be reached until
11588   // after type legalization.
11589   if (!N00.getValueType().is64BitVector() ||
11590       !N0.getValueType().is128BitVector())
11591     return SDValue();
11592 
11593   // Generate vpaddl.
11594   SelectionDAG &DAG = DCI.DAG;
11595   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11596   SDLoc dl(N);
11597   EVT VT = N->getValueType(0);
11598 
11599   SmallVector<SDValue, 8> Ops;
11600   // Form vpaddl.sN or vpaddl.uN depending on the kind of extension.
11601   unsigned Opcode;
11602   if (N0.getOpcode() == ISD::SIGN_EXTEND)
11603     Opcode = Intrinsic::arm_neon_vpaddls;
11604   else
11605     Opcode = Intrinsic::arm_neon_vpaddlu;
11606   Ops.push_back(DAG.getConstant(Opcode, dl,
11607                                 TLI.getPointerTy(DAG.getDataLayout())));
11608   EVT ElemTy = N00.getValueType().getVectorElementType();
11609   unsigned NumElts = VT.getVectorNumElements();
11610   EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2);
11611   SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT,
11612                                N00.getOperand(0), N00.getOperand(1));
11613   Ops.push_back(Concat);
11614 
11615   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
11616 }
11617 
11618 // FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in
11619 // an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is
11620 // much easier to match.
11621 static SDValue
11622 AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1,
11623                                TargetLowering::DAGCombinerInfo &DCI,
11624                                const ARMSubtarget *Subtarget) {
11625   // Only perform optimization if after legalize, and if NEON is available. We
11626   // also expected both operands to be BUILD_VECTORs.
11627   if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
11628       || N0.getOpcode() != ISD::BUILD_VECTOR
11629       || N1.getOpcode() != ISD::BUILD_VECTOR)
11630     return SDValue();
11631 
11632   // Check output type since VPADDL operand elements can only be 8, 16, or 32.
11633   EVT VT = N->getValueType(0);
11634   if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
11635     return SDValue();
11636 
11637   // Check that the vector operands are of the right form.
11638   // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR
11639   // operands, where N is the size of the formed vector.
11640   // Each EXTRACT_VECTOR should have the same input vector and odd or even
11641   // index such that we have a pair wise add pattern.
11642 
11643   // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
11644   if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11645     return SDValue();
11646   SDValue Vec = N0->getOperand(0)->getOperand(0);
11647   SDNode *V = Vec.getNode();
11648   unsigned nextIndex = 0;
11649 
11650   // For each operands to the ADD which are BUILD_VECTORs,
11651   // check to see if each of their operands are an EXTRACT_VECTOR with
11652   // the same vector and appropriate index.
11653   for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
11654     if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
11655         && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
11656 
11657       SDValue ExtVec0 = N0->getOperand(i);
11658       SDValue ExtVec1 = N1->getOperand(i);
11659 
11660       // First operand is the vector, verify its the same.
11661       if (V != ExtVec0->getOperand(0).getNode() ||
11662           V != ExtVec1->getOperand(0).getNode())
11663         return SDValue();
11664 
11665       // Second is the constant, verify its correct.
11666       ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
11667       ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));
11668 
11669       // For the constant, we want to see all the even or all the odd.
11670       if (!C0 || !C1 || C0->getZExtValue() != nextIndex
11671           || C1->getZExtValue() != nextIndex+1)
11672         return SDValue();
11673 
11674       // Increment index.
11675       nextIndex+=2;
11676     } else
11677       return SDValue();
11678   }
11679 
11680   // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure
11681   // we're using the entire input vector, otherwise there's a size/legality
11682   // mismatch somewhere.
11683   if (nextIndex != Vec.getValueType().getVectorNumElements() ||
11684       Vec.getValueType().getVectorElementType() == VT.getVectorElementType())
11685     return SDValue();
11686 
11687   // Create VPADDL node.
11688   SelectionDAG &DAG = DCI.DAG;
11689   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11690 
11691   SDLoc dl(N);
11692 
11693   // Build operand list.
11694   SmallVector<SDValue, 8> Ops;
11695   Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
11696                                 TLI.getPointerTy(DAG.getDataLayout())));
11697 
11698   // Input is the vector.
11699   Ops.push_back(Vec);
11700 
11701   // Get widened type and narrowed type.
11702   MVT widenType;
11703   unsigned numElem = VT.getVectorNumElements();
11704 
11705   EVT inputLaneType = Vec.getValueType().getVectorElementType();
11706   switch (inputLaneType.getSimpleVT().SimpleTy) {
11707     case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
11708     case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
11709     case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
11710     default:
11711       llvm_unreachable("Invalid vector element type for padd optimization.");
11712   }
11713 
11714   SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops);
11715   unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
11716   return DAG.getNode(ExtOp, dl, VT, tmp);
11717 }
11718 
11719 static SDValue findMUL_LOHI(SDValue V) {
11720   if (V->getOpcode() == ISD::UMUL_LOHI ||
11721       V->getOpcode() == ISD::SMUL_LOHI)
11722     return V;
11723   return SDValue();
11724 }
11725 
11726 static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode,
11727                                         TargetLowering::DAGCombinerInfo &DCI,
11728                                         const ARMSubtarget *Subtarget) {
11729   if (!Subtarget->hasBaseDSP())
11730     return SDValue();
11731 
11732   // SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and
11733   // accumulates the product into a 64-bit value. The 16-bit values will
11734   // be sign extended somehow or SRA'd into 32-bit values
11735   // (addc (adde (mul 16bit, 16bit), lo), hi)
11736   SDValue Mul = AddcNode->getOperand(0);
11737   SDValue Lo = AddcNode->getOperand(1);
11738   if (Mul.getOpcode() != ISD::MUL) {
11739     Lo = AddcNode->getOperand(0);
11740     Mul = AddcNode->getOperand(1);
11741     if (Mul.getOpcode() != ISD::MUL)
11742       return SDValue();
11743   }
11744 
11745   SDValue SRA = AddeNode->getOperand(0);
11746   SDValue Hi = AddeNode->getOperand(1);
11747   if (SRA.getOpcode() != ISD::SRA) {
11748     SRA = AddeNode->getOperand(1);
11749     Hi = AddeNode->getOperand(0);
11750     if (SRA.getOpcode() != ISD::SRA)
11751       return SDValue();
11752   }
11753   if (auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) {
11754     if (Const->getZExtValue() != 31)
11755       return SDValue();
11756   } else
11757     return SDValue();
11758 
11759   if (SRA.getOperand(0) != Mul)
11760     return SDValue();
11761 
11762   SelectionDAG &DAG = DCI.DAG;
11763   SDLoc dl(AddcNode);
11764   unsigned Opcode = 0;
11765   SDValue Op0;
11766   SDValue Op1;
11767 
11768   if (isS16(Mul.getOperand(0), DAG) && isS16(Mul.getOperand(1), DAG)) {
11769     Opcode = ARMISD::SMLALBB;
11770     Op0 = Mul.getOperand(0);
11771     Op1 = Mul.getOperand(1);
11772   } else if (isS16(Mul.getOperand(0), DAG) && isSRA16(Mul.getOperand(1))) {
11773     Opcode = ARMISD::SMLALBT;
11774     Op0 = Mul.getOperand(0);
11775     Op1 = Mul.getOperand(1).getOperand(0);
11776   } else if (isSRA16(Mul.getOperand(0)) && isS16(Mul.getOperand(1), DAG)) {
11777     Opcode = ARMISD::SMLALTB;
11778     Op0 = Mul.getOperand(0).getOperand(0);
11779     Op1 = Mul.getOperand(1);
11780   } else if (isSRA16(Mul.getOperand(0)) && isSRA16(Mul.getOperand(1))) {
11781     Opcode = ARMISD::SMLALTT;
11782     Op0 = Mul->getOperand(0).getOperand(0);
11783     Op1 = Mul->getOperand(1).getOperand(0);
11784   }
11785 
11786   if (!Op0 || !Op1)
11787     return SDValue();
11788 
11789   SDValue SMLAL = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
11790                               Op0, Op1, Lo, Hi);
11791   // Replace the ADDs' nodes uses by the MLA node's values.
11792   SDValue HiMLALResult(SMLAL.getNode(), 1);
11793   SDValue LoMLALResult(SMLAL.getNode(), 0);
11794 
11795   DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);
11796   DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);
11797 
11798   // Return original node to notify the driver to stop replacing.
11799   SDValue resNode(AddcNode, 0);
11800   return resNode;
11801 }
11802 
11803 static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode,
11804                                      TargetLowering::DAGCombinerInfo &DCI,
11805                                      const ARMSubtarget *Subtarget) {
11806   // Look for multiply add opportunities.
11807   // The pattern is a ISD::UMUL_LOHI followed by two add nodes, where
11808   // each add nodes consumes a value from ISD::UMUL_LOHI and there is
11809   // a glue link from the first add to the second add.
11810   // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
11811   // a S/UMLAL instruction.
11812   //                  UMUL_LOHI
11813   //                 / :lo    \ :hi
11814   //                V          \          [no multiline comment]
11815   //    loAdd ->  ADDC         |
11816   //                 \ :carry /
11817   //                  V      V
11818   //                    ADDE   <- hiAdd
11819   //
11820   // In the special case where only the higher part of a signed result is used
11821   // and the add to the low part of the result of ISD::UMUL_LOHI adds or subtracts
11822   // a constant with the exact value of 0x80000000, we recognize we are dealing
11823   // with a "rounded multiply and add" (or subtract) and transform it into
11824   // either a ARMISD::SMMLAR or ARMISD::SMMLSR respectively.
11825 
11826   assert((AddeSubeNode->getOpcode() == ARMISD::ADDE ||
11827           AddeSubeNode->getOpcode() == ARMISD::SUBE) &&
11828          "Expect an ADDE or SUBE");
11829 
11830   assert(AddeSubeNode->getNumOperands() == 3 &&
11831          AddeSubeNode->getOperand(2).getValueType() == MVT::i32 &&
11832          "ADDE node has the wrong inputs");
11833 
11834   // Check that we are chained to the right ADDC or SUBC node.
11835   SDNode *AddcSubcNode = AddeSubeNode->getOperand(2).getNode();
11836   if ((AddeSubeNode->getOpcode() == ARMISD::ADDE &&
11837        AddcSubcNode->getOpcode() != ARMISD::ADDC) ||
11838       (AddeSubeNode->getOpcode() == ARMISD::SUBE &&
11839        AddcSubcNode->getOpcode() != ARMISD::SUBC))
11840     return SDValue();
11841 
11842   SDValue AddcSubcOp0 = AddcSubcNode->getOperand(0);
11843   SDValue AddcSubcOp1 = AddcSubcNode->getOperand(1);
11844 
11845   // Check if the two operands are from the same mul_lohi node.
11846   if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode())
11847     return SDValue();
11848 
11849   assert(AddcSubcNode->getNumValues() == 2 &&
11850          AddcSubcNode->getValueType(0) == MVT::i32 &&
11851          "Expect ADDC with two result values. First: i32");
11852 
11853   // Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it
11854   // maybe a SMLAL which multiplies two 16-bit values.
11855   if (AddeSubeNode->getOpcode() == ARMISD::ADDE &&
11856       AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI &&
11857       AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI &&
11858       AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI &&
11859       AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI)
11860     return AddCombineTo64BitSMLAL16(AddcSubcNode, AddeSubeNode, DCI, Subtarget);
11861 
11862   // Check for the triangle shape.
11863   SDValue AddeSubeOp0 = AddeSubeNode->getOperand(0);
11864   SDValue AddeSubeOp1 = AddeSubeNode->getOperand(1);
11865 
11866   // Make sure that the ADDE/SUBE operands are not coming from the same node.
11867   if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode())
11868     return SDValue();
11869 
11870   // Find the MUL_LOHI node walking up ADDE/SUBE's operands.
11871   bool IsLeftOperandMUL = false;
11872   SDValue MULOp = findMUL_LOHI(AddeSubeOp0);
11873   if (MULOp == SDValue())
11874     MULOp = findMUL_LOHI(AddeSubeOp1);
11875   else
11876     IsLeftOperandMUL = true;
11877   if (MULOp == SDValue())
11878     return SDValue();
11879 
11880   // Figure out the right opcode.
11881   unsigned Opc = MULOp->getOpcode();
11882   unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;
11883 
11884   // Figure out the high and low input values to the MLAL node.
11885   SDValue *HiAddSub = nullptr;
11886   SDValue *LoMul = nullptr;
11887   SDValue *LowAddSub = nullptr;
11888 
11889   // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI.
11890   if ((AddeSubeOp0 != MULOp.getValue(1)) && (AddeSubeOp1 != MULOp.getValue(1)))
11891     return SDValue();
11892 
11893   if (IsLeftOperandMUL)
11894     HiAddSub = &AddeSubeOp1;
11895   else
11896     HiAddSub = &AddeSubeOp0;
11897 
11898   // Ensure that LoMul and LowAddSub are taken from correct ISD::SMUL_LOHI node
11899   // whose low result is fed to the ADDC/SUBC we are checking.
11900 
11901   if (AddcSubcOp0 == MULOp.getValue(0)) {
11902     LoMul = &AddcSubcOp0;
11903     LowAddSub = &AddcSubcOp1;
11904   }
11905   if (AddcSubcOp1 == MULOp.getValue(0)) {
11906     LoMul = &AddcSubcOp1;
11907     LowAddSub = &AddcSubcOp0;
11908   }
11909 
11910   if (!LoMul)
11911     return SDValue();
11912 
11913   // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC
11914   // the replacement below will create a cycle.
11915   if (AddcSubcNode == HiAddSub->getNode() ||
11916       AddcSubcNode->isPredecessorOf(HiAddSub->getNode()))
11917     return SDValue();
11918 
11919   // Create the merged node.
11920   SelectionDAG &DAG = DCI.DAG;
11921 
11922   // Start building operand list.
11923   SmallVector<SDValue, 8> Ops;
11924   Ops.push_back(LoMul->getOperand(0));
11925   Ops.push_back(LoMul->getOperand(1));
11926 
11927   // Check whether we can use SMMLAR, SMMLSR or SMMULR instead.  For this to be
11928   // the case, we must be doing signed multiplication and only use the higher
11929   // part of the result of the MLAL, furthermore the LowAddSub must be a constant
11930   // addition or subtraction with the value of 0x800000.
11931   if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() &&
11932       FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(1) &&
11933       LowAddSub->getNode()->getOpcode() == ISD::Constant &&
11934       static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() ==
11935           0x80000000) {
11936     Ops.push_back(*HiAddSub);
11937     if (AddcSubcNode->getOpcode() == ARMISD::SUBC) {
11938       FinalOpc = ARMISD::SMMLSR;
11939     } else {
11940       FinalOpc = ARMISD::SMMLAR;
11941     }
11942     SDValue NewNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), MVT::i32, Ops);
11943     DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), NewNode);
11944 
11945     return SDValue(AddeSubeNode, 0);
11946   } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC)
11947     // SMMLS is generated during instruction selection and the rest of this
11948     // function can not handle the case where AddcSubcNode is a SUBC.
11949     return SDValue();
11950 
11951   // Finish building the operand list for {U/S}MLAL
11952   Ops.push_back(*LowAddSub);
11953   Ops.push_back(*HiAddSub);
11954 
11955   SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode),
11956                                  DAG.getVTList(MVT::i32, MVT::i32), Ops);
11957 
11958   // Replace the ADDs' nodes uses by the MLA node's values.
11959   SDValue HiMLALResult(MLALNode.getNode(), 1);
11960   DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), HiMLALResult);
11961 
11962   SDValue LoMLALResult(MLALNode.getNode(), 0);
11963   DAG.ReplaceAllUsesOfValueWith(SDValue(AddcSubcNode, 0), LoMLALResult);
11964 
11965   // Return original node to notify the driver to stop replacing.
11966   return SDValue(AddeSubeNode, 0);
11967 }
11968 
11969 static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode,
11970                                       TargetLowering::DAGCombinerInfo &DCI,
11971                                       const ARMSubtarget *Subtarget) {
11972   // UMAAL is similar to UMLAL except that it adds two unsigned values.
11973   // While trying to combine for the other MLAL nodes, first search for the
11974   // chance to use UMAAL. Check if Addc uses a node which has already
11975   // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde
11976   // as the addend, and it's handled in PerformUMLALCombine.
11977 
11978   if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
11979     return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
11980 
11981   // Check that we have a glued ADDC node.
11982   SDNode* AddcNode = AddeNode->getOperand(2).getNode();
11983   if (AddcNode->getOpcode() != ARMISD::ADDC)
11984     return SDValue();
11985 
11986   // Find the converted UMAAL or quit if it doesn't exist.
11987   SDNode *UmlalNode = nullptr;
11988   SDValue AddHi;
11989   if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) {
11990     UmlalNode = AddcNode->getOperand(0).getNode();
11991     AddHi = AddcNode->getOperand(1);
11992   } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) {
11993     UmlalNode = AddcNode->getOperand(1).getNode();
11994     AddHi = AddcNode->getOperand(0);
11995   } else {
11996     return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
11997   }
11998 
11999   // The ADDC should be glued to an ADDE node, which uses the same UMLAL as
12000   // the ADDC as well as Zero.
12001   if (!isNullConstant(UmlalNode->getOperand(3)))
12002     return SDValue();
12003 
12004   if ((isNullConstant(AddeNode->getOperand(0)) &&
12005        AddeNode->getOperand(1).getNode() == UmlalNode) ||
12006       (AddeNode->getOperand(0).getNode() == UmlalNode &&
12007        isNullConstant(AddeNode->getOperand(1)))) {
12008     SelectionDAG &DAG = DCI.DAG;
12009     SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1),
12010                       UmlalNode->getOperand(2), AddHi };
12011     SDValue UMAAL =  DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode),
12012                                  DAG.getVTList(MVT::i32, MVT::i32), Ops);
12013 
12014     // Replace the ADDs' nodes uses by the UMAAL node's values.
12015     DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1));
12016     DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0));
12017 
12018     // Return original node to notify the driver to stop replacing.
12019     return SDValue(AddeNode, 0);
12020   }
12021   return SDValue();
12022 }
12023 
12024 static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG,
12025                                    const ARMSubtarget *Subtarget) {
12026   if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
12027     return SDValue();
12028 
12029   // Check that we have a pair of ADDC and ADDE as operands.
12030   // Both addends of the ADDE must be zero.
12031   SDNode* AddcNode = N->getOperand(2).getNode();
12032   SDNode* AddeNode = N->getOperand(3).getNode();
12033   if ((AddcNode->getOpcode() == ARMISD::ADDC) &&
12034       (AddeNode->getOpcode() == ARMISD::ADDE) &&
12035       isNullConstant(AddeNode->getOperand(0)) &&
12036       isNullConstant(AddeNode->getOperand(1)) &&
12037       (AddeNode->getOperand(2).getNode() == AddcNode))
12038     return DAG.getNode(ARMISD::UMAAL, SDLoc(N),
12039                        DAG.getVTList(MVT::i32, MVT::i32),
12040                        {N->getOperand(0), N->getOperand(1),
12041                         AddcNode->getOperand(0), AddcNode->getOperand(1)});
12042   else
12043     return SDValue();
12044 }
12045 
12046 static SDValue PerformAddcSubcCombine(SDNode *N,
12047                                       TargetLowering::DAGCombinerInfo &DCI,
12048                                       const ARMSubtarget *Subtarget) {
12049   SelectionDAG &DAG(DCI.DAG);
12050 
12051   if (N->getOpcode() == ARMISD::SUBC) {
12052     // (SUBC (ADDE 0, 0, C), 1) -> C
12053     SDValue LHS = N->getOperand(0);
12054     SDValue RHS = N->getOperand(1);
12055     if (LHS->getOpcode() == ARMISD::ADDE &&
12056         isNullConstant(LHS->getOperand(0)) &&
12057         isNullConstant(LHS->getOperand(1)) && isOneConstant(RHS)) {
12058       return DCI.CombineTo(N, SDValue(N, 0), LHS->getOperand(2));
12059     }
12060   }
12061 
12062   if (Subtarget->isThumb1Only()) {
12063     SDValue RHS = N->getOperand(1);
12064     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
12065       int32_t imm = C->getSExtValue();
12066       if (imm < 0 && imm > std::numeric_limits<int>::min()) {
12067         SDLoc DL(N);
12068         RHS = DAG.getConstant(-imm, DL, MVT::i32);
12069         unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC
12070                                                            : ARMISD::ADDC;
12071         return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS);
12072       }
12073     }
12074   }
12075 
12076   return SDValue();
12077 }
12078 
12079 static SDValue PerformAddeSubeCombine(SDNode *N,
12080                                       TargetLowering::DAGCombinerInfo &DCI,
12081                                       const ARMSubtarget *Subtarget) {
12082   if (Subtarget->isThumb1Only()) {
12083     SelectionDAG &DAG = DCI.DAG;
12084     SDValue RHS = N->getOperand(1);
12085     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
12086       int64_t imm = C->getSExtValue();
12087       if (imm < 0) {
12088         SDLoc DL(N);
12089 
12090         // The with-carry-in form matches bitwise not instead of the negation.
12091         // Effectively, the inverse interpretation of the carry flag already
12092         // accounts for part of the negation.
12093         RHS = DAG.getConstant(~imm, DL, MVT::i32);
12094 
12095         unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE
12096                                                            : ARMISD::ADDE;
12097         return DAG.getNode(Opcode, DL, N->getVTList(),
12098                            N->getOperand(0), RHS, N->getOperand(2));
12099       }
12100     }
12101   } else if (N->getOperand(1)->getOpcode() == ISD::SMUL_LOHI) {
12102     return AddCombineTo64bitMLAL(N, DCI, Subtarget);
12103   }
12104   return SDValue();
12105 }
12106 
12107 static SDValue PerformVSELECTCombine(SDNode *N,
12108                                      TargetLowering::DAGCombinerInfo &DCI,
12109                                      const ARMSubtarget *Subtarget) {
12110   // Transforms vselect(not(cond), lhs, rhs) into vselect(cond, rhs, lhs).
12111   //
12112   // We need to re-implement this optimization here as the implementation in the
12113   // Target-Independent DAGCombiner does not handle the kind of constant we make
12114   // (it calls isConstOrConstSplat with AllowTruncation set to false - and for
12115   // good reason, allowing truncation there would break other targets).
12116   //
12117   // Currently, this is only done for MVE, as it's the only target that benefits
12118   // from this transformation (e.g. VPNOT+VPSEL becomes a single VPSEL).
12119   if (!Subtarget->hasMVEIntegerOps())
12120     return SDValue();
12121 
12122   if (N->getOperand(0).getOpcode() != ISD::XOR)
12123     return SDValue();
12124   SDValue XOR = N->getOperand(0);
12125 
12126   // Check if the XOR's RHS is either a 1, or a BUILD_VECTOR of 1s.
12127   // It is important to check with truncation allowed as the BUILD_VECTORs we
12128   // generate in those situations will truncate their operands.
12129   ConstantSDNode *Const =
12130       isConstOrConstSplat(XOR->getOperand(1), /*AllowUndefs*/ false,
12131                           /*AllowTruncation*/ true);
12132   if (!Const || !Const->isOne())
12133     return SDValue();
12134 
12135   // Rewrite into vselect(cond, rhs, lhs).
12136   SDValue Cond = XOR->getOperand(0);
12137   SDValue LHS = N->getOperand(1);
12138   SDValue RHS = N->getOperand(2);
12139   EVT Type = N->getValueType(0);
12140   return DCI.DAG.getNode(ISD::VSELECT, SDLoc(N), Type, Cond, RHS, LHS);
12141 }
12142 
12143 static SDValue PerformABSCombine(SDNode *N,
12144                                   TargetLowering::DAGCombinerInfo &DCI,
12145                                   const ARMSubtarget *Subtarget) {
12146   SDValue res;
12147   SelectionDAG &DAG = DCI.DAG;
12148   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12149 
12150   if (TLI.isOperationLegal(N->getOpcode(), N->getValueType(0)))
12151     return SDValue();
12152 
12153   if (!TLI.expandABS(N, res, DAG))
12154       return SDValue();
12155 
12156   return res;
12157 }
12158 
12159 /// PerformADDECombine - Target-specific dag combine transform from
12160 /// ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL or
12161 /// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL
12162 static SDValue PerformADDECombine(SDNode *N,
12163                                   TargetLowering::DAGCombinerInfo &DCI,
12164                                   const ARMSubtarget *Subtarget) {
12165   // Only ARM and Thumb2 support UMLAL/SMLAL.
12166   if (Subtarget->isThumb1Only())
12167     return PerformAddeSubeCombine(N, DCI, Subtarget);
12168 
12169   // Only perform the checks after legalize when the pattern is available.
12170   if (DCI.isBeforeLegalize()) return SDValue();
12171 
12172   return AddCombineTo64bitUMAAL(N, DCI, Subtarget);
12173 }
12174 
12175 /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
12176 /// operands N0 and N1.  This is a helper for PerformADDCombine that is
12177 /// called with the default operands, and if that fails, with commuted
12178 /// operands.
12179 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
12180                                           TargetLowering::DAGCombinerInfo &DCI,
12181                                           const ARMSubtarget *Subtarget){
12182   // Attempt to create vpadd for this add.
12183   if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget))
12184     return Result;
12185 
12186   // Attempt to create vpaddl for this add.
12187   if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget))
12188     return Result;
12189   if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI,
12190                                                       Subtarget))
12191     return Result;
12192 
12193   // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
12194   if (N0.getNode()->hasOneUse())
12195     if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI))
12196       return Result;
12197   return SDValue();
12198 }
12199 
12200 static SDValue PerformADDVecReduce(SDNode *N,
12201                                    TargetLowering::DAGCombinerInfo &DCI,
12202                                    const ARMSubtarget *Subtarget) {
12203   if (!Subtarget->hasMVEIntegerOps() || N->getValueType(0) != MVT::i64)
12204     return SDValue();
12205 
12206   SDValue N0 = N->getOperand(0);
12207   SDValue N1 = N->getOperand(1);
12208 
12209   // We are looking for a i64 add of a VADDLVx. Due to these being i64's, this
12210   // will look like:
12211   //   t1: i32,i32 = ARMISD::VADDLVs x
12212   //   t2: i64 = build_pair t1, t1:1
12213   //   t3: i64 = add t2, y
12214   // We also need to check for sext / zext and commutitive adds.
12215   auto MakeVecReduce = [&](unsigned Opcode, unsigned OpcodeA, SDValue NA,
12216                            SDValue NB) {
12217     if (NB->getOpcode() != ISD::BUILD_PAIR)
12218       return SDValue();
12219     SDValue VecRed = NB->getOperand(0);
12220     if (VecRed->getOpcode() != Opcode || VecRed.getResNo() != 0 ||
12221         NB->getOperand(1) != SDValue(VecRed.getNode(), 1))
12222       return SDValue();
12223 
12224     SDLoc dl(N);
12225     SmallVector<SDValue, 4> Ops;
12226     Ops.push_back(DCI.DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, NA,
12227                                   DCI.DAG.getConstant(0, dl, MVT::i32)));
12228     Ops.push_back(DCI.DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, NA,
12229                                   DCI.DAG.getConstant(1, dl, MVT::i32)));
12230     for (unsigned i = 0, e = VecRed.getNumOperands(); i < e; i++)
12231       Ops.push_back(VecRed->getOperand(i));
12232     SDValue Red = DCI.DAG.getNode(OpcodeA, dl,
12233                                   DCI.DAG.getVTList({MVT::i32, MVT::i32}), Ops);
12234     return DCI.DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Red,
12235                            SDValue(Red.getNode(), 1));
12236   };
12237 
12238   if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N0, N1))
12239     return M;
12240   if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N0, N1))
12241     return M;
12242   if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N1, N0))
12243     return M;
12244   if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N1, N0))
12245     return M;
12246   if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N0, N1))
12247     return M;
12248   if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N0, N1))
12249     return M;
12250   if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N1, N0))
12251     return M;
12252   if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N1, N0))
12253     return M;
12254   if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N0, N1))
12255     return M;
12256   if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N0, N1))
12257     return M;
12258   if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N1, N0))
12259     return M;
12260   if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N1, N0))
12261     return M;
12262   return SDValue();
12263 }
12264 
12265 bool
12266 ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
12267                                                  CombineLevel Level) const {
12268   if (Level == BeforeLegalizeTypes)
12269     return true;
12270 
12271   if (N->getOpcode() != ISD::SHL)
12272     return true;
12273 
12274   if (Subtarget->isThumb1Only()) {
12275     // Avoid making expensive immediates by commuting shifts. (This logic
12276     // only applies to Thumb1 because ARM and Thumb2 immediates can be shifted
12277     // for free.)
12278     if (N->getOpcode() != ISD::SHL)
12279       return true;
12280     SDValue N1 = N->getOperand(0);
12281     if (N1->getOpcode() != ISD::ADD && N1->getOpcode() != ISD::AND &&
12282         N1->getOpcode() != ISD::OR && N1->getOpcode() != ISD::XOR)
12283       return true;
12284     if (auto *Const = dyn_cast<ConstantSDNode>(N1->getOperand(1))) {
12285       if (Const->getAPIntValue().ult(256))
12286         return false;
12287       if (N1->getOpcode() == ISD::ADD && Const->getAPIntValue().slt(0) &&
12288           Const->getAPIntValue().sgt(-256))
12289         return false;
12290     }
12291     return true;
12292   }
12293 
12294   // Turn off commute-with-shift transform after legalization, so it doesn't
12295   // conflict with PerformSHLSimplify.  (We could try to detect when
12296   // PerformSHLSimplify would trigger more precisely, but it isn't
12297   // really necessary.)
12298   return false;
12299 }
12300 
12301 bool ARMTargetLowering::shouldFoldConstantShiftPairToMask(
12302     const SDNode *N, CombineLevel Level) const {
12303   if (!Subtarget->isThumb1Only())
12304     return true;
12305 
12306   if (Level == BeforeLegalizeTypes)
12307     return true;
12308 
12309   return false;
12310 }
12311 
12312 bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
12313   if (!Subtarget->hasNEON()) {
12314     if (Subtarget->isThumb1Only())
12315       return VT.getScalarSizeInBits() <= 32;
12316     return true;
12317   }
12318   return VT.isScalarInteger();
12319 }
12320 
12321 static SDValue PerformSHLSimplify(SDNode *N,
12322                                 TargetLowering::DAGCombinerInfo &DCI,
12323                                 const ARMSubtarget *ST) {
12324   // Allow the generic combiner to identify potential bswaps.
12325   if (DCI.isBeforeLegalize())
12326     return SDValue();
12327 
12328   // DAG combiner will fold:
12329   // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
12330   // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2
12331   // Other code patterns that can be also be modified have the following form:
12332   // b + ((a << 1) | 510)
12333   // b + ((a << 1) & 510)
12334   // b + ((a << 1) ^ 510)
12335   // b + ((a << 1) + 510)
12336 
12337   // Many instructions can  perform the shift for free, but it requires both
12338   // the operands to be registers. If c1 << c2 is too large, a mov immediate
12339   // instruction will needed. So, unfold back to the original pattern if:
12340   // - if c1 and c2 are small enough that they don't require mov imms.
12341   // - the user(s) of the node can perform an shl
12342 
12343   // No shifted operands for 16-bit instructions.
12344   if (ST->isThumb() && ST->isThumb1Only())
12345     return SDValue();
12346 
12347   // Check that all the users could perform the shl themselves.
12348   for (auto U : N->uses()) {
12349     switch(U->getOpcode()) {
12350     default:
12351       return SDValue();
12352     case ISD::SUB:
12353     case ISD::ADD:
12354     case ISD::AND:
12355     case ISD::OR:
12356     case ISD::XOR:
12357     case ISD::SETCC:
12358     case ARMISD::CMP:
12359       // Check that the user isn't already using a constant because there
12360       // aren't any instructions that support an immediate operand and a
12361       // shifted operand.
12362       if (isa<ConstantSDNode>(U->getOperand(0)) ||
12363           isa<ConstantSDNode>(U->getOperand(1)))
12364         return SDValue();
12365 
12366       // Check that it's not already using a shift.
12367       if (U->getOperand(0).getOpcode() == ISD::SHL ||
12368           U->getOperand(1).getOpcode() == ISD::SHL)
12369         return SDValue();
12370       break;
12371     }
12372   }
12373 
12374   if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR &&
12375       N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND)
12376     return SDValue();
12377 
12378   if (N->getOperand(0).getOpcode() != ISD::SHL)
12379     return SDValue();
12380 
12381   SDValue SHL = N->getOperand(0);
12382 
12383   auto *C1ShlC2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
12384   auto *C2 = dyn_cast<ConstantSDNode>(SHL.getOperand(1));
12385   if (!C1ShlC2 || !C2)
12386     return SDValue();
12387 
12388   APInt C2Int = C2->getAPIntValue();
12389   APInt C1Int = C1ShlC2->getAPIntValue();
12390 
12391   // Check that performing a lshr will not lose any information.
12392   APInt Mask = APInt::getHighBitsSet(C2Int.getBitWidth(),
12393                                      C2Int.getBitWidth() - C2->getZExtValue());
12394   if ((C1Int & Mask) != C1Int)
12395     return SDValue();
12396 
12397   // Shift the first constant.
12398   C1Int.lshrInPlace(C2Int);
12399 
12400   // The immediates are encoded as an 8-bit value that can be rotated.
12401   auto LargeImm = [](const APInt &Imm) {
12402     unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros();
12403     return Imm.getBitWidth() - Zeros > 8;
12404   };
12405 
12406   if (LargeImm(C1Int) || LargeImm(C2Int))
12407     return SDValue();
12408 
12409   SelectionDAG &DAG = DCI.DAG;
12410   SDLoc dl(N);
12411   SDValue X = SHL.getOperand(0);
12412   SDValue BinOp = DAG.getNode(N->getOpcode(), dl, MVT::i32, X,
12413                               DAG.getConstant(C1Int, dl, MVT::i32));
12414   // Shift left to compensate for the lshr of C1Int.
12415   SDValue Res = DAG.getNode(ISD::SHL, dl, MVT::i32, BinOp, SHL.getOperand(1));
12416 
12417   LLVM_DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump();
12418              SHL.dump(); N->dump());
12419   LLVM_DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump());
12420   return Res;
12421 }
12422 
12423 
12424 /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
12425 ///
12426 static SDValue PerformADDCombine(SDNode *N,
12427                                  TargetLowering::DAGCombinerInfo &DCI,
12428                                  const ARMSubtarget *Subtarget) {
12429   SDValue N0 = N->getOperand(0);
12430   SDValue N1 = N->getOperand(1);
12431 
12432   // Only works one way, because it needs an immediate operand.
12433   if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
12434     return Result;
12435 
12436   if (SDValue Result = PerformADDVecReduce(N, DCI, Subtarget))
12437     return Result;
12438 
12439   // First try with the default operand order.
12440   if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget))
12441     return Result;
12442 
12443   // If that didn't work, try again with the operands commuted.
12444   return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
12445 }
12446 
12447 /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
12448 ///
12449 static SDValue PerformSUBCombine(SDNode *N,
12450                                  TargetLowering::DAGCombinerInfo &DCI,
12451                                  const ARMSubtarget *Subtarget) {
12452   SDValue N0 = N->getOperand(0);
12453   SDValue N1 = N->getOperand(1);
12454 
12455   // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
12456   if (N1.getNode()->hasOneUse())
12457     if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI))
12458       return Result;
12459 
12460   if (!Subtarget->hasMVEIntegerOps() || !N->getValueType(0).isVector())
12461     return SDValue();
12462 
12463   // Fold (sub (ARMvmovImm 0), (ARMvdup x)) -> (ARMvdup (sub 0, x))
12464   // so that we can readily pattern match more mve instructions which can use
12465   // a scalar operand.
12466   SDValue VDup = N->getOperand(1);
12467   if (VDup->getOpcode() != ARMISD::VDUP)
12468     return SDValue();
12469 
12470   SDValue VMov = N->getOperand(0);
12471   if (VMov->getOpcode() == ISD::BITCAST)
12472     VMov = VMov->getOperand(0);
12473 
12474   if (VMov->getOpcode() != ARMISD::VMOVIMM || !isZeroVector(VMov))
12475     return SDValue();
12476 
12477   SDLoc dl(N);
12478   SDValue Negate = DCI.DAG.getNode(ISD::SUB, dl, MVT::i32,
12479                                    DCI.DAG.getConstant(0, dl, MVT::i32),
12480                                    VDup->getOperand(0));
12481   return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0), Negate);
12482 }
12483 
12484 /// PerformVMULCombine
12485 /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
12486 /// special multiplier accumulator forwarding.
12487 ///   vmul d3, d0, d2
12488 ///   vmla d3, d1, d2
12489 /// is faster than
12490 ///   vadd d3, d0, d1
12491 ///   vmul d3, d3, d2
12492 //  However, for (A + B) * (A + B),
12493 //    vadd d2, d0, d1
12494 //    vmul d3, d0, d2
12495 //    vmla d3, d1, d2
12496 //  is slower than
12497 //    vadd d2, d0, d1
12498 //    vmul d3, d2, d2
12499 static SDValue PerformVMULCombine(SDNode *N,
12500                                   TargetLowering::DAGCombinerInfo &DCI,
12501                                   const ARMSubtarget *Subtarget) {
12502   if (!Subtarget->hasVMLxForwarding())
12503     return SDValue();
12504 
12505   SelectionDAG &DAG = DCI.DAG;
12506   SDValue N0 = N->getOperand(0);
12507   SDValue N1 = N->getOperand(1);
12508   unsigned Opcode = N0.getOpcode();
12509   if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
12510       Opcode != ISD::FADD && Opcode != ISD::FSUB) {
12511     Opcode = N1.getOpcode();
12512     if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
12513         Opcode != ISD::FADD && Opcode != ISD::FSUB)
12514       return SDValue();
12515     std::swap(N0, N1);
12516   }
12517 
12518   if (N0 == N1)
12519     return SDValue();
12520 
12521   EVT VT = N->getValueType(0);
12522   SDLoc DL(N);
12523   SDValue N00 = N0->getOperand(0);
12524   SDValue N01 = N0->getOperand(1);
12525   return DAG.getNode(Opcode, DL, VT,
12526                      DAG.getNode(ISD::MUL, DL, VT, N00, N1),
12527                      DAG.getNode(ISD::MUL, DL, VT, N01, N1));
12528 }
12529 
12530 static SDValue PerformMVEVMULLCombine(SDNode *N, SelectionDAG &DAG,
12531                                       const ARMSubtarget *Subtarget) {
12532   EVT VT = N->getValueType(0);
12533   if (VT != MVT::v2i64)
12534     return SDValue();
12535 
12536   SDValue N0 = N->getOperand(0);
12537   SDValue N1 = N->getOperand(1);
12538 
12539   auto IsSignExt = [&](SDValue Op) {
12540     if (Op->getOpcode() != ISD::SIGN_EXTEND_INREG)
12541       return SDValue();
12542     EVT VT = cast<VTSDNode>(Op->getOperand(1))->getVT();
12543     if (VT.getScalarSizeInBits() == 32)
12544       return Op->getOperand(0);
12545     return SDValue();
12546   };
12547   auto IsZeroExt = [&](SDValue Op) {
12548     // Zero extends are a little more awkward. At the point we are matching
12549     // this, we are looking for an AND with a (-1, 0, -1, 0) buildvector mask.
12550     // That might be before of after a bitcast depending on how the and is
12551     // placed. Because this has to look through bitcasts, it is currently only
12552     // supported on LE.
12553     if (!Subtarget->isLittle())
12554       return SDValue();
12555 
12556     SDValue And = Op;
12557     if (And->getOpcode() == ISD::BITCAST)
12558       And = And->getOperand(0);
12559     if (And->getOpcode() != ISD::AND)
12560       return SDValue();
12561     SDValue Mask = And->getOperand(1);
12562     if (Mask->getOpcode() == ISD::BITCAST)
12563       Mask = Mask->getOperand(0);
12564 
12565     if (Mask->getOpcode() != ISD::BUILD_VECTOR ||
12566         Mask.getValueType() != MVT::v4i32)
12567       return SDValue();
12568     if (isAllOnesConstant(Mask->getOperand(0)) &&
12569         isNullConstant(Mask->getOperand(1)) &&
12570         isAllOnesConstant(Mask->getOperand(2)) &&
12571         isNullConstant(Mask->getOperand(3)))
12572       return And->getOperand(0);
12573     return SDValue();
12574   };
12575 
12576   SDLoc dl(N);
12577   if (SDValue Op0 = IsSignExt(N0)) {
12578     if (SDValue Op1 = IsSignExt(N1)) {
12579       SDValue New0a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op0);
12580       SDValue New1a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op1);
12581       return DAG.getNode(ARMISD::VMULLs, dl, VT, New0a, New1a);
12582     }
12583   }
12584   if (SDValue Op0 = IsZeroExt(N0)) {
12585     if (SDValue Op1 = IsZeroExt(N1)) {
12586       SDValue New0a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op0);
12587       SDValue New1a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op1);
12588       return DAG.getNode(ARMISD::VMULLu, dl, VT, New0a, New1a);
12589     }
12590   }
12591 
12592   return SDValue();
12593 }
12594 
12595 static SDValue PerformMULCombine(SDNode *N,
12596                                  TargetLowering::DAGCombinerInfo &DCI,
12597                                  const ARMSubtarget *Subtarget) {
12598   SelectionDAG &DAG = DCI.DAG;
12599 
12600   EVT VT = N->getValueType(0);
12601   if (Subtarget->hasMVEIntegerOps() && VT == MVT::v2i64)
12602     return PerformMVEVMULLCombine(N, DAG, Subtarget);
12603 
12604   if (Subtarget->isThumb1Only())
12605     return SDValue();
12606 
12607   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
12608     return SDValue();
12609 
12610   if (VT.is64BitVector() || VT.is128BitVector())
12611     return PerformVMULCombine(N, DCI, Subtarget);
12612   if (VT != MVT::i32)
12613     return SDValue();
12614 
12615   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
12616   if (!C)
12617     return SDValue();
12618 
12619   int64_t MulAmt = C->getSExtValue();
12620   unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);
12621 
12622   ShiftAmt = ShiftAmt & (32 - 1);
12623   SDValue V = N->getOperand(0);
12624   SDLoc DL(N);
12625 
12626   SDValue Res;
12627   MulAmt >>= ShiftAmt;
12628 
12629   if (MulAmt >= 0) {
12630     if (isPowerOf2_32(MulAmt - 1)) {
12631       // (mul x, 2^N + 1) => (add (shl x, N), x)
12632       Res = DAG.getNode(ISD::ADD, DL, VT,
12633                         V,
12634                         DAG.getNode(ISD::SHL, DL, VT,
12635                                     V,
12636                                     DAG.getConstant(Log2_32(MulAmt - 1), DL,
12637                                                     MVT::i32)));
12638     } else if (isPowerOf2_32(MulAmt + 1)) {
12639       // (mul x, 2^N - 1) => (sub (shl x, N), x)
12640       Res = DAG.getNode(ISD::SUB, DL, VT,
12641                         DAG.getNode(ISD::SHL, DL, VT,
12642                                     V,
12643                                     DAG.getConstant(Log2_32(MulAmt + 1), DL,
12644                                                     MVT::i32)),
12645                         V);
12646     } else
12647       return SDValue();
12648   } else {
12649     uint64_t MulAmtAbs = -MulAmt;
12650     if (isPowerOf2_32(MulAmtAbs + 1)) {
12651       // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
12652       Res = DAG.getNode(ISD::SUB, DL, VT,
12653                         V,
12654                         DAG.getNode(ISD::SHL, DL, VT,
12655                                     V,
12656                                     DAG.getConstant(Log2_32(MulAmtAbs + 1), DL,
12657                                                     MVT::i32)));
12658     } else if (isPowerOf2_32(MulAmtAbs - 1)) {
12659       // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
12660       Res = DAG.getNode(ISD::ADD, DL, VT,
12661                         V,
12662                         DAG.getNode(ISD::SHL, DL, VT,
12663                                     V,
12664                                     DAG.getConstant(Log2_32(MulAmtAbs - 1), DL,
12665                                                     MVT::i32)));
12666       Res = DAG.getNode(ISD::SUB, DL, VT,
12667                         DAG.getConstant(0, DL, MVT::i32), Res);
12668     } else
12669       return SDValue();
12670   }
12671 
12672   if (ShiftAmt != 0)
12673     Res = DAG.getNode(ISD::SHL, DL, VT,
12674                       Res, DAG.getConstant(ShiftAmt, DL, MVT::i32));
12675 
12676   // Do not add new nodes to DAG combiner worklist.
12677   DCI.CombineTo(N, Res, false);
12678   return SDValue();
12679 }
12680 
12681 static SDValue CombineANDShift(SDNode *N,
12682                                TargetLowering::DAGCombinerInfo &DCI,
12683                                const ARMSubtarget *Subtarget) {
12684   // Allow DAGCombine to pattern-match before we touch the canonical form.
12685   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
12686     return SDValue();
12687 
12688   if (N->getValueType(0) != MVT::i32)
12689     return SDValue();
12690 
12691   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
12692   if (!N1C)
12693     return SDValue();
12694 
12695   uint32_t C1 = (uint32_t)N1C->getZExtValue();
12696   // Don't transform uxtb/uxth.
12697   if (C1 == 255 || C1 == 65535)
12698     return SDValue();
12699 
12700   SDNode *N0 = N->getOperand(0).getNode();
12701   if (!N0->hasOneUse())
12702     return SDValue();
12703 
12704   if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL)
12705     return SDValue();
12706 
12707   bool LeftShift = N0->getOpcode() == ISD::SHL;
12708 
12709   ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
12710   if (!N01C)
12711     return SDValue();
12712 
12713   uint32_t C2 = (uint32_t)N01C->getZExtValue();
12714   if (!C2 || C2 >= 32)
12715     return SDValue();
12716 
12717   // Clear irrelevant bits in the mask.
12718   if (LeftShift)
12719     C1 &= (-1U << C2);
12720   else
12721     C1 &= (-1U >> C2);
12722 
12723   SelectionDAG &DAG = DCI.DAG;
12724   SDLoc DL(N);
12725 
12726   // We have a pattern of the form "(and (shl x, c2) c1)" or
12727   // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to
12728   // transform to a pair of shifts, to save materializing c1.
12729 
12730   // First pattern: right shift, then mask off leading bits.
12731   // FIXME: Use demanded bits?
12732   if (!LeftShift && isMask_32(C1)) {
12733     uint32_t C3 = countLeadingZeros(C1);
12734     if (C2 < C3) {
12735       SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
12736                                 DAG.getConstant(C3 - C2, DL, MVT::i32));
12737       return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
12738                          DAG.getConstant(C3, DL, MVT::i32));
12739     }
12740   }
12741 
12742   // First pattern, reversed: left shift, then mask off trailing bits.
12743   if (LeftShift && isMask_32(~C1)) {
12744     uint32_t C3 = countTrailingZeros(C1);
12745     if (C2 < C3) {
12746       SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
12747                                 DAG.getConstant(C3 - C2, DL, MVT::i32));
12748       return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
12749                          DAG.getConstant(C3, DL, MVT::i32));
12750     }
12751   }
12752 
12753   // Second pattern: left shift, then mask off leading bits.
12754   // FIXME: Use demanded bits?
12755   if (LeftShift && isShiftedMask_32(C1)) {
12756     uint32_t Trailing = countTrailingZeros(C1);
12757     uint32_t C3 = countLeadingZeros(C1);
12758     if (Trailing == C2 && C2 + C3 < 32) {
12759       SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
12760                                 DAG.getConstant(C2 + C3, DL, MVT::i32));
12761       return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
12762                         DAG.getConstant(C3, DL, MVT::i32));
12763     }
12764   }
12765 
12766   // Second pattern, reversed: right shift, then mask off trailing bits.
12767   // FIXME: Handle other patterns of known/demanded bits.
12768   if (!LeftShift && isShiftedMask_32(C1)) {
12769     uint32_t Leading = countLeadingZeros(C1);
12770     uint32_t C3 = countTrailingZeros(C1);
12771     if (Leading == C2 && C2 + C3 < 32) {
12772       SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
12773                                 DAG.getConstant(C2 + C3, DL, MVT::i32));
12774       return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
12775                          DAG.getConstant(C3, DL, MVT::i32));
12776     }
12777   }
12778 
12779   // FIXME: Transform "(and (shl x, c2) c1)" ->
12780   // "(shl (and x, c1>>c2), c2)" if "c1 >> c2" is a cheaper immediate than
12781   // c1.
12782   return SDValue();
12783 }
12784 
12785 static SDValue PerformANDCombine(SDNode *N,
12786                                  TargetLowering::DAGCombinerInfo &DCI,
12787                                  const ARMSubtarget *Subtarget) {
12788   // Attempt to use immediate-form VBIC
12789   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
12790   SDLoc dl(N);
12791   EVT VT = N->getValueType(0);
12792   SelectionDAG &DAG = DCI.DAG;
12793 
12794   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT) || VT == MVT::v4i1 ||
12795       VT == MVT::v8i1 || VT == MVT::v16i1)
12796     return SDValue();
12797 
12798   APInt SplatBits, SplatUndef;
12799   unsigned SplatBitSize;
12800   bool HasAnyUndefs;
12801   if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) &&
12802       BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
12803     if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 ||
12804         SplatBitSize == 64) {
12805       EVT VbicVT;
12806       SDValue Val = isVMOVModifiedImm((~SplatBits).getZExtValue(),
12807                                       SplatUndef.getZExtValue(), SplatBitSize,
12808                                       DAG, dl, VbicVT, VT, OtherModImm);
12809       if (Val.getNode()) {
12810         SDValue Input =
12811           DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
12812         SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
12813         return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
12814       }
12815     }
12816   }
12817 
12818   if (!Subtarget->isThumb1Only()) {
12819     // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))
12820     if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI))
12821       return Result;
12822 
12823     if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
12824       return Result;
12825   }
12826 
12827   if (Subtarget->isThumb1Only())
12828     if (SDValue Result = CombineANDShift(N, DCI, Subtarget))
12829       return Result;
12830 
12831   return SDValue();
12832 }
12833 
12834 // Try combining OR nodes to SMULWB, SMULWT.
12835 static SDValue PerformORCombineToSMULWBT(SDNode *OR,
12836                                          TargetLowering::DAGCombinerInfo &DCI,
12837                                          const ARMSubtarget *Subtarget) {
12838   if (!Subtarget->hasV6Ops() ||
12839       (Subtarget->isThumb() &&
12840        (!Subtarget->hasThumb2() || !Subtarget->hasDSP())))
12841     return SDValue();
12842 
12843   SDValue SRL = OR->getOperand(0);
12844   SDValue SHL = OR->getOperand(1);
12845 
12846   if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) {
12847     SRL = OR->getOperand(1);
12848     SHL = OR->getOperand(0);
12849   }
12850   if (!isSRL16(SRL) || !isSHL16(SHL))
12851     return SDValue();
12852 
12853   // The first operands to the shifts need to be the two results from the
12854   // same smul_lohi node.
12855   if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) ||
12856        SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI)
12857     return SDValue();
12858 
12859   SDNode *SMULLOHI = SRL.getOperand(0).getNode();
12860   if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) ||
12861       SHL.getOperand(0) != SDValue(SMULLOHI, 1))
12862     return SDValue();
12863 
12864   // Now we have:
12865   // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16)))
12866   // For SMUL[B|T] smul_lohi will take a 32-bit and a 16-bit arguments.
12867   // For SMUWB the 16-bit value will signed extended somehow.
12868   // For SMULWT only the SRA is required.
12869   // Check both sides of SMUL_LOHI
12870   SDValue OpS16 = SMULLOHI->getOperand(0);
12871   SDValue OpS32 = SMULLOHI->getOperand(1);
12872 
12873   SelectionDAG &DAG = DCI.DAG;
12874   if (!isS16(OpS16, DAG) && !isSRA16(OpS16)) {
12875     OpS16 = OpS32;
12876     OpS32 = SMULLOHI->getOperand(0);
12877   }
12878 
12879   SDLoc dl(OR);
12880   unsigned Opcode = 0;
12881   if (isS16(OpS16, DAG))
12882     Opcode = ARMISD::SMULWB;
12883   else if (isSRA16(OpS16)) {
12884     Opcode = ARMISD::SMULWT;
12885     OpS16 = OpS16->getOperand(0);
12886   }
12887   else
12888     return SDValue();
12889 
12890   SDValue Res = DAG.getNode(Opcode, dl, MVT::i32, OpS32, OpS16);
12891   DAG.ReplaceAllUsesOfValueWith(SDValue(OR, 0), Res);
12892   return SDValue(OR, 0);
12893 }
12894 
12895 static SDValue PerformORCombineToBFI(SDNode *N,
12896                                      TargetLowering::DAGCombinerInfo &DCI,
12897                                      const ARMSubtarget *Subtarget) {
12898   // BFI is only available on V6T2+
12899   if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
12900     return SDValue();
12901 
12902   EVT VT = N->getValueType(0);
12903   SDValue N0 = N->getOperand(0);
12904   SDValue N1 = N->getOperand(1);
12905   SelectionDAG &DAG = DCI.DAG;
12906   SDLoc DL(N);
12907   // 1) or (and A, mask), val => ARMbfi A, val, mask
12908   //      iff (val & mask) == val
12909   //
12910   // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
12911   //  2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
12912   //          && mask == ~mask2
12913   //  2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
12914   //          && ~mask == mask2
12915   //  (i.e., copy a bitfield value into another bitfield of the same width)
12916 
12917   if (VT != MVT::i32)
12918     return SDValue();
12919 
12920   SDValue N00 = N0.getOperand(0);
12921 
12922   // The value and the mask need to be constants so we can verify this is
12923   // actually a bitfield set. If the mask is 0xffff, we can do better
12924   // via a movt instruction, so don't use BFI in that case.
12925   SDValue MaskOp = N0.getOperand(1);
12926   ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
12927   if (!MaskC)
12928     return SDValue();
12929   unsigned Mask = MaskC->getZExtValue();
12930   if (Mask == 0xffff)
12931     return SDValue();
12932   SDValue Res;
12933   // Case (1): or (and A, mask), val => ARMbfi A, val, mask
12934   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
12935   if (N1C) {
12936     unsigned Val = N1C->getZExtValue();
12937     if ((Val & ~Mask) != Val)
12938       return SDValue();
12939 
12940     if (ARM::isBitFieldInvertedMask(Mask)) {
12941       Val >>= countTrailingZeros(~Mask);
12942 
12943       Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
12944                         DAG.getConstant(Val, DL, MVT::i32),
12945                         DAG.getConstant(Mask, DL, MVT::i32));
12946 
12947       DCI.CombineTo(N, Res, false);
12948       // Return value from the original node to inform the combiner than N is
12949       // now dead.
12950       return SDValue(N, 0);
12951     }
12952   } else if (N1.getOpcode() == ISD::AND) {
12953     // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
12954     ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
12955     if (!N11C)
12956       return SDValue();
12957     unsigned Mask2 = N11C->getZExtValue();
12958 
12959     // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern
12960     // as is to match.
12961     if (ARM::isBitFieldInvertedMask(Mask) &&
12962         (Mask == ~Mask2)) {
12963       // The pack halfword instruction works better for masks that fit it,
12964       // so use that when it's available.
12965       if (Subtarget->hasDSP() &&
12966           (Mask == 0xffff || Mask == 0xffff0000))
12967         return SDValue();
12968       // 2a
12969       unsigned amt = countTrailingZeros(Mask2);
12970       Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
12971                         DAG.getConstant(amt, DL, MVT::i32));
12972       Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
12973                         DAG.getConstant(Mask, DL, MVT::i32));
12974       DCI.CombineTo(N, Res, false);
12975       // Return value from the original node to inform the combiner than N is
12976       // now dead.
12977       return SDValue(N, 0);
12978     } else if (ARM::isBitFieldInvertedMask(~Mask) &&
12979                (~Mask == Mask2)) {
12980       // The pack halfword instruction works better for masks that fit it,
12981       // so use that when it's available.
12982       if (Subtarget->hasDSP() &&
12983           (Mask2 == 0xffff || Mask2 == 0xffff0000))
12984         return SDValue();
12985       // 2b
12986       unsigned lsb = countTrailingZeros(Mask);
12987       Res = DAG.getNode(ISD::SRL, DL, VT, N00,
12988                         DAG.getConstant(lsb, DL, MVT::i32));
12989       Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
12990                         DAG.getConstant(Mask2, DL, MVT::i32));
12991       DCI.CombineTo(N, Res, false);
12992       // Return value from the original node to inform the combiner than N is
12993       // now dead.
12994       return SDValue(N, 0);
12995     }
12996   }
12997 
12998   if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
12999       N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
13000       ARM::isBitFieldInvertedMask(~Mask)) {
13001     // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
13002     // where lsb(mask) == #shamt and masked bits of B are known zero.
13003     SDValue ShAmt = N00.getOperand(1);
13004     unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
13005     unsigned LSB = countTrailingZeros(Mask);
13006     if (ShAmtC != LSB)
13007       return SDValue();
13008 
13009     Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
13010                       DAG.getConstant(~Mask, DL, MVT::i32));
13011 
13012     DCI.CombineTo(N, Res, false);
13013     // Return value from the original node to inform the combiner than N is
13014     // now dead.
13015     return SDValue(N, 0);
13016   }
13017 
13018   return SDValue();
13019 }
13020 
13021 static bool isValidMVECond(unsigned CC, bool IsFloat) {
13022   switch (CC) {
13023   case ARMCC::EQ:
13024   case ARMCC::NE:
13025   case ARMCC::LE:
13026   case ARMCC::GT:
13027   case ARMCC::GE:
13028   case ARMCC::LT:
13029     return true;
13030   case ARMCC::HS:
13031   case ARMCC::HI:
13032     return !IsFloat;
13033   default:
13034     return false;
13035   };
13036 }
13037 
13038 static ARMCC::CondCodes getVCMPCondCode(SDValue N) {
13039   if (N->getOpcode() == ARMISD::VCMP)
13040     return (ARMCC::CondCodes)N->getConstantOperandVal(2);
13041   else if (N->getOpcode() == ARMISD::VCMPZ)
13042     return (ARMCC::CondCodes)N->getConstantOperandVal(1);
13043   else
13044     llvm_unreachable("Not a VCMP/VCMPZ!");
13045 }
13046 
13047 static bool CanInvertMVEVCMP(SDValue N) {
13048   ARMCC::CondCodes CC = ARMCC::getOppositeCondition(getVCMPCondCode(N));
13049   return isValidMVECond(CC, N->getOperand(0).getValueType().isFloatingPoint());
13050 }
13051 
13052 static SDValue PerformORCombine_i1(SDNode *N,
13053                                    TargetLowering::DAGCombinerInfo &DCI,
13054                                    const ARMSubtarget *Subtarget) {
13055   // Try to invert "or A, B" -> "and ~A, ~B", as the "and" is easier to chain
13056   // together with predicates
13057   EVT VT = N->getValueType(0);
13058   SDLoc DL(N);
13059   SDValue N0 = N->getOperand(0);
13060   SDValue N1 = N->getOperand(1);
13061 
13062   auto IsFreelyInvertable = [&](SDValue V) {
13063     if (V->getOpcode() == ARMISD::VCMP || V->getOpcode() == ARMISD::VCMPZ)
13064       return CanInvertMVEVCMP(V);
13065     return false;
13066   };
13067 
13068   // At least one operand must be freely invertable.
13069   if (!(IsFreelyInvertable(N0) || IsFreelyInvertable(N1)))
13070     return SDValue();
13071 
13072   SDValue NewN0 = DCI.DAG.getLogicalNOT(DL, N0, VT);
13073   SDValue NewN1 = DCI.DAG.getLogicalNOT(DL, N1, VT);
13074   SDValue And = DCI.DAG.getNode(ISD::AND, DL, VT, NewN0, NewN1);
13075   return DCI.DAG.getLogicalNOT(DL, And, VT);
13076 }
13077 
13078 /// PerformORCombine - Target-specific dag combine xforms for ISD::OR
13079 static SDValue PerformORCombine(SDNode *N,
13080                                 TargetLowering::DAGCombinerInfo &DCI,
13081                                 const ARMSubtarget *Subtarget) {
13082   // Attempt to use immediate-form VORR
13083   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
13084   SDLoc dl(N);
13085   EVT VT = N->getValueType(0);
13086   SelectionDAG &DAG = DCI.DAG;
13087 
13088   if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
13089     return SDValue();
13090 
13091   if (Subtarget->hasMVEIntegerOps() &&
13092       (VT == MVT::v4i1 || VT == MVT::v8i1 || VT == MVT::v16i1))
13093     return PerformORCombine_i1(N, DCI, Subtarget);
13094 
13095   APInt SplatBits, SplatUndef;
13096   unsigned SplatBitSize;
13097   bool HasAnyUndefs;
13098   if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) &&
13099       BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
13100     if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 ||
13101         SplatBitSize == 64) {
13102       EVT VorrVT;
13103       SDValue Val =
13104           isVMOVModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
13105                             SplatBitSize, DAG, dl, VorrVT, VT, OtherModImm);
13106       if (Val.getNode()) {
13107         SDValue Input =
13108           DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
13109         SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
13110         return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
13111       }
13112     }
13113   }
13114 
13115   if (!Subtarget->isThumb1Only()) {
13116     // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
13117     if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
13118       return Result;
13119     if (SDValue Result = PerformORCombineToSMULWBT(N, DCI, Subtarget))
13120       return Result;
13121   }
13122 
13123   SDValue N0 = N->getOperand(0);
13124   SDValue N1 = N->getOperand(1);
13125 
13126   // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
13127   if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
13128       DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
13129 
13130     // The code below optimizes (or (and X, Y), Z).
13131     // The AND operand needs to have a single user to make these optimizations
13132     // profitable.
13133     if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
13134       return SDValue();
13135 
13136     APInt SplatUndef;
13137     unsigned SplatBitSize;
13138     bool HasAnyUndefs;
13139 
13140     APInt SplatBits0, SplatBits1;
13141     BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
13142     BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
13143     // Ensure that the second operand of both ands are constants
13144     if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
13145                                       HasAnyUndefs) && !HasAnyUndefs) {
13146         if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
13147                                           HasAnyUndefs) && !HasAnyUndefs) {
13148             // Ensure that the bit width of the constants are the same and that
13149             // the splat arguments are logical inverses as per the pattern we
13150             // are trying to simplify.
13151             if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
13152                 SplatBits0 == ~SplatBits1) {
13153                 // Canonicalize the vector type to make instruction selection
13154                 // simpler.
13155                 EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
13156                 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
13157                                              N0->getOperand(1),
13158                                              N0->getOperand(0),
13159                                              N1->getOperand(0));
13160                 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
13161             }
13162         }
13163     }
13164   }
13165 
13166   // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when
13167   // reasonable.
13168   if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
13169     if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget))
13170       return Res;
13171   }
13172 
13173   if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
13174     return Result;
13175 
13176   return SDValue();
13177 }
13178 
13179 static SDValue PerformXORCombine(SDNode *N,
13180                                  TargetLowering::DAGCombinerInfo &DCI,
13181                                  const ARMSubtarget *Subtarget) {
13182   EVT VT = N->getValueType(0);
13183   SelectionDAG &DAG = DCI.DAG;
13184 
13185   if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
13186     return SDValue();
13187 
13188   if (!Subtarget->isThumb1Only()) {
13189     // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
13190     if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
13191       return Result;
13192 
13193     if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
13194       return Result;
13195   }
13196 
13197   if (Subtarget->hasMVEIntegerOps()) {
13198     // fold (xor(vcmp/z, 1)) into a vcmp with the opposite condition.
13199     SDValue N0 = N->getOperand(0);
13200     SDValue N1 = N->getOperand(1);
13201     const TargetLowering *TLI = Subtarget->getTargetLowering();
13202     if (TLI->isConstTrueVal(N1.getNode()) &&
13203         (N0->getOpcode() == ARMISD::VCMP || N0->getOpcode() == ARMISD::VCMPZ)) {
13204       if (CanInvertMVEVCMP(N0)) {
13205         SDLoc DL(N0);
13206         ARMCC::CondCodes CC = ARMCC::getOppositeCondition(getVCMPCondCode(N0));
13207 
13208         SmallVector<SDValue, 4> Ops;
13209         Ops.push_back(N0->getOperand(0));
13210         if (N0->getOpcode() == ARMISD::VCMP)
13211           Ops.push_back(N0->getOperand(1));
13212         Ops.push_back(DCI.DAG.getConstant(CC, DL, MVT::i32));
13213         return DCI.DAG.getNode(N0->getOpcode(), DL, N0->getValueType(0), Ops);
13214       }
13215     }
13216   }
13217 
13218   return SDValue();
13219 }
13220 
13221 // ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and return it,
13222 // and fill in FromMask and ToMask with (consecutive) bits in "from" to be extracted and
13223 // their position in "to" (Rd).
13224 static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) {
13225   assert(N->getOpcode() == ARMISD::BFI);
13226 
13227   SDValue From = N->getOperand(1);
13228   ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue();
13229   FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation());
13230 
13231   // If the Base came from a SHR #C, we can deduce that it is really testing bit
13232   // #C in the base of the SHR.
13233   if (From->getOpcode() == ISD::SRL &&
13234       isa<ConstantSDNode>(From->getOperand(1))) {
13235     APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
13236     assert(Shift.getLimitedValue() < 32 && "Shift too large!");
13237     FromMask <<= Shift.getLimitedValue(31);
13238     From = From->getOperand(0);
13239   }
13240 
13241   return From;
13242 }
13243 
13244 // If A and B contain one contiguous set of bits, does A | B == A . B?
13245 //
13246 // Neither A nor B must be zero.
13247 static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) {
13248   unsigned LastActiveBitInA =  A.countTrailingZeros();
13249   unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1;
13250   return LastActiveBitInA - 1 == FirstActiveBitInB;
13251 }
13252 
13253 static SDValue FindBFIToCombineWith(SDNode *N) {
13254   // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it can combine with,
13255   // if one exists.
13256   APInt ToMask, FromMask;
13257   SDValue From = ParseBFI(N, ToMask, FromMask);
13258   SDValue To = N->getOperand(0);
13259 
13260   // Now check for a compatible BFI to merge with. We can pass through BFIs that
13261   // aren't compatible, but not if they set the same bit in their destination as
13262   // we do (or that of any BFI we're going to combine with).
13263   SDValue V = To;
13264   APInt CombinedToMask = ToMask;
13265   while (V.getOpcode() == ARMISD::BFI) {
13266     APInt NewToMask, NewFromMask;
13267     SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask);
13268     if (NewFrom != From) {
13269       // This BFI has a different base. Keep going.
13270       CombinedToMask |= NewToMask;
13271       V = V.getOperand(0);
13272       continue;
13273     }
13274 
13275     // Do the written bits conflict with any we've seen so far?
13276     if ((NewToMask & CombinedToMask).getBoolValue())
13277       // Conflicting bits - bail out because going further is unsafe.
13278       return SDValue();
13279 
13280     // Are the new bits contiguous when combined with the old bits?
13281     if (BitsProperlyConcatenate(ToMask, NewToMask) &&
13282         BitsProperlyConcatenate(FromMask, NewFromMask))
13283       return V;
13284     if (BitsProperlyConcatenate(NewToMask, ToMask) &&
13285         BitsProperlyConcatenate(NewFromMask, FromMask))
13286       return V;
13287 
13288     // We've seen a write to some bits, so track it.
13289     CombinedToMask |= NewToMask;
13290     // Keep going...
13291     V = V.getOperand(0);
13292   }
13293 
13294   return SDValue();
13295 }
13296 
13297 static SDValue PerformBFICombine(SDNode *N,
13298                                  TargetLowering::DAGCombinerInfo &DCI) {
13299   SDValue N1 = N->getOperand(1);
13300   if (N1.getOpcode() == ISD::AND) {
13301     // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
13302     // the bits being cleared by the AND are not demanded by the BFI.
13303     ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
13304     if (!N11C)
13305       return SDValue();
13306     unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
13307     unsigned LSB = countTrailingZeros(~InvMask);
13308     unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
13309     assert(Width <
13310                static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
13311            "undefined behavior");
13312     unsigned Mask = (1u << Width) - 1;
13313     unsigned Mask2 = N11C->getZExtValue();
13314     if ((Mask & (~Mask2)) == 0)
13315       return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
13316                              N->getOperand(0), N1.getOperand(0),
13317                              N->getOperand(2));
13318   } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) {
13319     // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes.
13320     // Keep track of any consecutive bits set that all come from the same base
13321     // value. We can combine these together into a single BFI.
13322     SDValue CombineBFI = FindBFIToCombineWith(N);
13323     if (CombineBFI == SDValue())
13324       return SDValue();
13325 
13326     // We've found a BFI.
13327     APInt ToMask1, FromMask1;
13328     SDValue From1 = ParseBFI(N, ToMask1, FromMask1);
13329 
13330     APInt ToMask2, FromMask2;
13331     SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2);
13332     assert(From1 == From2);
13333     (void)From2;
13334 
13335     // First, unlink CombineBFI.
13336     DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0));
13337     // Then create a new BFI, combining the two together.
13338     APInt NewFromMask = FromMask1 | FromMask2;
13339     APInt NewToMask = ToMask1 | ToMask2;
13340 
13341     EVT VT = N->getValueType(0);
13342     SDLoc dl(N);
13343 
13344     if (NewFromMask[0] == 0)
13345       From1 = DCI.DAG.getNode(
13346         ISD::SRL, dl, VT, From1,
13347         DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT));
13348     return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1,
13349                            DCI.DAG.getConstant(~NewToMask, dl, VT));
13350   }
13351   return SDValue();
13352 }
13353 
13354 /// PerformVMOVRRDCombine - Target-specific dag combine xforms for
13355 /// ARMISD::VMOVRRD.
13356 static SDValue PerformVMOVRRDCombine(SDNode *N,
13357                                      TargetLowering::DAGCombinerInfo &DCI,
13358                                      const ARMSubtarget *Subtarget) {
13359   // vmovrrd(vmovdrr x, y) -> x,y
13360   SDValue InDouble = N->getOperand(0);
13361   if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64())
13362     return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
13363 
13364   // vmovrrd(load f64) -> (load i32), (load i32)
13365   SDNode *InNode = InDouble.getNode();
13366   if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() &&
13367       InNode->getValueType(0) == MVT::f64 &&
13368       InNode->getOperand(1).getOpcode() == ISD::FrameIndex &&
13369       !cast<LoadSDNode>(InNode)->isVolatile()) {
13370     // TODO: Should this be done for non-FrameIndex operands?
13371     LoadSDNode *LD = cast<LoadSDNode>(InNode);
13372 
13373     SelectionDAG &DAG = DCI.DAG;
13374     SDLoc DL(LD);
13375     SDValue BasePtr = LD->getBasePtr();
13376     SDValue NewLD1 =
13377         DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(),
13378                     LD->getAlignment(), LD->getMemOperand()->getFlags());
13379 
13380     SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
13381                                     DAG.getConstant(4, DL, MVT::i32));
13382 
13383     SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, LD->getChain(), OffsetPtr,
13384                                  LD->getPointerInfo().getWithOffset(4),
13385                                  std::min(4U, LD->getAlignment()),
13386                                  LD->getMemOperand()->getFlags());
13387 
13388     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
13389     if (DCI.DAG.getDataLayout().isBigEndian())
13390       std::swap (NewLD1, NewLD2);
13391     SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
13392     return Result;
13393   }
13394 
13395   return SDValue();
13396 }
13397 
13398 /// PerformVMOVDRRCombine - Target-specific dag combine xforms for
13399 /// ARMISD::VMOVDRR.  This is also used for BUILD_VECTORs with 2 operands.
13400 static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
13401   // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
13402   SDValue Op0 = N->getOperand(0);
13403   SDValue Op1 = N->getOperand(1);
13404   if (Op0.getOpcode() == ISD::BITCAST)
13405     Op0 = Op0.getOperand(0);
13406   if (Op1.getOpcode() == ISD::BITCAST)
13407     Op1 = Op1.getOperand(0);
13408   if (Op0.getOpcode() == ARMISD::VMOVRRD &&
13409       Op0.getNode() == Op1.getNode() &&
13410       Op0.getResNo() == 0 && Op1.getResNo() == 1)
13411     return DAG.getNode(ISD::BITCAST, SDLoc(N),
13412                        N->getValueType(0), Op0.getOperand(0));
13413   return SDValue();
13414 }
13415 
13416 static SDValue PerformVMOVhrCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
13417   SDValue Op0 = N->getOperand(0);
13418 
13419   // VMOVhr (VMOVrh (X)) -> X
13420   if (Op0->getOpcode() == ARMISD::VMOVrh)
13421     return Op0->getOperand(0);
13422 
13423   // FullFP16: half values are passed in S-registers, and we don't
13424   // need any of the bitcast and moves:
13425   //
13426   //     t2: f32,ch = CopyFromReg t0, Register:f32 %0
13427   //   t5: i32 = bitcast t2
13428   // t18: f16 = ARMISD::VMOVhr t5
13429   if (Op0->getOpcode() == ISD::BITCAST) {
13430     SDValue Copy = Op0->getOperand(0);
13431     if (Copy.getValueType() == MVT::f32 &&
13432         Copy->getOpcode() == ISD::CopyFromReg) {
13433       SDValue Ops[] = {Copy->getOperand(0), Copy->getOperand(1)};
13434       SDValue NewCopy =
13435           DCI.DAG.getNode(ISD::CopyFromReg, SDLoc(N), N->getValueType(0), Ops);
13436       return NewCopy;
13437     }
13438   }
13439 
13440   // fold (VMOVhr (load x)) -> (load (f16*)x)
13441   if (LoadSDNode *LN0 = dyn_cast<LoadSDNode>(Op0)) {
13442     if (LN0->hasOneUse() && LN0->isUnindexed() &&
13443         LN0->getMemoryVT() == MVT::i16) {
13444       SDValue Load =
13445           DCI.DAG.getLoad(N->getValueType(0), SDLoc(N), LN0->getChain(),
13446                           LN0->getBasePtr(), LN0->getMemOperand());
13447       DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0));
13448       DCI.DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Load.getValue(1));
13449       return Load;
13450     }
13451   }
13452 
13453   // Only the bottom 16 bits of the source register are used.
13454   APInt DemandedMask = APInt::getLowBitsSet(32, 16);
13455   const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo();
13456   if (TLI.SimplifyDemandedBits(Op0, DemandedMask, DCI))
13457     return SDValue(N, 0);
13458 
13459   return SDValue();
13460 }
13461 
13462 static SDValue PerformVMOVrhCombine(SDNode *N,
13463                                     TargetLowering::DAGCombinerInfo &DCI) {
13464   SDValue N0 = N->getOperand(0);
13465   EVT VT = N->getValueType(0);
13466 
13467   // fold (VMOVrh (load x)) -> (zextload (i16*)x)
13468   if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse()) {
13469     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
13470 
13471     SDValue Load =
13472         DCI.DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, LN0->getChain(),
13473                            LN0->getBasePtr(), MVT::i16, LN0->getMemOperand());
13474     DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0));
13475     DCI.DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
13476     return Load;
13477   }
13478 
13479   // Fold VMOVrh(extract(x, n)) -> vgetlaneu(x, n)
13480   if (N0->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
13481       isa<ConstantSDNode>(N0->getOperand(1)))
13482     return DCI.DAG.getNode(ARMISD::VGETLANEu, SDLoc(N), VT, N0->getOperand(0),
13483                            N0->getOperand(1));
13484 
13485   return SDValue();
13486 }
13487 
13488 /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
13489 /// are normal, non-volatile loads.  If so, it is profitable to bitcast an
13490 /// i64 vector to have f64 elements, since the value can then be loaded
13491 /// directly into a VFP register.
13492 static bool hasNormalLoadOperand(SDNode *N) {
13493   unsigned NumElts = N->getValueType(0).getVectorNumElements();
13494   for (unsigned i = 0; i < NumElts; ++i) {
13495     SDNode *Elt = N->getOperand(i).getNode();
13496     if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile())
13497       return true;
13498   }
13499   return false;
13500 }
13501 
13502 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
13503 /// ISD::BUILD_VECTOR.
13504 static SDValue PerformBUILD_VECTORCombine(SDNode *N,
13505                                           TargetLowering::DAGCombinerInfo &DCI,
13506                                           const ARMSubtarget *Subtarget) {
13507   // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
13508   // VMOVRRD is introduced when legalizing i64 types.  It forces the i64 value
13509   // into a pair of GPRs, which is fine when the value is used as a scalar,
13510   // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
13511   SelectionDAG &DAG = DCI.DAG;
13512   if (N->getNumOperands() == 2)
13513     if (SDValue RV = PerformVMOVDRRCombine(N, DAG))
13514       return RV;
13515 
13516   // Load i64 elements as f64 values so that type legalization does not split
13517   // them up into i32 values.
13518   EVT VT = N->getValueType(0);
13519   if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
13520     return SDValue();
13521   SDLoc dl(N);
13522   SmallVector<SDValue, 8> Ops;
13523   unsigned NumElts = VT.getVectorNumElements();
13524   for (unsigned i = 0; i < NumElts; ++i) {
13525     SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i));
13526     Ops.push_back(V);
13527     // Make the DAGCombiner fold the bitcast.
13528     DCI.AddToWorklist(V.getNode());
13529   }
13530   EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
13531   SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops);
13532   return DAG.getNode(ISD::BITCAST, dl, VT, BV);
13533 }
13534 
13535 /// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
13536 static SDValue
13537 PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
13538   // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR.
13539   // At that time, we may have inserted bitcasts from integer to float.
13540   // If these bitcasts have survived DAGCombine, change the lowering of this
13541   // BUILD_VECTOR in something more vector friendly, i.e., that does not
13542   // force to use floating point types.
13543 
13544   // Make sure we can change the type of the vector.
13545   // This is possible iff:
13546   // 1. The vector is only used in a bitcast to a integer type. I.e.,
13547   //    1.1. Vector is used only once.
13548   //    1.2. Use is a bit convert to an integer type.
13549   // 2. The size of its operands are 32-bits (64-bits are not legal).
13550   EVT VT = N->getValueType(0);
13551   EVT EltVT = VT.getVectorElementType();
13552 
13553   // Check 1.1. and 2.
13554   if (EltVT.getSizeInBits() != 32 || !N->hasOneUse())
13555     return SDValue();
13556 
13557   // By construction, the input type must be float.
13558   assert(EltVT == MVT::f32 && "Unexpected type!");
13559 
13560   // Check 1.2.
13561   SDNode *Use = *N->use_begin();
13562   if (Use->getOpcode() != ISD::BITCAST ||
13563       Use->getValueType(0).isFloatingPoint())
13564     return SDValue();
13565 
13566   // Check profitability.
13567   // Model is, if more than half of the relevant operands are bitcast from
13568   // i32, turn the build_vector into a sequence of insert_vector_elt.
13569   // Relevant operands are everything that is not statically
13570   // (i.e., at compile time) bitcasted.
13571   unsigned NumOfBitCastedElts = 0;
13572   unsigned NumElts = VT.getVectorNumElements();
13573   unsigned NumOfRelevantElts = NumElts;
13574   for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
13575     SDValue Elt = N->getOperand(Idx);
13576     if (Elt->getOpcode() == ISD::BITCAST) {
13577       // Assume only bit cast to i32 will go away.
13578       if (Elt->getOperand(0).getValueType() == MVT::i32)
13579         ++NumOfBitCastedElts;
13580     } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt))
13581       // Constants are statically casted, thus do not count them as
13582       // relevant operands.
13583       --NumOfRelevantElts;
13584   }
13585 
13586   // Check if more than half of the elements require a non-free bitcast.
13587   if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
13588     return SDValue();
13589 
13590   SelectionDAG &DAG = DCI.DAG;
13591   // Create the new vector type.
13592   EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
13593   // Check if the type is legal.
13594   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13595   if (!TLI.isTypeLegal(VecVT))
13596     return SDValue();
13597 
13598   // Combine:
13599   // ARMISD::BUILD_VECTOR E1, E2, ..., EN.
13600   // => BITCAST INSERT_VECTOR_ELT
13601   //                      (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1),
13602   //                      (BITCAST EN), N.
13603   SDValue Vec = DAG.getUNDEF(VecVT);
13604   SDLoc dl(N);
13605   for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
13606     SDValue V = N->getOperand(Idx);
13607     if (V.isUndef())
13608       continue;
13609     if (V.getOpcode() == ISD::BITCAST &&
13610         V->getOperand(0).getValueType() == MVT::i32)
13611       // Fold obvious case.
13612       V = V.getOperand(0);
13613     else {
13614       V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V);
13615       // Make the DAGCombiner fold the bitcasts.
13616       DCI.AddToWorklist(V.getNode());
13617     }
13618     SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32);
13619     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx);
13620   }
13621   Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec);
13622   // Make the DAGCombiner fold the bitcasts.
13623   DCI.AddToWorklist(Vec.getNode());
13624   return Vec;
13625 }
13626 
13627 static SDValue
13628 PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
13629   EVT VT = N->getValueType(0);
13630   SDValue Op = N->getOperand(0);
13631   SDLoc dl(N);
13632 
13633   // PREDICATE_CAST(PREDICATE_CAST(x)) == PREDICATE_CAST(x)
13634   if (Op->getOpcode() == ARMISD::PREDICATE_CAST) {
13635     // If the valuetypes are the same, we can remove the cast entirely.
13636     if (Op->getOperand(0).getValueType() == VT)
13637       return Op->getOperand(0);
13638     return DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, Op->getOperand(0));
13639   }
13640 
13641   return SDValue();
13642 }
13643 
13644 static SDValue
13645 PerformVECTOR_REG_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
13646                               const ARMSubtarget *ST) {
13647   EVT VT = N->getValueType(0);
13648   SDValue Op = N->getOperand(0);
13649   SDLoc dl(N);
13650 
13651   // Under Little endian, a VECTOR_REG_CAST is equivalent to a BITCAST
13652   if (ST->isLittle())
13653     return DCI.DAG.getNode(ISD::BITCAST, dl, VT, Op);
13654 
13655   // VECTOR_REG_CAST(VECTOR_REG_CAST(x)) == VECTOR_REG_CAST(x)
13656   if (Op->getOpcode() == ARMISD::VECTOR_REG_CAST) {
13657     // If the valuetypes are the same, we can remove the cast entirely.
13658     if (Op->getOperand(0).getValueType() == VT)
13659       return Op->getOperand(0);
13660     return DCI.DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, Op->getOperand(0));
13661   }
13662 
13663   return SDValue();
13664 }
13665 
13666 static SDValue PerformVCMPCombine(SDNode *N,
13667                                   TargetLowering::DAGCombinerInfo &DCI,
13668                                   const ARMSubtarget *Subtarget) {
13669   if (!Subtarget->hasMVEIntegerOps())
13670     return SDValue();
13671 
13672   EVT VT = N->getValueType(0);
13673   SDValue Op0 = N->getOperand(0);
13674   SDValue Op1 = N->getOperand(1);
13675   ARMCC::CondCodes Cond =
13676       (ARMCC::CondCodes)cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
13677   SDLoc dl(N);
13678 
13679   // vcmp X, 0, cc -> vcmpz X, cc
13680   if (isZeroVector(Op1))
13681     return DCI.DAG.getNode(ARMISD::VCMPZ, dl, VT, Op0,
13682                            N->getOperand(2));
13683 
13684   unsigned SwappedCond = getSwappedCondition(Cond);
13685   if (isValidMVECond(SwappedCond, VT.isFloatingPoint())) {
13686     // vcmp 0, X, cc -> vcmpz X, reversed(cc)
13687     if (isZeroVector(Op0))
13688       return DCI.DAG.getNode(ARMISD::VCMPZ, dl, VT, Op1,
13689                              DCI.DAG.getConstant(SwappedCond, dl, MVT::i32));
13690     // vcmp vdup(Y), X, cc -> vcmp X, vdup(Y), reversed(cc)
13691     if (Op0->getOpcode() == ARMISD::VDUP && Op1->getOpcode() != ARMISD::VDUP)
13692       return DCI.DAG.getNode(ARMISD::VCMP, dl, VT, Op1, Op0,
13693                              DCI.DAG.getConstant(SwappedCond, dl, MVT::i32));
13694   }
13695 
13696   return SDValue();
13697 }
13698 
13699 /// PerformInsertEltCombine - Target-specific dag combine xforms for
13700 /// ISD::INSERT_VECTOR_ELT.
13701 static SDValue PerformInsertEltCombine(SDNode *N,
13702                                        TargetLowering::DAGCombinerInfo &DCI) {
13703   // Bitcast an i64 load inserted into a vector to f64.
13704   // Otherwise, the i64 value will be legalized to a pair of i32 values.
13705   EVT VT = N->getValueType(0);
13706   SDNode *Elt = N->getOperand(1).getNode();
13707   if (VT.getVectorElementType() != MVT::i64 ||
13708       !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile())
13709     return SDValue();
13710 
13711   SelectionDAG &DAG = DCI.DAG;
13712   SDLoc dl(N);
13713   EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
13714                                  VT.getVectorNumElements());
13715   SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
13716   SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1));
13717   // Make the DAGCombiner fold the bitcasts.
13718   DCI.AddToWorklist(Vec.getNode());
13719   DCI.AddToWorklist(V.getNode());
13720   SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT,
13721                                Vec, V, N->getOperand(2));
13722   return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
13723 }
13724 
13725 static SDValue PerformExtractEltCombine(SDNode *N,
13726                                         TargetLowering::DAGCombinerInfo &DCI) {
13727   SDValue Op0 = N->getOperand(0);
13728   EVT VT = N->getValueType(0);
13729   SDLoc dl(N);
13730 
13731   // extract (vdup x) -> x
13732   if (Op0->getOpcode() == ARMISD::VDUP) {
13733     SDValue X = Op0->getOperand(0);
13734     if (VT == MVT::f16 && X.getValueType() == MVT::i32)
13735       return DCI.DAG.getNode(ARMISD::VMOVhr, dl, VT, X);
13736     if (VT == MVT::i32 && X.getValueType() == MVT::f16)
13737       return DCI.DAG.getNode(ARMISD::VMOVrh, dl, VT, X);
13738 
13739     while (X.getValueType() != VT && X->getOpcode() == ISD::BITCAST)
13740       X = X->getOperand(0);
13741     if (X.getValueType() == VT)
13742       return X;
13743   }
13744 
13745   return SDValue();
13746 }
13747 
13748 /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
13749 /// ISD::VECTOR_SHUFFLE.
13750 static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
13751   // The LLVM shufflevector instruction does not require the shuffle mask
13752   // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
13753   // have that requirement.  When translating to ISD::VECTOR_SHUFFLE, if the
13754   // operands do not match the mask length, they are extended by concatenating
13755   // them with undef vectors.  That is probably the right thing for other
13756   // targets, but for NEON it is better to concatenate two double-register
13757   // size vector operands into a single quad-register size vector.  Do that
13758   // transformation here:
13759   //   shuffle(concat(v1, undef), concat(v2, undef)) ->
13760   //   shuffle(concat(v1, v2), undef)
13761   SDValue Op0 = N->getOperand(0);
13762   SDValue Op1 = N->getOperand(1);
13763   if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
13764       Op1.getOpcode() != ISD::CONCAT_VECTORS ||
13765       Op0.getNumOperands() != 2 ||
13766       Op1.getNumOperands() != 2)
13767     return SDValue();
13768   SDValue Concat0Op1 = Op0.getOperand(1);
13769   SDValue Concat1Op1 = Op1.getOperand(1);
13770   if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef())
13771     return SDValue();
13772   // Skip the transformation if any of the types are illegal.
13773   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13774   EVT VT = N->getValueType(0);
13775   if (!TLI.isTypeLegal(VT) ||
13776       !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
13777       !TLI.isTypeLegal(Concat1Op1.getValueType()))
13778     return SDValue();
13779 
13780   SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
13781                                   Op0.getOperand(0), Op1.getOperand(0));
13782   // Translate the shuffle mask.
13783   SmallVector<int, 16> NewMask;
13784   unsigned NumElts = VT.getVectorNumElements();
13785   unsigned HalfElts = NumElts/2;
13786   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
13787   for (unsigned n = 0; n < NumElts; ++n) {
13788     int MaskElt = SVN->getMaskElt(n);
13789     int NewElt = -1;
13790     if (MaskElt < (int)HalfElts)
13791       NewElt = MaskElt;
13792     else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
13793       NewElt = HalfElts + MaskElt - NumElts;
13794     NewMask.push_back(NewElt);
13795   }
13796   return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat,
13797                               DAG.getUNDEF(VT), NewMask);
13798 }
13799 
13800 /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP,
13801 /// NEON load/store intrinsics, and generic vector load/stores, to merge
13802 /// base address updates.
13803 /// For generic load/stores, the memory type is assumed to be a vector.
13804 /// The caller is assumed to have checked legality.
13805 static SDValue CombineBaseUpdate(SDNode *N,
13806                                  TargetLowering::DAGCombinerInfo &DCI) {
13807   SelectionDAG &DAG = DCI.DAG;
13808   const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
13809                             N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
13810   const bool isStore = N->getOpcode() == ISD::STORE;
13811   const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
13812   SDValue Addr = N->getOperand(AddrOpIdx);
13813   MemSDNode *MemN = cast<MemSDNode>(N);
13814   SDLoc dl(N);
13815 
13816   // Search for a use of the address operand that is an increment.
13817   for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
13818          UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
13819     SDNode *User = *UI;
13820     if (User->getOpcode() != ISD::ADD ||
13821         UI.getUse().getResNo() != Addr.getResNo())
13822       continue;
13823 
13824     // Check that the add is independent of the load/store.  Otherwise, folding
13825     // it would create a cycle. We can avoid searching through Addr as it's a
13826     // predecessor to both.
13827     SmallPtrSet<const SDNode *, 32> Visited;
13828     SmallVector<const SDNode *, 16> Worklist;
13829     Visited.insert(Addr.getNode());
13830     Worklist.push_back(N);
13831     Worklist.push_back(User);
13832     if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
13833         SDNode::hasPredecessorHelper(User, Visited, Worklist))
13834       continue;
13835 
13836     // Find the new opcode for the updating load/store.
13837     bool isLoadOp = true;
13838     bool isLaneOp = false;
13839     unsigned NewOpc = 0;
13840     unsigned NumVecs = 0;
13841     if (isIntrinsic) {
13842       unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
13843       switch (IntNo) {
13844       default: llvm_unreachable("unexpected intrinsic for Neon base update");
13845       case Intrinsic::arm_neon_vld1:     NewOpc = ARMISD::VLD1_UPD;
13846         NumVecs = 1; break;
13847       case Intrinsic::arm_neon_vld2:     NewOpc = ARMISD::VLD2_UPD;
13848         NumVecs = 2; break;
13849       case Intrinsic::arm_neon_vld3:     NewOpc = ARMISD::VLD3_UPD;
13850         NumVecs = 3; break;
13851       case Intrinsic::arm_neon_vld4:     NewOpc = ARMISD::VLD4_UPD;
13852         NumVecs = 4; break;
13853       case Intrinsic::arm_neon_vld2dup:
13854       case Intrinsic::arm_neon_vld3dup:
13855       case Intrinsic::arm_neon_vld4dup:
13856         // TODO: Support updating VLDxDUP nodes. For now, we just skip
13857         // combining base updates for such intrinsics.
13858         continue;
13859       case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD;
13860         NumVecs = 2; isLaneOp = true; break;
13861       case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD;
13862         NumVecs = 3; isLaneOp = true; break;
13863       case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD;
13864         NumVecs = 4; isLaneOp = true; break;
13865       case Intrinsic::arm_neon_vst1:     NewOpc = ARMISD::VST1_UPD;
13866         NumVecs = 1; isLoadOp = false; break;
13867       case Intrinsic::arm_neon_vst2:     NewOpc = ARMISD::VST2_UPD;
13868         NumVecs = 2; isLoadOp = false; break;
13869       case Intrinsic::arm_neon_vst3:     NewOpc = ARMISD::VST3_UPD;
13870         NumVecs = 3; isLoadOp = false; break;
13871       case Intrinsic::arm_neon_vst4:     NewOpc = ARMISD::VST4_UPD;
13872         NumVecs = 4; isLoadOp = false; break;
13873       case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD;
13874         NumVecs = 2; isLoadOp = false; isLaneOp = true; break;
13875       case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD;
13876         NumVecs = 3; isLoadOp = false; isLaneOp = true; break;
13877       case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD;
13878         NumVecs = 4; isLoadOp = false; isLaneOp = true; break;
13879       }
13880     } else {
13881       isLaneOp = true;
13882       switch (N->getOpcode()) {
13883       default: llvm_unreachable("unexpected opcode for Neon base update");
13884       case ARMISD::VLD1DUP: NewOpc = ARMISD::VLD1DUP_UPD; NumVecs = 1; break;
13885       case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
13886       case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
13887       case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
13888       case ISD::LOAD:       NewOpc = ARMISD::VLD1_UPD;
13889         NumVecs = 1; isLaneOp = false; break;
13890       case ISD::STORE:      NewOpc = ARMISD::VST1_UPD;
13891         NumVecs = 1; isLaneOp = false; isLoadOp = false; break;
13892       }
13893     }
13894 
13895     // Find the size of memory referenced by the load/store.
13896     EVT VecTy;
13897     if (isLoadOp) {
13898       VecTy = N->getValueType(0);
13899     } else if (isIntrinsic) {
13900       VecTy = N->getOperand(AddrOpIdx+1).getValueType();
13901     } else {
13902       assert(isStore && "Node has to be a load, a store, or an intrinsic!");
13903       VecTy = N->getOperand(1).getValueType();
13904     }
13905 
13906     unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
13907     if (isLaneOp)
13908       NumBytes /= VecTy.getVectorNumElements();
13909 
13910     // If the increment is a constant, it must match the memory ref size.
13911     SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
13912     ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode());
13913     if (NumBytes >= 3 * 16 && (!CInc || CInc->getZExtValue() != NumBytes)) {
13914       // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two
13915       // separate instructions that make it harder to use a non-constant update.
13916       continue;
13917     }
13918 
13919     // OK, we found an ADD we can fold into the base update.
13920     // Now, create a _UPD node, taking care of not breaking alignment.
13921 
13922     EVT AlignedVecTy = VecTy;
13923     unsigned Alignment = MemN->getAlignment();
13924 
13925     // If this is a less-than-standard-aligned load/store, change the type to
13926     // match the standard alignment.
13927     // The alignment is overlooked when selecting _UPD variants; and it's
13928     // easier to introduce bitcasts here than fix that.
13929     // There are 3 ways to get to this base-update combine:
13930     // - intrinsics: they are assumed to be properly aligned (to the standard
13931     //   alignment of the memory type), so we don't need to do anything.
13932     // - ARMISD::VLDx nodes: they are only generated from the aforementioned
13933     //   intrinsics, so, likewise, there's nothing to do.
13934     // - generic load/store instructions: the alignment is specified as an
13935     //   explicit operand, rather than implicitly as the standard alignment
13936     //   of the memory type (like the intrisics).  We need to change the
13937     //   memory type to match the explicit alignment.  That way, we don't
13938     //   generate non-standard-aligned ARMISD::VLDx nodes.
13939     if (isa<LSBaseSDNode>(N)) {
13940       if (Alignment == 0)
13941         Alignment = 1;
13942       if (Alignment < VecTy.getScalarSizeInBits() / 8) {
13943         MVT EltTy = MVT::getIntegerVT(Alignment * 8);
13944         assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
13945         assert(!isLaneOp && "Unexpected generic load/store lane.");
13946         unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
13947         AlignedVecTy = MVT::getVectorVT(EltTy, NumElts);
13948       }
13949       // Don't set an explicit alignment on regular load/stores that we want
13950       // to transform to VLD/VST 1_UPD nodes.
13951       // This matches the behavior of regular load/stores, which only get an
13952       // explicit alignment if the MMO alignment is larger than the standard
13953       // alignment of the memory type.
13954       // Intrinsics, however, always get an explicit alignment, set to the
13955       // alignment of the MMO.
13956       Alignment = 1;
13957     }
13958 
13959     // Create the new updating load/store node.
13960     // First, create an SDVTList for the new updating node's results.
13961     EVT Tys[6];
13962     unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
13963     unsigned n;
13964     for (n = 0; n < NumResultVecs; ++n)
13965       Tys[n] = AlignedVecTy;
13966     Tys[n++] = MVT::i32;
13967     Tys[n] = MVT::Other;
13968     SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2));
13969 
13970     // Then, gather the new node's operands.
13971     SmallVector<SDValue, 8> Ops;
13972     Ops.push_back(N->getOperand(0)); // incoming chain
13973     Ops.push_back(N->getOperand(AddrOpIdx));
13974     Ops.push_back(Inc);
13975 
13976     if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
13977       // Try to match the intrinsic's signature
13978       Ops.push_back(StN->getValue());
13979     } else {
13980       // Loads (and of course intrinsics) match the intrinsics' signature,
13981       // so just add all but the alignment operand.
13982       for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i)
13983         Ops.push_back(N->getOperand(i));
13984     }
13985 
13986     // For all node types, the alignment operand is always the last one.
13987     Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32));
13988 
13989     // If this is a non-standard-aligned STORE, the penultimate operand is the
13990     // stored value.  Bitcast it to the aligned type.
13991     if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) {
13992       SDValue &StVal = Ops[Ops.size()-2];
13993       StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal);
13994     }
13995 
13996     EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy;
13997     SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT,
13998                                            MemN->getMemOperand());
13999 
14000     // Update the uses.
14001     SmallVector<SDValue, 5> NewResults;
14002     for (unsigned i = 0; i < NumResultVecs; ++i)
14003       NewResults.push_back(SDValue(UpdN.getNode(), i));
14004 
14005     // If this is an non-standard-aligned LOAD, the first result is the loaded
14006     // value.  Bitcast it to the expected result type.
14007     if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) {
14008       SDValue &LdVal = NewResults[0];
14009       LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal);
14010     }
14011 
14012     NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
14013     DCI.CombineTo(N, NewResults);
14014     DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
14015 
14016     break;
14017   }
14018   return SDValue();
14019 }
14020 
14021 static SDValue PerformVLDCombine(SDNode *N,
14022                                  TargetLowering::DAGCombinerInfo &DCI) {
14023   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
14024     return SDValue();
14025 
14026   return CombineBaseUpdate(N, DCI);
14027 }
14028 
14029 static SDValue PerformMVEVLDCombine(SDNode *N,
14030                                     TargetLowering::DAGCombinerInfo &DCI) {
14031   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
14032     return SDValue();
14033 
14034   SelectionDAG &DAG = DCI.DAG;
14035   SDValue Addr = N->getOperand(2);
14036   MemSDNode *MemN = cast<MemSDNode>(N);
14037   SDLoc dl(N);
14038 
14039   // For the stores, where there are multiple intrinsics we only actually want
14040   // to post-inc the last of the them.
14041   unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
14042   if (IntNo == Intrinsic::arm_mve_vst2q &&
14043       cast<ConstantSDNode>(N->getOperand(5))->getZExtValue() != 1)
14044     return SDValue();
14045   if (IntNo == Intrinsic::arm_mve_vst4q &&
14046       cast<ConstantSDNode>(N->getOperand(7))->getZExtValue() != 3)
14047     return SDValue();
14048 
14049   // Search for a use of the address operand that is an increment.
14050   for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
14051                             UE = Addr.getNode()->use_end();
14052        UI != UE; ++UI) {
14053     SDNode *User = *UI;
14054     if (User->getOpcode() != ISD::ADD ||
14055         UI.getUse().getResNo() != Addr.getResNo())
14056       continue;
14057 
14058     // Check that the add is independent of the load/store.  Otherwise, folding
14059     // it would create a cycle. We can avoid searching through Addr as it's a
14060     // predecessor to both.
14061     SmallPtrSet<const SDNode *, 32> Visited;
14062     SmallVector<const SDNode *, 16> Worklist;
14063     Visited.insert(Addr.getNode());
14064     Worklist.push_back(N);
14065     Worklist.push_back(User);
14066     if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
14067         SDNode::hasPredecessorHelper(User, Visited, Worklist))
14068       continue;
14069 
14070     // Find the new opcode for the updating load/store.
14071     bool isLoadOp = true;
14072     unsigned NewOpc = 0;
14073     unsigned NumVecs = 0;
14074     switch (IntNo) {
14075     default:
14076       llvm_unreachable("unexpected intrinsic for MVE VLDn combine");
14077     case Intrinsic::arm_mve_vld2q:
14078       NewOpc = ARMISD::VLD2_UPD;
14079       NumVecs = 2;
14080       break;
14081     case Intrinsic::arm_mve_vld4q:
14082       NewOpc = ARMISD::VLD4_UPD;
14083       NumVecs = 4;
14084       break;
14085     case Intrinsic::arm_mve_vst2q:
14086       NewOpc = ARMISD::VST2_UPD;
14087       NumVecs = 2;
14088       isLoadOp = false;
14089       break;
14090     case Intrinsic::arm_mve_vst4q:
14091       NewOpc = ARMISD::VST4_UPD;
14092       NumVecs = 4;
14093       isLoadOp = false;
14094       break;
14095     }
14096 
14097     // Find the size of memory referenced by the load/store.
14098     EVT VecTy;
14099     if (isLoadOp) {
14100       VecTy = N->getValueType(0);
14101     } else {
14102       VecTy = N->getOperand(3).getValueType();
14103     }
14104 
14105     unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
14106 
14107     // If the increment is a constant, it must match the memory ref size.
14108     SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
14109     ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode());
14110     if (!CInc || CInc->getZExtValue() != NumBytes)
14111       continue;
14112 
14113     // Create the new updating load/store node.
14114     // First, create an SDVTList for the new updating node's results.
14115     EVT Tys[6];
14116     unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
14117     unsigned n;
14118     for (n = 0; n < NumResultVecs; ++n)
14119       Tys[n] = VecTy;
14120     Tys[n++] = MVT::i32;
14121     Tys[n] = MVT::Other;
14122     SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2));
14123 
14124     // Then, gather the new node's operands.
14125     SmallVector<SDValue, 8> Ops;
14126     Ops.push_back(N->getOperand(0)); // incoming chain
14127     Ops.push_back(N->getOperand(2)); // ptr
14128     Ops.push_back(Inc);
14129 
14130     for (unsigned i = 3; i < N->getNumOperands(); ++i)
14131       Ops.push_back(N->getOperand(i));
14132 
14133     SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, VecTy,
14134                                            MemN->getMemOperand());
14135 
14136     // Update the uses.
14137     SmallVector<SDValue, 5> NewResults;
14138     for (unsigned i = 0; i < NumResultVecs; ++i)
14139       NewResults.push_back(SDValue(UpdN.getNode(), i));
14140 
14141     NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain
14142     DCI.CombineTo(N, NewResults);
14143     DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
14144 
14145     break;
14146   }
14147 
14148   return SDValue();
14149 }
14150 
14151 /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
14152 /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
14153 /// are also VDUPLANEs.  If so, combine them to a vldN-dup operation and
14154 /// return true.
14155 static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
14156   SelectionDAG &DAG = DCI.DAG;
14157   EVT VT = N->getValueType(0);
14158   // vldN-dup instructions only support 64-bit vectors for N > 1.
14159   if (!VT.is64BitVector())
14160     return false;
14161 
14162   // Check if the VDUPLANE operand is a vldN-dup intrinsic.
14163   SDNode *VLD = N->getOperand(0).getNode();
14164   if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
14165     return false;
14166   unsigned NumVecs = 0;
14167   unsigned NewOpc = 0;
14168   unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
14169   if (IntNo == Intrinsic::arm_neon_vld2lane) {
14170     NumVecs = 2;
14171     NewOpc = ARMISD::VLD2DUP;
14172   } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
14173     NumVecs = 3;
14174     NewOpc = ARMISD::VLD3DUP;
14175   } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
14176     NumVecs = 4;
14177     NewOpc = ARMISD::VLD4DUP;
14178   } else {
14179     return false;
14180   }
14181 
14182   // First check that all the vldN-lane uses are VDUPLANEs and that the lane
14183   // numbers match the load.
14184   unsigned VLDLaneNo =
14185     cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
14186   for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
14187        UI != UE; ++UI) {
14188     // Ignore uses of the chain result.
14189     if (UI.getUse().getResNo() == NumVecs)
14190       continue;
14191     SDNode *User = *UI;
14192     if (User->getOpcode() != ARMISD::VDUPLANE ||
14193         VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
14194       return false;
14195   }
14196 
14197   // Create the vldN-dup node.
14198   EVT Tys[5];
14199   unsigned n;
14200   for (n = 0; n < NumVecs; ++n)
14201     Tys[n] = VT;
14202   Tys[n] = MVT::Other;
14203   SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1));
14204   SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
14205   MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
14206   SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys,
14207                                            Ops, VLDMemInt->getMemoryVT(),
14208                                            VLDMemInt->getMemOperand());
14209 
14210   // Update the uses.
14211   for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
14212        UI != UE; ++UI) {
14213     unsigned ResNo = UI.getUse().getResNo();
14214     // Ignore uses of the chain result.
14215     if (ResNo == NumVecs)
14216       continue;
14217     SDNode *User = *UI;
14218     DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
14219   }
14220 
14221   // Now the vldN-lane intrinsic is dead except for its chain result.
14222   // Update uses of the chain.
14223   std::vector<SDValue> VLDDupResults;
14224   for (unsigned n = 0; n < NumVecs; ++n)
14225     VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
14226   VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
14227   DCI.CombineTo(VLD, VLDDupResults);
14228 
14229   return true;
14230 }
14231 
14232 /// PerformVDUPLANECombine - Target-specific dag combine xforms for
14233 /// ARMISD::VDUPLANE.
14234 static SDValue PerformVDUPLANECombine(SDNode *N,
14235                                       TargetLowering::DAGCombinerInfo &DCI,
14236                                       const ARMSubtarget *Subtarget) {
14237   SDValue Op = N->getOperand(0);
14238   EVT VT = N->getValueType(0);
14239 
14240   // On MVE, we just convert the VDUPLANE to a VDUP with an extract.
14241   if (Subtarget->hasMVEIntegerOps()) {
14242     EVT ExtractVT = VT.getVectorElementType();
14243     // We need to ensure we are creating a legal type.
14244     if (!DCI.DAG.getTargetLoweringInfo().isTypeLegal(ExtractVT))
14245       ExtractVT = MVT::i32;
14246     SDValue Extract = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), ExtractVT,
14247                               N->getOperand(0), N->getOperand(1));
14248     return DCI.DAG.getNode(ARMISD::VDUP, SDLoc(N), VT, Extract);
14249   }
14250 
14251   // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
14252   // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
14253   if (CombineVLDDUP(N, DCI))
14254     return SDValue(N, 0);
14255 
14256   // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
14257   // redundant.  Ignore bit_converts for now; element sizes are checked below.
14258   while (Op.getOpcode() == ISD::BITCAST)
14259     Op = Op.getOperand(0);
14260   if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
14261     return SDValue();
14262 
14263   // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
14264   unsigned EltSize = Op.getScalarValueSizeInBits();
14265   // The canonical VMOV for a zero vector uses a 32-bit element size.
14266   unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
14267   unsigned EltBits;
14268   if (ARM_AM::decodeVMOVModImm(Imm, EltBits) == 0)
14269     EltSize = 8;
14270   if (EltSize > VT.getScalarSizeInBits())
14271     return SDValue();
14272 
14273   return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
14274 }
14275 
14276 /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
14277 static SDValue PerformVDUPCombine(SDNode *N,
14278                                   TargetLowering::DAGCombinerInfo &DCI,
14279                                   const ARMSubtarget *Subtarget) {
14280   SelectionDAG &DAG = DCI.DAG;
14281   SDValue Op = N->getOperand(0);
14282   SDLoc dl(N);
14283 
14284   if (Subtarget->hasMVEIntegerOps()) {
14285     // Convert VDUP f32 -> VDUP BITCAST i32 under MVE, as we know the value will
14286     // need to come from a GPR.
14287     if (Op.getValueType() == MVT::f32)
14288       return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0),
14289                              DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op));
14290     else if (Op.getValueType() == MVT::f16)
14291       return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0),
14292                              DAG.getNode(ARMISD::VMOVrh, dl, MVT::i32, Op));
14293   }
14294 
14295   if (!Subtarget->hasNEON())
14296     return SDValue();
14297 
14298   // Match VDUP(LOAD) -> VLD1DUP.
14299   // We match this pattern here rather than waiting for isel because the
14300   // transform is only legal for unindexed loads.
14301   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode());
14302   if (LD && Op.hasOneUse() && LD->isUnindexed() &&
14303       LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) {
14304     SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1),
14305                       DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32) };
14306     SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other);
14307     SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys,
14308                                              Ops, LD->getMemoryVT(),
14309                                              LD->getMemOperand());
14310     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1));
14311     return VLDDup;
14312   }
14313 
14314   return SDValue();
14315 }
14316 
14317 static SDValue PerformLOADCombine(SDNode *N,
14318                                   TargetLowering::DAGCombinerInfo &DCI) {
14319   EVT VT = N->getValueType(0);
14320 
14321   // If this is a legal vector load, try to combine it into a VLD1_UPD.
14322   if (ISD::isNormalLoad(N) && VT.isVector() &&
14323       DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
14324     return CombineBaseUpdate(N, DCI);
14325 
14326   return SDValue();
14327 }
14328 
14329 // Optimize trunc store (of multiple scalars) to shuffle and store.  First,
14330 // pack all of the elements in one place.  Next, store to memory in fewer
14331 // chunks.
14332 static SDValue PerformTruncatingStoreCombine(StoreSDNode *St,
14333                                              SelectionDAG &DAG) {
14334   SDValue StVal = St->getValue();
14335   EVT VT = StVal.getValueType();
14336   if (!St->isTruncatingStore() || !VT.isVector())
14337     return SDValue();
14338   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14339   EVT StVT = St->getMemoryVT();
14340   unsigned NumElems = VT.getVectorNumElements();
14341   assert(StVT != VT && "Cannot truncate to the same type");
14342   unsigned FromEltSz = VT.getScalarSizeInBits();
14343   unsigned ToEltSz = StVT.getScalarSizeInBits();
14344 
14345   // From, To sizes and ElemCount must be pow of two
14346   if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz))
14347     return SDValue();
14348 
14349   // We are going to use the original vector elt for storing.
14350   // Accumulated smaller vector elements must be a multiple of the store size.
14351   if (0 != (NumElems * FromEltSz) % ToEltSz)
14352     return SDValue();
14353 
14354   unsigned SizeRatio = FromEltSz / ToEltSz;
14355   assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());
14356 
14357   // Create a type on which we perform the shuffle.
14358   EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(),
14359                                    NumElems * SizeRatio);
14360   assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
14361 
14362   SDLoc DL(St);
14363   SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
14364   SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
14365   for (unsigned i = 0; i < NumElems; ++i)
14366     ShuffleVec[i] = DAG.getDataLayout().isBigEndian() ? (i + 1) * SizeRatio - 1
14367                                                       : i * SizeRatio;
14368 
14369   // Can't shuffle using an illegal type.
14370   if (!TLI.isTypeLegal(WideVecVT))
14371     return SDValue();
14372 
14373   SDValue Shuff = DAG.getVectorShuffle(
14374       WideVecVT, DL, WideVec, DAG.getUNDEF(WideVec.getValueType()), ShuffleVec);
14375   // At this point all of the data is stored at the bottom of the
14376   // register. We now need to save it to mem.
14377 
14378   // Find the largest store unit
14379   MVT StoreType = MVT::i8;
14380   for (MVT Tp : MVT::integer_valuetypes()) {
14381     if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
14382       StoreType = Tp;
14383   }
14384   // Didn't find a legal store type.
14385   if (!TLI.isTypeLegal(StoreType))
14386     return SDValue();
14387 
14388   // Bitcast the original vector into a vector of store-size units
14389   EVT StoreVecVT =
14390       EVT::getVectorVT(*DAG.getContext(), StoreType,
14391                        VT.getSizeInBits() / EVT(StoreType).getSizeInBits());
14392   assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
14393   SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
14394   SmallVector<SDValue, 8> Chains;
14395   SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL,
14396                                       TLI.getPointerTy(DAG.getDataLayout()));
14397   SDValue BasePtr = St->getBasePtr();
14398 
14399   // Perform one or more big stores into memory.
14400   unsigned E = (ToEltSz * NumElems) / StoreType.getSizeInBits();
14401   for (unsigned I = 0; I < E; I++) {
14402     SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreType,
14403                                  ShuffWide, DAG.getIntPtrConstant(I, DL));
14404     SDValue Ch =
14405         DAG.getStore(St->getChain(), DL, SubVec, BasePtr, St->getPointerInfo(),
14406                      St->getAlignment(), St->getMemOperand()->getFlags());
14407     BasePtr =
14408         DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, Increment);
14409     Chains.push_back(Ch);
14410   }
14411   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
14412 }
14413 
14414 // Try taking a single vector store from an truncate (which would otherwise turn
14415 // into an expensive buildvector) and splitting it into a series of narrowing
14416 // stores.
14417 static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St,
14418                                                  SelectionDAG &DAG) {
14419   if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed())
14420     return SDValue();
14421   SDValue Trunc = St->getValue();
14422   if (Trunc->getOpcode() != ISD::TRUNCATE && Trunc->getOpcode() != ISD::FP_ROUND)
14423     return SDValue();
14424   EVT FromVT = Trunc->getOperand(0).getValueType();
14425   EVT ToVT = Trunc.getValueType();
14426   if (!ToVT.isVector())
14427     return SDValue();
14428   assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements());
14429   EVT ToEltVT = ToVT.getVectorElementType();
14430   EVT FromEltVT = FromVT.getVectorElementType();
14431 
14432   unsigned NumElements = 0;
14433   if (FromEltVT == MVT::i32 && (ToEltVT == MVT::i16 || ToEltVT == MVT::i8))
14434     NumElements = 4;
14435   if (FromEltVT == MVT::i16 && ToEltVT == MVT::i8)
14436     NumElements = 8;
14437   if (FromEltVT == MVT::f32 && ToEltVT == MVT::f16)
14438     NumElements = 4;
14439   if (NumElements == 0 ||
14440       (FromEltVT != MVT::f32 && FromVT.getVectorNumElements() == NumElements) ||
14441       FromVT.getVectorNumElements() % NumElements != 0)
14442     return SDValue();
14443 
14444   // Test if the Trunc will be convertable to a VMOVN with a shuffle, and if so
14445   // use the VMOVN over splitting the store. We are looking for patterns of:
14446   // !rev: 0 N 1 N+1 2 N+2 ...
14447   //  rev: N 0 N+1 1 N+2 2 ...
14448   auto isVMOVNOriginalMask = [&](ArrayRef<int> M, bool rev) {
14449     unsigned NumElts = ToVT.getVectorNumElements();
14450     if (NumElts != M.size())
14451       return false;
14452 
14453     unsigned Off0 = rev ? NumElts : 0;
14454     unsigned Off1 = rev ? 0 : NumElts;
14455 
14456     for (unsigned i = 0; i < NumElts; i += 2) {
14457       if (M[i] >= 0 && M[i] != (int)(Off0 + i / 2))
14458         return false;
14459       if (M[i + 1] >= 0 && M[i + 1] != (int)(Off1 + i / 2))
14460         return false;
14461     }
14462 
14463     return true;
14464   };
14465 
14466   if (auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Trunc->getOperand(0)))
14467     if (isVMOVNOriginalMask(Shuffle->getMask(), false) ||
14468         isVMOVNOriginalMask(Shuffle->getMask(), true))
14469       return SDValue();
14470 
14471   LLVMContext &C = *DAG.getContext();
14472   SDLoc DL(St);
14473   // Details about the old store
14474   SDValue Ch = St->getChain();
14475   SDValue BasePtr = St->getBasePtr();
14476   Align Alignment = St->getOriginalAlign();
14477   MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags();
14478   AAMDNodes AAInfo = St->getAAInfo();
14479 
14480   // We split the store into slices of NumElements. fp16 trunc stores are vcvt
14481   // and then stored as truncating integer stores.
14482   EVT NewFromVT = EVT::getVectorVT(C, FromEltVT, NumElements);
14483   EVT NewToVT = EVT::getVectorVT(
14484       C, EVT::getIntegerVT(C, ToEltVT.getSizeInBits()), NumElements);
14485 
14486   SmallVector<SDValue, 4> Stores;
14487   for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) {
14488     unsigned NewOffset = i * NumElements * ToEltVT.getSizeInBits() / 8;
14489     SDValue NewPtr = DAG.getObjectPtrOffset(DL, BasePtr, NewOffset);
14490 
14491     SDValue Extract =
14492         DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewFromVT, Trunc.getOperand(0),
14493                     DAG.getConstant(i * NumElements, DL, MVT::i32));
14494 
14495     if (ToEltVT == MVT::f16) {
14496       SDValue FPTrunc =
14497           DAG.getNode(ARMISD::VCVTN, DL, MVT::v8f16, DAG.getUNDEF(MVT::v8f16),
14498                       Extract, DAG.getConstant(0, DL, MVT::i32));
14499       Extract = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, MVT::v4i32, FPTrunc);
14500     }
14501 
14502     SDValue Store = DAG.getTruncStore(
14503         Ch, DL, Extract, NewPtr, St->getPointerInfo().getWithOffset(NewOffset),
14504         NewToVT, Alignment.value(), MMOFlags, AAInfo);
14505     Stores.push_back(Store);
14506   }
14507   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
14508 }
14509 
14510 /// PerformSTORECombine - Target-specific dag combine xforms for
14511 /// ISD::STORE.
14512 static SDValue PerformSTORECombine(SDNode *N,
14513                                    TargetLowering::DAGCombinerInfo &DCI,
14514                                    const ARMSubtarget *Subtarget) {
14515   StoreSDNode *St = cast<StoreSDNode>(N);
14516   if (St->isVolatile())
14517     return SDValue();
14518   SDValue StVal = St->getValue();
14519   EVT VT = StVal.getValueType();
14520 
14521   if (Subtarget->hasNEON())
14522     if (SDValue Store = PerformTruncatingStoreCombine(St, DCI.DAG))
14523       return Store;
14524 
14525   if (Subtarget->hasMVEIntegerOps())
14526     if (SDValue NewToken = PerformSplittingToNarrowingStores(St, DCI.DAG))
14527       return NewToken;
14528 
14529   if (!ISD::isNormalStore(St))
14530     return SDValue();
14531 
14532   // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and
14533   // ARM stores of arguments in the same cache line.
14534   if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
14535       StVal.getNode()->hasOneUse()) {
14536     SelectionDAG  &DAG = DCI.DAG;
14537     bool isBigEndian = DAG.getDataLayout().isBigEndian();
14538     SDLoc DL(St);
14539     SDValue BasePtr = St->getBasePtr();
14540     SDValue NewST1 = DAG.getStore(
14541         St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0),
14542         BasePtr, St->getPointerInfo(), St->getAlignment(),
14543         St->getMemOperand()->getFlags());
14544 
14545     SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
14546                                     DAG.getConstant(4, DL, MVT::i32));
14547     return DAG.getStore(NewST1.getValue(0), DL,
14548                         StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
14549                         OffsetPtr, St->getPointerInfo(),
14550                         std::min(4U, St->getAlignment() / 2),
14551                         St->getMemOperand()->getFlags());
14552   }
14553 
14554   if (StVal.getValueType() == MVT::i64 &&
14555       StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
14556 
14557     // Bitcast an i64 store extracted from a vector to f64.
14558     // Otherwise, the i64 value will be legalized to a pair of i32 values.
14559     SelectionDAG &DAG = DCI.DAG;
14560     SDLoc dl(StVal);
14561     SDValue IntVec = StVal.getOperand(0);
14562     EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
14563                                    IntVec.getValueType().getVectorNumElements());
14564     SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
14565     SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14566                                  Vec, StVal.getOperand(1));
14567     dl = SDLoc(N);
14568     SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
14569     // Make the DAGCombiner fold the bitcasts.
14570     DCI.AddToWorklist(Vec.getNode());
14571     DCI.AddToWorklist(ExtElt.getNode());
14572     DCI.AddToWorklist(V.getNode());
14573     return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
14574                         St->getPointerInfo(), St->getAlignment(),
14575                         St->getMemOperand()->getFlags(), St->getAAInfo());
14576   }
14577 
14578   // If this is a legal vector store, try to combine it into a VST1_UPD.
14579   if (Subtarget->hasNEON() && ISD::isNormalStore(N) && VT.isVector() &&
14580       DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
14581     return CombineBaseUpdate(N, DCI);
14582 
14583   return SDValue();
14584 }
14585 
14586 /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
14587 /// can replace combinations of VMUL and VCVT (floating-point to integer)
14588 /// when the VMUL has a constant operand that is a power of 2.
14589 ///
14590 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
14591 ///  vmul.f32        d16, d17, d16
14592 ///  vcvt.s32.f32    d16, d16
14593 /// becomes:
14594 ///  vcvt.s32.f32    d16, d16, #3
14595 static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
14596                                   const ARMSubtarget *Subtarget) {
14597   if (!Subtarget->hasNEON())
14598     return SDValue();
14599 
14600   SDValue Op = N->getOperand(0);
14601   if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
14602       Op.getOpcode() != ISD::FMUL)
14603     return SDValue();
14604 
14605   SDValue ConstVec = Op->getOperand(1);
14606   if (!isa<BuildVectorSDNode>(ConstVec))
14607     return SDValue();
14608 
14609   MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
14610   uint32_t FloatBits = FloatTy.getSizeInBits();
14611   MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
14612   uint32_t IntBits = IntTy.getSizeInBits();
14613   unsigned NumLanes = Op.getValueType().getVectorNumElements();
14614   if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
14615     // These instructions only exist converting from f32 to i32. We can handle
14616     // smaller integers by generating an extra truncate, but larger ones would
14617     // be lossy. We also can't handle anything other than 2 or 4 lanes, since
14618     // these intructions only support v2i32/v4i32 types.
14619     return SDValue();
14620   }
14621 
14622   BitVector UndefElements;
14623   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
14624   int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
14625   if (C == -1 || C == 0 || C > 32)
14626     return SDValue();
14627 
14628   SDLoc dl(N);
14629   bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
14630   unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
14631     Intrinsic::arm_neon_vcvtfp2fxu;
14632   SDValue FixConv = DAG.getNode(
14633       ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
14634       DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0),
14635       DAG.getConstant(C, dl, MVT::i32));
14636 
14637   if (IntBits < FloatBits)
14638     FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv);
14639 
14640   return FixConv;
14641 }
14642 
14643 /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
14644 /// can replace combinations of VCVT (integer to floating-point) and VDIV
14645 /// when the VDIV has a constant operand that is a power of 2.
14646 ///
14647 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
14648 ///  vcvt.f32.s32    d16, d16
14649 ///  vdiv.f32        d16, d17, d16
14650 /// becomes:
14651 ///  vcvt.f32.s32    d16, d16, #3
14652 static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
14653                                   const ARMSubtarget *Subtarget) {
14654   if (!Subtarget->hasNEON())
14655     return SDValue();
14656 
14657   SDValue Op = N->getOperand(0);
14658   unsigned OpOpcode = Op.getNode()->getOpcode();
14659   if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() ||
14660       (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
14661     return SDValue();
14662 
14663   SDValue ConstVec = N->getOperand(1);
14664   if (!isa<BuildVectorSDNode>(ConstVec))
14665     return SDValue();
14666 
14667   MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
14668   uint32_t FloatBits = FloatTy.getSizeInBits();
14669   MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
14670   uint32_t IntBits = IntTy.getSizeInBits();
14671   unsigned NumLanes = Op.getValueType().getVectorNumElements();
14672   if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
14673     // These instructions only exist converting from i32 to f32. We can handle
14674     // smaller integers by generating an extra extend, but larger ones would
14675     // be lossy. We also can't handle anything other than 2 or 4 lanes, since
14676     // these intructions only support v2i32/v4i32 types.
14677     return SDValue();
14678   }
14679 
14680   BitVector UndefElements;
14681   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
14682   int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
14683   if (C == -1 || C == 0 || C > 32)
14684     return SDValue();
14685 
14686   SDLoc dl(N);
14687   bool isSigned = OpOpcode == ISD::SINT_TO_FP;
14688   SDValue ConvInput = Op.getOperand(0);
14689   if (IntBits < FloatBits)
14690     ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
14691                             dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
14692                             ConvInput);
14693 
14694   unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
14695     Intrinsic::arm_neon_vcvtfxu2fp;
14696   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
14697                      Op.getValueType(),
14698                      DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
14699                      ConvInput, DAG.getConstant(C, dl, MVT::i32));
14700 }
14701 
14702 static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
14703                                            const ARMSubtarget *ST) {
14704   if (!ST->hasMVEIntegerOps())
14705     return SDValue();
14706 
14707   assert(N->getOpcode() == ISD::VECREDUCE_ADD);
14708   EVT ResVT = N->getValueType(0);
14709   SDValue N0 = N->getOperand(0);
14710   SDLoc dl(N);
14711 
14712   // We are looking for something that will have illegal types if left alone,
14713   // but that we can convert to a single instruction undef MVE. For example
14714   // vecreduce_add(sext(A, v8i32)) => VADDV.s16 A
14715   // or
14716   // vecreduce_add(mul(zext(A, v16i32), zext(B, v16i32))) => VMLADAV.u8 A, B
14717 
14718   // Cases:
14719   //   VADDV u/s 8/16/32
14720   //   VMLAV u/s 8/16/32
14721   //   VADDLV u/s 32
14722   //   VMLALV u/s 16/32
14723 
14724   auto IsVADDV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes) {
14725     if (ResVT != RetTy || N0->getOpcode() != ExtendCode)
14726       return SDValue();
14727     SDValue A = N0->getOperand(0);
14728     if (llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; }))
14729       return A;
14730     return SDValue();
14731   };
14732   auto IsVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes,
14733                      SDValue &A, SDValue &B) {
14734     if (ResVT != RetTy || N0->getOpcode() != ISD::MUL)
14735       return false;
14736     SDValue ExtA = N0->getOperand(0);
14737     SDValue ExtB = N0->getOperand(1);
14738     if (ExtA->getOpcode() != ExtendCode && ExtB->getOpcode() != ExtendCode)
14739       return false;
14740     A = ExtA->getOperand(0);
14741     B = ExtB->getOperand(0);
14742     if (A.getValueType() == B.getValueType() &&
14743         llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; }))
14744       return true;
14745     return false;
14746   };
14747   auto Create64bitNode = [&](unsigned Opcode, ArrayRef<SDValue> Ops) {
14748     SDValue Node = DAG.getNode(Opcode, dl, {MVT::i32, MVT::i32}, Ops);
14749     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Node,
14750                        SDValue(Node.getNode(), 1));
14751   };
14752 
14753   if (SDValue A = IsVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}))
14754     return DAG.getNode(ARMISD::VADDVs, dl, ResVT, A);
14755   if (SDValue A = IsVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}))
14756     return DAG.getNode(ARMISD::VADDVu, dl, ResVT, A);
14757   if (SDValue A = IsVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32}))
14758     return Create64bitNode(ARMISD::VADDLVs, {A});
14759   if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}))
14760     return Create64bitNode(ARMISD::VADDLVu, {A});
14761 
14762   SDValue A, B;
14763   if (IsVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B))
14764     return DAG.getNode(ARMISD::VMLAVs, dl, ResVT, A, B);
14765   if (IsVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B))
14766     return DAG.getNode(ARMISD::VMLAVu, dl, ResVT, A, B);
14767   if (IsVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B))
14768     return Create64bitNode(ARMISD::VMLALVs, {A, B});
14769   if (IsVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B))
14770     return Create64bitNode(ARMISD::VMLALVu, {A, B});
14771   return SDValue();
14772 }
14773 
14774 static SDValue PerformVMOVNCombine(SDNode *N,
14775                                    TargetLowering::DAGCombinerInfo &DCI) {
14776   SDValue Op0 = N->getOperand(0);
14777   SDValue Op1 = N->getOperand(1);
14778   unsigned IsTop = N->getConstantOperandVal(2);
14779 
14780   // VMOVNt(c, VQMOVNb(a, b)) => VQMOVNt(c, b)
14781   // VMOVNb(c, VQMOVNb(a, b)) => VQMOVNb(c, b)
14782   if ((Op1->getOpcode() == ARMISD::VQMOVNs ||
14783        Op1->getOpcode() == ARMISD::VQMOVNu) &&
14784       Op1->getConstantOperandVal(2) == 0)
14785     return DCI.DAG.getNode(Op1->getOpcode(), SDLoc(Op1), N->getValueType(0),
14786                            Op0, Op1->getOperand(1), N->getOperand(2));
14787 
14788   // Only the bottom lanes from Qm (Op1) and either the top or bottom lanes from
14789   // Qd (Op0) are demanded from a VMOVN, depending on whether we are inserting
14790   // into the top or bottom lanes.
14791   unsigned NumElts = N->getValueType(0).getVectorNumElements();
14792   APInt Op1DemandedElts = APInt::getSplat(NumElts, APInt::getLowBitsSet(2, 1));
14793   APInt Op0DemandedElts =
14794       IsTop ? Op1DemandedElts
14795             : APInt::getSplat(NumElts, APInt::getHighBitsSet(2, 1));
14796 
14797   APInt KnownUndef, KnownZero;
14798   const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo();
14799   if (TLI.SimplifyDemandedVectorElts(Op0, Op0DemandedElts, KnownUndef,
14800                                      KnownZero, DCI))
14801     return SDValue(N, 0);
14802   if (TLI.SimplifyDemandedVectorElts(Op1, Op1DemandedElts, KnownUndef,
14803                                      KnownZero, DCI))
14804     return SDValue(N, 0);
14805 
14806   return SDValue();
14807 }
14808 
14809 static SDValue PerformVQMOVNCombine(SDNode *N,
14810                                     TargetLowering::DAGCombinerInfo &DCI) {
14811   SDValue Op0 = N->getOperand(0);
14812   unsigned IsTop = N->getConstantOperandVal(2);
14813 
14814   unsigned NumElts = N->getValueType(0).getVectorNumElements();
14815   APInt Op0DemandedElts =
14816       APInt::getSplat(NumElts, IsTop ? APInt::getLowBitsSet(2, 1)
14817                                      : APInt::getHighBitsSet(2, 1));
14818 
14819   APInt KnownUndef, KnownZero;
14820   const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo();
14821   if (TLI.SimplifyDemandedVectorElts(Op0, Op0DemandedElts, KnownUndef,
14822                                      KnownZero, DCI))
14823     return SDValue(N, 0);
14824   return SDValue();
14825 }
14826 
14827 static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG) {
14828   SDLoc DL(N);
14829   SDValue Op0 = N->getOperand(0);
14830   SDValue Op1 = N->getOperand(1);
14831 
14832   // Turn X << -C -> X >> C and viceversa. The negative shifts can come up from
14833   // uses of the intrinsics.
14834   if (auto C = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
14835     int ShiftAmt = C->getSExtValue();
14836     if (ShiftAmt == 0) {
14837       SDValue Merge = DAG.getMergeValues({Op0, Op1}, DL);
14838       DAG.ReplaceAllUsesWith(N, Merge.getNode());
14839       return SDValue();
14840     }
14841 
14842     if (ShiftAmt >= -32 && ShiftAmt < 0) {
14843       unsigned NewOpcode =
14844           N->getOpcode() == ARMISD::LSLL ? ARMISD::LSRL : ARMISD::LSLL;
14845       SDValue NewShift = DAG.getNode(NewOpcode, DL, N->getVTList(), Op0, Op1,
14846                                      DAG.getConstant(-ShiftAmt, DL, MVT::i32));
14847       DAG.ReplaceAllUsesWith(N, NewShift.getNode());
14848       return NewShift;
14849     }
14850   }
14851 
14852   return SDValue();
14853 }
14854 
14855 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
14856 SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N,
14857                                                    DAGCombinerInfo &DCI) const {
14858   SelectionDAG &DAG = DCI.DAG;
14859   unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
14860   switch (IntNo) {
14861   default:
14862     // Don't do anything for most intrinsics.
14863     break;
14864 
14865   // Vector shifts: check for immediate versions and lower them.
14866   // Note: This is done during DAG combining instead of DAG legalizing because
14867   // the build_vectors for 64-bit vector element shift counts are generally
14868   // not legal, and it is hard to see their values after they get legalized to
14869   // loads from a constant pool.
14870   case Intrinsic::arm_neon_vshifts:
14871   case Intrinsic::arm_neon_vshiftu:
14872   case Intrinsic::arm_neon_vrshifts:
14873   case Intrinsic::arm_neon_vrshiftu:
14874   case Intrinsic::arm_neon_vrshiftn:
14875   case Intrinsic::arm_neon_vqshifts:
14876   case Intrinsic::arm_neon_vqshiftu:
14877   case Intrinsic::arm_neon_vqshiftsu:
14878   case Intrinsic::arm_neon_vqshiftns:
14879   case Intrinsic::arm_neon_vqshiftnu:
14880   case Intrinsic::arm_neon_vqshiftnsu:
14881   case Intrinsic::arm_neon_vqrshiftns:
14882   case Intrinsic::arm_neon_vqrshiftnu:
14883   case Intrinsic::arm_neon_vqrshiftnsu: {
14884     EVT VT = N->getOperand(1).getValueType();
14885     int64_t Cnt;
14886     unsigned VShiftOpc = 0;
14887 
14888     switch (IntNo) {
14889     case Intrinsic::arm_neon_vshifts:
14890     case Intrinsic::arm_neon_vshiftu:
14891       if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
14892         VShiftOpc = ARMISD::VSHLIMM;
14893         break;
14894       }
14895       if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
14896         VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? ARMISD::VSHRsIMM
14897                                                           : ARMISD::VSHRuIMM);
14898         break;
14899       }
14900       return SDValue();
14901 
14902     case Intrinsic::arm_neon_vrshifts:
14903     case Intrinsic::arm_neon_vrshiftu:
14904       if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
14905         break;
14906       return SDValue();
14907 
14908     case Intrinsic::arm_neon_vqshifts:
14909     case Intrinsic::arm_neon_vqshiftu:
14910       if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
14911         break;
14912       return SDValue();
14913 
14914     case Intrinsic::arm_neon_vqshiftsu:
14915       if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
14916         break;
14917       llvm_unreachable("invalid shift count for vqshlu intrinsic");
14918 
14919     case Intrinsic::arm_neon_vrshiftn:
14920     case Intrinsic::arm_neon_vqshiftns:
14921     case Intrinsic::arm_neon_vqshiftnu:
14922     case Intrinsic::arm_neon_vqshiftnsu:
14923     case Intrinsic::arm_neon_vqrshiftns:
14924     case Intrinsic::arm_neon_vqrshiftnu:
14925     case Intrinsic::arm_neon_vqrshiftnsu:
14926       // Narrowing shifts require an immediate right shift.
14927       if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
14928         break;
14929       llvm_unreachable("invalid shift count for narrowing vector shift "
14930                        "intrinsic");
14931 
14932     default:
14933       llvm_unreachable("unhandled vector shift");
14934     }
14935 
14936     switch (IntNo) {
14937     case Intrinsic::arm_neon_vshifts:
14938     case Intrinsic::arm_neon_vshiftu:
14939       // Opcode already set above.
14940       break;
14941     case Intrinsic::arm_neon_vrshifts:
14942       VShiftOpc = ARMISD::VRSHRsIMM;
14943       break;
14944     case Intrinsic::arm_neon_vrshiftu:
14945       VShiftOpc = ARMISD::VRSHRuIMM;
14946       break;
14947     case Intrinsic::arm_neon_vrshiftn:
14948       VShiftOpc = ARMISD::VRSHRNIMM;
14949       break;
14950     case Intrinsic::arm_neon_vqshifts:
14951       VShiftOpc = ARMISD::VQSHLsIMM;
14952       break;
14953     case Intrinsic::arm_neon_vqshiftu:
14954       VShiftOpc = ARMISD::VQSHLuIMM;
14955       break;
14956     case Intrinsic::arm_neon_vqshiftsu:
14957       VShiftOpc = ARMISD::VQSHLsuIMM;
14958       break;
14959     case Intrinsic::arm_neon_vqshiftns:
14960       VShiftOpc = ARMISD::VQSHRNsIMM;
14961       break;
14962     case Intrinsic::arm_neon_vqshiftnu:
14963       VShiftOpc = ARMISD::VQSHRNuIMM;
14964       break;
14965     case Intrinsic::arm_neon_vqshiftnsu:
14966       VShiftOpc = ARMISD::VQSHRNsuIMM;
14967       break;
14968     case Intrinsic::arm_neon_vqrshiftns:
14969       VShiftOpc = ARMISD::VQRSHRNsIMM;
14970       break;
14971     case Intrinsic::arm_neon_vqrshiftnu:
14972       VShiftOpc = ARMISD::VQRSHRNuIMM;
14973       break;
14974     case Intrinsic::arm_neon_vqrshiftnsu:
14975       VShiftOpc = ARMISD::VQRSHRNsuIMM;
14976       break;
14977     }
14978 
14979     SDLoc dl(N);
14980     return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
14981                        N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32));
14982   }
14983 
14984   case Intrinsic::arm_neon_vshiftins: {
14985     EVT VT = N->getOperand(1).getValueType();
14986     int64_t Cnt;
14987     unsigned VShiftOpc = 0;
14988 
14989     if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
14990       VShiftOpc = ARMISD::VSLIIMM;
14991     else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
14992       VShiftOpc = ARMISD::VSRIIMM;
14993     else {
14994       llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
14995     }
14996 
14997     SDLoc dl(N);
14998     return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
14999                        N->getOperand(1), N->getOperand(2),
15000                        DAG.getConstant(Cnt, dl, MVT::i32));
15001   }
15002 
15003   case Intrinsic::arm_neon_vqrshifts:
15004   case Intrinsic::arm_neon_vqrshiftu:
15005     // No immediate versions of these to check for.
15006     break;
15007 
15008   case Intrinsic::arm_mve_vqdmlah:
15009   case Intrinsic::arm_mve_vqdmlash:
15010   case Intrinsic::arm_mve_vqrdmlah:
15011   case Intrinsic::arm_mve_vqrdmlash:
15012   case Intrinsic::arm_mve_vmla_n_predicated:
15013   case Intrinsic::arm_mve_vmlas_n_predicated:
15014   case Intrinsic::arm_mve_vqdmlah_predicated:
15015   case Intrinsic::arm_mve_vqdmlash_predicated:
15016   case Intrinsic::arm_mve_vqrdmlah_predicated:
15017   case Intrinsic::arm_mve_vqrdmlash_predicated: {
15018     // These intrinsics all take an i32 scalar operand which is narrowed to the
15019     // size of a single lane of the vector type they return. So we don't need
15020     // any bits of that operand above that point, which allows us to eliminate
15021     // uxth/sxth.
15022     unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
15023     APInt DemandedMask = APInt::getLowBitsSet(32, BitWidth);
15024     if (SimplifyDemandedBits(N->getOperand(3), DemandedMask, DCI))
15025       return SDValue();
15026     break;
15027   }
15028 
15029   case Intrinsic::arm_mve_minv:
15030   case Intrinsic::arm_mve_maxv:
15031   case Intrinsic::arm_mve_minav:
15032   case Intrinsic::arm_mve_maxav:
15033   case Intrinsic::arm_mve_minv_predicated:
15034   case Intrinsic::arm_mve_maxv_predicated:
15035   case Intrinsic::arm_mve_minav_predicated:
15036   case Intrinsic::arm_mve_maxav_predicated: {
15037     // These intrinsics all take an i32 scalar operand which is narrowed to the
15038     // size of a single lane of the vector type they take as the other input.
15039     unsigned BitWidth = N->getOperand(2)->getValueType(0).getScalarSizeInBits();
15040     APInt DemandedMask = APInt::getLowBitsSet(32, BitWidth);
15041     if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
15042       return SDValue();
15043     break;
15044   }
15045 
15046   case Intrinsic::arm_mve_addv: {
15047     // Turn this intrinsic straight into the appropriate ARMISD::VADDV node,
15048     // which allow PerformADDVecReduce to turn it into VADDLV when possible.
15049     bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
15050     unsigned Opc = Unsigned ? ARMISD::VADDVu : ARMISD::VADDVs;
15051     return DAG.getNode(Opc, SDLoc(N), N->getVTList(), N->getOperand(1));
15052   }
15053 
15054   case Intrinsic::arm_mve_addlv:
15055   case Intrinsic::arm_mve_addlv_predicated: {
15056     // Same for these, but ARMISD::VADDLV has to be followed by a BUILD_PAIR
15057     // which recombines the two outputs into an i64
15058     bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
15059     unsigned Opc = IntNo == Intrinsic::arm_mve_addlv ?
15060                     (Unsigned ? ARMISD::VADDLVu : ARMISD::VADDLVs) :
15061                     (Unsigned ? ARMISD::VADDLVpu : ARMISD::VADDLVps);
15062 
15063     SmallVector<SDValue, 4> Ops;
15064     for (unsigned i = 1, e = N->getNumOperands(); i < e; i++)
15065       if (i != 2)                      // skip the unsigned flag
15066         Ops.push_back(N->getOperand(i));
15067 
15068     SDLoc dl(N);
15069     SDValue val = DAG.getNode(Opc, dl, {MVT::i32, MVT::i32}, Ops);
15070     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, val.getValue(0),
15071                        val.getValue(1));
15072   }
15073   }
15074 
15075   return SDValue();
15076 }
15077 
15078 /// PerformShiftCombine - Checks for immediate versions of vector shifts and
15079 /// lowers them.  As with the vector shift intrinsics, this is done during DAG
15080 /// combining instead of DAG legalizing because the build_vectors for 64-bit
15081 /// vector element shift counts are generally not legal, and it is hard to see
15082 /// their values after they get legalized to loads from a constant pool.
15083 static SDValue PerformShiftCombine(SDNode *N,
15084                                    TargetLowering::DAGCombinerInfo &DCI,
15085                                    const ARMSubtarget *ST) {
15086   SelectionDAG &DAG = DCI.DAG;
15087   EVT VT = N->getValueType(0);
15088   if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) {
15089     // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high
15090     // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16.
15091     SDValue N1 = N->getOperand(1);
15092     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
15093       SDValue N0 = N->getOperand(0);
15094       if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP &&
15095           DAG.MaskedValueIsZero(N0.getOperand(0),
15096                                 APInt::getHighBitsSet(32, 16)))
15097         return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1);
15098     }
15099   }
15100 
15101   if (ST->isThumb1Only() && N->getOpcode() == ISD::SHL && VT == MVT::i32 &&
15102       N->getOperand(0)->getOpcode() == ISD::AND &&
15103       N->getOperand(0)->hasOneUse()) {
15104     if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
15105       return SDValue();
15106     // Look for the pattern (shl (and x, AndMask), ShiftAmt). This doesn't
15107     // usually show up because instcombine prefers to canonicalize it to
15108     // (and (shl x, ShiftAmt) (shl AndMask, ShiftAmt)), but the shift can come
15109     // out of GEP lowering in some cases.
15110     SDValue N0 = N->getOperand(0);
15111     ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
15112     if (!ShiftAmtNode)
15113       return SDValue();
15114     uint32_t ShiftAmt = static_cast<uint32_t>(ShiftAmtNode->getZExtValue());
15115     ConstantSDNode *AndMaskNode = dyn_cast<ConstantSDNode>(N0->getOperand(1));
15116     if (!AndMaskNode)
15117       return SDValue();
15118     uint32_t AndMask = static_cast<uint32_t>(AndMaskNode->getZExtValue());
15119     // Don't transform uxtb/uxth.
15120     if (AndMask == 255 || AndMask == 65535)
15121       return SDValue();
15122     if (isMask_32(AndMask)) {
15123       uint32_t MaskedBits = countLeadingZeros(AndMask);
15124       if (MaskedBits > ShiftAmt) {
15125         SDLoc DL(N);
15126         SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
15127                                   DAG.getConstant(MaskedBits, DL, MVT::i32));
15128         return DAG.getNode(
15129             ISD::SRL, DL, MVT::i32, SHL,
15130             DAG.getConstant(MaskedBits - ShiftAmt, DL, MVT::i32));
15131       }
15132     }
15133   }
15134 
15135   // Nothing to be done for scalar shifts.
15136   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15137   if (!VT.isVector() || !TLI.isTypeLegal(VT))
15138     return SDValue();
15139   if (ST->hasMVEIntegerOps() && VT == MVT::v2i64)
15140     return SDValue();
15141 
15142   int64_t Cnt;
15143 
15144   switch (N->getOpcode()) {
15145   default: llvm_unreachable("unexpected shift opcode");
15146 
15147   case ISD::SHL:
15148     if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) {
15149       SDLoc dl(N);
15150       return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0),
15151                          DAG.getConstant(Cnt, dl, MVT::i32));
15152     }
15153     break;
15154 
15155   case ISD::SRA:
15156   case ISD::SRL:
15157     if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
15158       unsigned VShiftOpc =
15159           (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM);
15160       SDLoc dl(N);
15161       return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
15162                          DAG.getConstant(Cnt, dl, MVT::i32));
15163     }
15164   }
15165   return SDValue();
15166 }
15167 
15168 // Look for a sign/zero/fpextend extend of a larger than legal load. This can be
15169 // split into multiple extending loads, which are simpler to deal with than an
15170 // arbitrary extend. For fp extends we use an integer extending load and a VCVTL
15171 // to convert the type to an f32.
15172 static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG) {
15173   SDValue N0 = N->getOperand(0);
15174   if (N0.getOpcode() != ISD::LOAD)
15175     return SDValue();
15176   LoadSDNode *LD = cast<LoadSDNode>(N0.getNode());
15177   if (!LD->isSimple() || !N0.hasOneUse() || LD->isIndexed() ||
15178       LD->getExtensionType() != ISD::NON_EXTLOAD)
15179     return SDValue();
15180   EVT FromVT = LD->getValueType(0);
15181   EVT ToVT = N->getValueType(0);
15182   if (!ToVT.isVector())
15183     return SDValue();
15184   assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements());
15185   EVT ToEltVT = ToVT.getVectorElementType();
15186   EVT FromEltVT = FromVT.getVectorElementType();
15187 
15188   unsigned NumElements = 0;
15189   if (ToEltVT == MVT::i32 && (FromEltVT == MVT::i16 || FromEltVT == MVT::i8))
15190     NumElements = 4;
15191   if (ToEltVT == MVT::i16 && FromEltVT == MVT::i8)
15192     NumElements = 8;
15193   if (ToEltVT == MVT::f32 && FromEltVT == MVT::f16)
15194     NumElements = 4;
15195   if (NumElements == 0 ||
15196       (FromEltVT != MVT::f16 && FromVT.getVectorNumElements() == NumElements) ||
15197       FromVT.getVectorNumElements() % NumElements != 0 ||
15198       !isPowerOf2_32(NumElements))
15199     return SDValue();
15200 
15201   LLVMContext &C = *DAG.getContext();
15202   SDLoc DL(LD);
15203   // Details about the old load
15204   SDValue Ch = LD->getChain();
15205   SDValue BasePtr = LD->getBasePtr();
15206   Align Alignment = LD->getOriginalAlign();
15207   MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
15208   AAMDNodes AAInfo = LD->getAAInfo();
15209 
15210   ISD::LoadExtType NewExtType =
15211       N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
15212   SDValue Offset = DAG.getUNDEF(BasePtr.getValueType());
15213   EVT NewFromVT = EVT::getVectorVT(
15214       C, EVT::getIntegerVT(C, FromEltVT.getScalarSizeInBits()), NumElements);
15215   EVT NewToVT = EVT::getVectorVT(
15216       C, EVT::getIntegerVT(C, ToEltVT.getScalarSizeInBits()), NumElements);
15217 
15218   SmallVector<SDValue, 4> Loads;
15219   SmallVector<SDValue, 4> Chains;
15220   for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) {
15221     unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8;
15222     SDValue NewPtr = DAG.getObjectPtrOffset(DL, BasePtr, NewOffset);
15223 
15224     SDValue NewLoad =
15225         DAG.getLoad(ISD::UNINDEXED, NewExtType, NewToVT, DL, Ch, NewPtr, Offset,
15226                     LD->getPointerInfo().getWithOffset(NewOffset), NewFromVT,
15227                     Alignment.value(), MMOFlags, AAInfo);
15228     Loads.push_back(NewLoad);
15229     Chains.push_back(SDValue(NewLoad.getNode(), 1));
15230   }
15231 
15232   // Float truncs need to extended with VCVTB's into their floating point types.
15233   if (FromEltVT == MVT::f16) {
15234     SmallVector<SDValue, 4> Extends;
15235 
15236     for (unsigned i = 0; i < Loads.size(); i++) {
15237       SDValue LoadBC =
15238           DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, MVT::v8f16, Loads[i]);
15239       SDValue FPExt = DAG.getNode(ARMISD::VCVTL, DL, MVT::v4f32, LoadBC,
15240                                   DAG.getConstant(0, DL, MVT::i32));
15241       Extends.push_back(FPExt);
15242     }
15243 
15244     Loads = Extends;
15245   }
15246 
15247   SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
15248   DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewChain);
15249   return DAG.getNode(ISD::CONCAT_VECTORS, DL, ToVT, Loads);
15250 }
15251 
15252 /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
15253 /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
15254 static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
15255                                     const ARMSubtarget *ST) {
15256   SDValue N0 = N->getOperand(0);
15257 
15258   // Check for sign- and zero-extensions of vector extract operations of 8- and
15259   // 16-bit vector elements. NEON and MVE support these directly. They are
15260   // handled during DAG combining because type legalization will promote them
15261   // to 32-bit types and it is messy to recognize the operations after that.
15262   if ((ST->hasNEON() || ST->hasMVEIntegerOps()) &&
15263       N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
15264     SDValue Vec = N0.getOperand(0);
15265     SDValue Lane = N0.getOperand(1);
15266     EVT VT = N->getValueType(0);
15267     EVT EltVT = N0.getValueType();
15268     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15269 
15270     if (VT == MVT::i32 &&
15271         (EltVT == MVT::i8 || EltVT == MVT::i16) &&
15272         TLI.isTypeLegal(Vec.getValueType()) &&
15273         isa<ConstantSDNode>(Lane)) {
15274 
15275       unsigned Opc = 0;
15276       switch (N->getOpcode()) {
15277       default: llvm_unreachable("unexpected opcode");
15278       case ISD::SIGN_EXTEND:
15279         Opc = ARMISD::VGETLANEs;
15280         break;
15281       case ISD::ZERO_EXTEND:
15282       case ISD::ANY_EXTEND:
15283         Opc = ARMISD::VGETLANEu;
15284         break;
15285       }
15286       return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane);
15287     }
15288   }
15289 
15290   if (ST->hasMVEIntegerOps())
15291     if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG))
15292       return NewLoad;
15293 
15294   return SDValue();
15295 }
15296 
15297 static SDValue PerformFPExtendCombine(SDNode *N, SelectionDAG &DAG,
15298                                       const ARMSubtarget *ST) {
15299   if (ST->hasMVEFloatOps())
15300     if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG))
15301       return NewLoad;
15302 
15303   return SDValue();
15304 }
15305 
15306 /// PerformMinMaxCombine - Target-specific DAG combining for creating truncating
15307 /// saturates.
15308 static SDValue PerformMinMaxCombine(SDNode *N, SelectionDAG &DAG,
15309                                     const ARMSubtarget *ST) {
15310   EVT VT = N->getValueType(0);
15311   SDValue N0 = N->getOperand(0);
15312   if (!ST->hasMVEIntegerOps())
15313     return SDValue();
15314 
15315   if (VT != MVT::v4i32 && VT != MVT::v8i16)
15316     return SDValue();
15317 
15318   auto IsSignedSaturate = [&](SDNode *Min, SDNode *Max) {
15319     // Check one is a smin and the other is a smax
15320     if (Min->getOpcode() != ISD::SMIN)
15321       std::swap(Min, Max);
15322     if (Min->getOpcode() != ISD::SMIN || Max->getOpcode() != ISD::SMAX)
15323       return false;
15324 
15325     APInt SaturateC;
15326     if (VT == MVT::v4i32)
15327       SaturateC = APInt(32, (1 << 15) - 1, true);
15328     else //if (VT == MVT::v8i16)
15329       SaturateC = APInt(16, (1 << 7) - 1, true);
15330 
15331     APInt MinC, MaxC;
15332     if (!ISD::isConstantSplatVector(Min->getOperand(1).getNode(), MinC) ||
15333         MinC != SaturateC)
15334       return false;
15335     if (!ISD::isConstantSplatVector(Max->getOperand(1).getNode(), MaxC) ||
15336         MaxC != ~SaturateC)
15337       return false;
15338     return true;
15339   };
15340 
15341   if (IsSignedSaturate(N, N0.getNode())) {
15342     SDLoc DL(N);
15343     MVT ExtVT, HalfVT;
15344     if (VT == MVT::v4i32) {
15345       HalfVT = MVT::v8i16;
15346       ExtVT = MVT::v4i16;
15347     } else { // if (VT == MVT::v8i16)
15348       HalfVT = MVT::v16i8;
15349       ExtVT = MVT::v8i8;
15350     }
15351 
15352     // Create a VQMOVNB with undef top lanes, then signed extended into the top
15353     // half. That extend will hopefully be removed if only the bottom bits are
15354     // demanded (though a truncating store, for example).
15355     SDValue VQMOVN =
15356         DAG.getNode(ARMISD::VQMOVNs, DL, HalfVT, DAG.getUNDEF(HalfVT),
15357                     N0->getOperand(0), DAG.getConstant(0, DL, MVT::i32));
15358     SDValue Bitcast = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, VQMOVN);
15359     return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Bitcast,
15360                        DAG.getValueType(ExtVT));
15361   }
15362 
15363   auto IsUnsignedSaturate = [&](SDNode *Min) {
15364     // For unsigned, we just need to check for <= 0xffff
15365     if (Min->getOpcode() != ISD::UMIN)
15366       return false;
15367 
15368     APInt SaturateC;
15369     if (VT == MVT::v4i32)
15370       SaturateC = APInt(32, (1 << 16) - 1, true);
15371     else //if (VT == MVT::v8i16)
15372       SaturateC = APInt(16, (1 << 8) - 1, true);
15373 
15374     APInt MinC;
15375     if (!ISD::isConstantSplatVector(Min->getOperand(1).getNode(), MinC) ||
15376         MinC != SaturateC)
15377       return false;
15378     return true;
15379   };
15380 
15381   if (IsUnsignedSaturate(N)) {
15382     SDLoc DL(N);
15383     MVT HalfVT;
15384     unsigned ExtConst;
15385     if (VT == MVT::v4i32) {
15386       HalfVT = MVT::v8i16;
15387       ExtConst = 0x0000FFFF;
15388     } else { //if (VT == MVT::v8i16)
15389       HalfVT = MVT::v16i8;
15390       ExtConst = 0x00FF;
15391     }
15392 
15393     // Create a VQMOVNB with undef top lanes, then ZExt into the top half with
15394     // an AND. That extend will hopefully be removed if only the bottom bits are
15395     // demanded (though a truncating store, for example).
15396     SDValue VQMOVN =
15397         DAG.getNode(ARMISD::VQMOVNu, DL, HalfVT, DAG.getUNDEF(HalfVT), N0,
15398                     DAG.getConstant(0, DL, MVT::i32));
15399     SDValue Bitcast = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, VQMOVN);
15400     return DAG.getNode(ISD::AND, DL, VT, Bitcast,
15401                        DAG.getConstant(ExtConst, DL, VT));
15402   }
15403 
15404   return SDValue();
15405 }
15406 
15407 static const APInt *isPowerOf2Constant(SDValue V) {
15408   ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15409   if (!C)
15410     return nullptr;
15411   const APInt *CV = &C->getAPIntValue();
15412   return CV->isPowerOf2() ? CV : nullptr;
15413 }
15414 
15415 SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const {
15416   // If we have a CMOV, OR and AND combination such as:
15417   //   if (x & CN)
15418   //     y |= CM;
15419   //
15420   // And:
15421   //   * CN is a single bit;
15422   //   * All bits covered by CM are known zero in y
15423   //
15424   // Then we can convert this into a sequence of BFI instructions. This will
15425   // always be a win if CM is a single bit, will always be no worse than the
15426   // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is
15427   // three bits (due to the extra IT instruction).
15428 
15429   SDValue Op0 = CMOV->getOperand(0);
15430   SDValue Op1 = CMOV->getOperand(1);
15431   auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2));
15432   auto CC = CCNode->getAPIntValue().getLimitedValue();
15433   SDValue CmpZ = CMOV->getOperand(4);
15434 
15435   // The compare must be against zero.
15436   if (!isNullConstant(CmpZ->getOperand(1)))
15437     return SDValue();
15438 
15439   assert(CmpZ->getOpcode() == ARMISD::CMPZ);
15440   SDValue And = CmpZ->getOperand(0);
15441   if (And->getOpcode() != ISD::AND)
15442     return SDValue();
15443   const APInt *AndC = isPowerOf2Constant(And->getOperand(1));
15444   if (!AndC)
15445     return SDValue();
15446   SDValue X = And->getOperand(0);
15447 
15448   if (CC == ARMCC::EQ) {
15449     // We're performing an "equal to zero" compare. Swap the operands so we
15450     // canonicalize on a "not equal to zero" compare.
15451     std::swap(Op0, Op1);
15452   } else {
15453     assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
15454   }
15455 
15456   if (Op1->getOpcode() != ISD::OR)
15457     return SDValue();
15458 
15459   ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1));
15460   if (!OrC)
15461     return SDValue();
15462   SDValue Y = Op1->getOperand(0);
15463 
15464   if (Op0 != Y)
15465     return SDValue();
15466 
15467   // Now, is it profitable to continue?
15468   APInt OrCI = OrC->getAPIntValue();
15469   unsigned Heuristic = Subtarget->isThumb() ? 3 : 2;
15470   if (OrCI.countPopulation() > Heuristic)
15471     return SDValue();
15472 
15473   // Lastly, can we determine that the bits defined by OrCI
15474   // are zero in Y?
15475   KnownBits Known = DAG.computeKnownBits(Y);
15476   if ((OrCI & Known.Zero) != OrCI)
15477     return SDValue();
15478 
15479   // OK, we can do the combine.
15480   SDValue V = Y;
15481   SDLoc dl(X);
15482   EVT VT = X.getValueType();
15483   unsigned BitInX = AndC->logBase2();
15484 
15485   if (BitInX != 0) {
15486     // We must shift X first.
15487     X = DAG.getNode(ISD::SRL, dl, VT, X,
15488                     DAG.getConstant(BitInX, dl, VT));
15489   }
15490 
15491   for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits();
15492        BitInY < NumActiveBits; ++BitInY) {
15493     if (OrCI[BitInY] == 0)
15494       continue;
15495     APInt Mask(VT.getSizeInBits(), 0);
15496     Mask.setBit(BitInY);
15497     V = DAG.getNode(ARMISD::BFI, dl, VT, V, X,
15498                     // Confusingly, the operand is an *inverted* mask.
15499                     DAG.getConstant(~Mask, dl, VT));
15500   }
15501 
15502   return V;
15503 }
15504 
15505 // Given N, the value controlling the conditional branch, search for the loop
15506 // intrinsic, returning it, along with how the value is used. We need to handle
15507 // patterns such as the following:
15508 // (brcond (xor (setcc (loop.decrement), 0, ne), 1), exit)
15509 // (brcond (setcc (loop.decrement), 0, eq), exit)
15510 // (brcond (setcc (loop.decrement), 0, ne), header)
15511 static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm,
15512                                    bool &Negate) {
15513   switch (N->getOpcode()) {
15514   default:
15515     break;
15516   case ISD::XOR: {
15517     if (!isa<ConstantSDNode>(N.getOperand(1)))
15518       return SDValue();
15519     if (!cast<ConstantSDNode>(N.getOperand(1))->isOne())
15520       return SDValue();
15521     Negate = !Negate;
15522     return SearchLoopIntrinsic(N.getOperand(0), CC, Imm, Negate);
15523   }
15524   case ISD::SETCC: {
15525     auto *Const = dyn_cast<ConstantSDNode>(N.getOperand(1));
15526     if (!Const)
15527       return SDValue();
15528     if (Const->isNullValue())
15529       Imm = 0;
15530     else if (Const->isOne())
15531       Imm = 1;
15532     else
15533       return SDValue();
15534     CC = cast<CondCodeSDNode>(N.getOperand(2))->get();
15535     return SearchLoopIntrinsic(N->getOperand(0), CC, Imm, Negate);
15536   }
15537   case ISD::INTRINSIC_W_CHAIN: {
15538     unsigned IntOp = cast<ConstantSDNode>(N.getOperand(1))->getZExtValue();
15539     if (IntOp != Intrinsic::test_set_loop_iterations &&
15540         IntOp != Intrinsic::loop_decrement_reg)
15541       return SDValue();
15542     return N;
15543   }
15544   }
15545   return SDValue();
15546 }
15547 
15548 static SDValue PerformHWLoopCombine(SDNode *N,
15549                                     TargetLowering::DAGCombinerInfo &DCI,
15550                                     const ARMSubtarget *ST) {
15551 
15552   // The hwloop intrinsics that we're interested are used for control-flow,
15553   // either for entering or exiting the loop:
15554   // - test.set.loop.iterations will test whether its operand is zero. If it
15555   //   is zero, the proceeding branch should not enter the loop.
15556   // - loop.decrement.reg also tests whether its operand is zero. If it is
15557   //   zero, the proceeding branch should not branch back to the beginning of
15558   //   the loop.
15559   // So here, we need to check that how the brcond is using the result of each
15560   // of the intrinsics to ensure that we're branching to the right place at the
15561   // right time.
15562 
15563   ISD::CondCode CC;
15564   SDValue Cond;
15565   int Imm = 1;
15566   bool Negate = false;
15567   SDValue Chain = N->getOperand(0);
15568   SDValue Dest;
15569 
15570   if (N->getOpcode() == ISD::BRCOND) {
15571     CC = ISD::SETEQ;
15572     Cond = N->getOperand(1);
15573     Dest = N->getOperand(2);
15574   } else {
15575     assert(N->getOpcode() == ISD::BR_CC && "Expected BRCOND or BR_CC!");
15576     CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
15577     Cond = N->getOperand(2);
15578     Dest = N->getOperand(4);
15579     if (auto *Const = dyn_cast<ConstantSDNode>(N->getOperand(3))) {
15580       if (!Const->isOne() && !Const->isNullValue())
15581         return SDValue();
15582       Imm = Const->getZExtValue();
15583     } else
15584       return SDValue();
15585   }
15586 
15587   SDValue Int = SearchLoopIntrinsic(Cond, CC, Imm, Negate);
15588   if (!Int)
15589     return SDValue();
15590 
15591   if (Negate)
15592     CC = ISD::getSetCCInverse(CC, /* Integer inverse */ MVT::i32);
15593 
15594   auto IsTrueIfZero = [](ISD::CondCode CC, int Imm) {
15595     return (CC == ISD::SETEQ && Imm == 0) ||
15596            (CC == ISD::SETNE && Imm == 1) ||
15597            (CC == ISD::SETLT && Imm == 1) ||
15598            (CC == ISD::SETULT && Imm == 1);
15599   };
15600 
15601   auto IsFalseIfZero = [](ISD::CondCode CC, int Imm) {
15602     return (CC == ISD::SETEQ && Imm == 1) ||
15603            (CC == ISD::SETNE && Imm == 0) ||
15604            (CC == ISD::SETGT && Imm == 0) ||
15605            (CC == ISD::SETUGT && Imm == 0) ||
15606            (CC == ISD::SETGE && Imm == 1) ||
15607            (CC == ISD::SETUGE && Imm == 1);
15608   };
15609 
15610   assert((IsTrueIfZero(CC, Imm) || IsFalseIfZero(CC, Imm)) &&
15611          "unsupported condition");
15612 
15613   SDLoc dl(Int);
15614   SelectionDAG &DAG = DCI.DAG;
15615   SDValue Elements = Int.getOperand(2);
15616   unsigned IntOp = cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue();
15617   assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR)
15618           && "expected single br user");
15619   SDNode *Br = *N->use_begin();
15620   SDValue OtherTarget = Br->getOperand(1);
15621 
15622   // Update the unconditional branch to branch to the given Dest.
15623   auto UpdateUncondBr = [](SDNode *Br, SDValue Dest, SelectionDAG &DAG) {
15624     SDValue NewBrOps[] = { Br->getOperand(0), Dest };
15625     SDValue NewBr = DAG.getNode(ISD::BR, SDLoc(Br), MVT::Other, NewBrOps);
15626     DAG.ReplaceAllUsesOfValueWith(SDValue(Br, 0), NewBr);
15627   };
15628 
15629   if (IntOp == Intrinsic::test_set_loop_iterations) {
15630     SDValue Res;
15631     // We expect this 'instruction' to branch when the counter is zero.
15632     if (IsTrueIfZero(CC, Imm)) {
15633       SDValue Ops[] = { Chain, Elements, Dest };
15634       Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops);
15635     } else {
15636       // The logic is the reverse of what we need for WLS, so find the other
15637       // basic block target: the target of the proceeding br.
15638       UpdateUncondBr(Br, Dest, DAG);
15639 
15640       SDValue Ops[] = { Chain, Elements, OtherTarget };
15641       Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops);
15642     }
15643     DAG.ReplaceAllUsesOfValueWith(Int.getValue(1), Int.getOperand(0));
15644     return Res;
15645   } else {
15646     SDValue Size = DAG.getTargetConstant(
15647       cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl, MVT::i32);
15648     SDValue Args[] = { Int.getOperand(0), Elements, Size, };
15649     SDValue LoopDec = DAG.getNode(ARMISD::LOOP_DEC, dl,
15650                                   DAG.getVTList(MVT::i32, MVT::Other), Args);
15651     DAG.ReplaceAllUsesWith(Int.getNode(), LoopDec.getNode());
15652 
15653     // We expect this instruction to branch when the count is not zero.
15654     SDValue Target = IsFalseIfZero(CC, Imm) ? Dest : OtherTarget;
15655 
15656     // Update the unconditional branch to target the loop preheader if we've
15657     // found the condition has been reversed.
15658     if (Target == OtherTarget)
15659       UpdateUncondBr(Br, Dest, DAG);
15660 
15661     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
15662                         SDValue(LoopDec.getNode(), 1), Chain);
15663 
15664     SDValue EndArgs[] = { Chain, SDValue(LoopDec.getNode(), 0), Target };
15665     return DAG.getNode(ARMISD::LE, dl, MVT::Other, EndArgs);
15666   }
15667   return SDValue();
15668 }
15669 
15670 /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
15671 SDValue
15672 ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
15673   SDValue Cmp = N->getOperand(4);
15674   if (Cmp.getOpcode() != ARMISD::CMPZ)
15675     // Only looking at NE cases.
15676     return SDValue();
15677 
15678   EVT VT = N->getValueType(0);
15679   SDLoc dl(N);
15680   SDValue LHS = Cmp.getOperand(0);
15681   SDValue RHS = Cmp.getOperand(1);
15682   SDValue Chain = N->getOperand(0);
15683   SDValue BB = N->getOperand(1);
15684   SDValue ARMcc = N->getOperand(2);
15685   ARMCC::CondCodes CC =
15686     (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
15687 
15688   // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
15689   // -> (brcond Chain BB CC CPSR Cmp)
15690   if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() &&
15691       LHS->getOperand(0)->getOpcode() == ARMISD::CMOV &&
15692       LHS->getOperand(0)->hasOneUse()) {
15693     auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0));
15694     auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1));
15695     auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
15696     auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
15697     if ((LHS00C && LHS00C->getZExtValue() == 0) &&
15698         (LHS01C && LHS01C->getZExtValue() == 1) &&
15699         (LHS1C && LHS1C->getZExtValue() == 1) &&
15700         (RHSC && RHSC->getZExtValue() == 0)) {
15701       return DAG.getNode(
15702           ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2),
15703           LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4));
15704     }
15705   }
15706 
15707   return SDValue();
15708 }
15709 
15710 /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
15711 SDValue
15712 ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
15713   SDValue Cmp = N->getOperand(4);
15714   if (Cmp.getOpcode() != ARMISD::CMPZ)
15715     // Only looking at EQ and NE cases.
15716     return SDValue();
15717 
15718   EVT VT = N->getValueType(0);
15719   SDLoc dl(N);
15720   SDValue LHS = Cmp.getOperand(0);
15721   SDValue RHS = Cmp.getOperand(1);
15722   SDValue FalseVal = N->getOperand(0);
15723   SDValue TrueVal = N->getOperand(1);
15724   SDValue ARMcc = N->getOperand(2);
15725   ARMCC::CondCodes CC =
15726     (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
15727 
15728   // BFI is only available on V6T2+.
15729   if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
15730     SDValue R = PerformCMOVToBFICombine(N, DAG);
15731     if (R)
15732       return R;
15733   }
15734 
15735   // Simplify
15736   //   mov     r1, r0
15737   //   cmp     r1, x
15738   //   mov     r0, y
15739   //   moveq   r0, x
15740   // to
15741   //   cmp     r0, x
15742   //   movne   r0, y
15743   //
15744   //   mov     r1, r0
15745   //   cmp     r1, x
15746   //   mov     r0, x
15747   //   movne   r0, y
15748   // to
15749   //   cmp     r0, x
15750   //   movne   r0, y
15751   /// FIXME: Turn this into a target neutral optimization?
15752   SDValue Res;
15753   if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
15754     Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
15755                       N->getOperand(3), Cmp);
15756   } else if (CC == ARMCC::EQ && TrueVal == RHS) {
15757     SDValue ARMcc;
15758     SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl);
15759     Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc,
15760                       N->getOperand(3), NewCmp);
15761   }
15762 
15763   // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0))
15764   // -> (cmov F T CC CPSR Cmp)
15765   if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) {
15766     auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0));
15767     auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
15768     auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
15769     if ((LHS0C && LHS0C->getZExtValue() == 0) &&
15770         (LHS1C && LHS1C->getZExtValue() == 1) &&
15771         (RHSC && RHSC->getZExtValue() == 0)) {
15772       return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
15773                          LHS->getOperand(2), LHS->getOperand(3),
15774                          LHS->getOperand(4));
15775     }
15776   }
15777 
15778   if (!VT.isInteger())
15779       return SDValue();
15780 
15781   // Materialize a boolean comparison for integers so we can avoid branching.
15782   if (isNullConstant(FalseVal)) {
15783     if (CC == ARMCC::EQ && isOneConstant(TrueVal)) {
15784       if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) {
15785         // If x == y then x - y == 0 and ARM's CLZ will return 32, shifting it
15786         // right 5 bits will make that 32 be 1, otherwise it will be 0.
15787         // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5
15788         SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
15789         Res = DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::CTLZ, dl, VT, Sub),
15790                           DAG.getConstant(5, dl, MVT::i32));
15791       } else {
15792         // CMOV 0, 1, ==, (CMPZ x, y) ->
15793         //     (ADDCARRY (SUB x, y), t:0, t:1)
15794         // where t = (SUBCARRY 0, (SUB x, y), 0)
15795         //
15796         // The SUBCARRY computes 0 - (x - y) and this will give a borrow when
15797         // x != y. In other words, a carry C == 1 when x == y, C == 0
15798         // otherwise.
15799         // The final ADDCARRY computes
15800         //     x - y + (0 - (x - y)) + C == C
15801         SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
15802         SDVTList VTs = DAG.getVTList(VT, MVT::i32);
15803         SDValue Neg = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, Sub);
15804         // ISD::SUBCARRY returns a borrow but we want the carry here
15805         // actually.
15806         SDValue Carry =
15807             DAG.getNode(ISD::SUB, dl, MVT::i32,
15808                         DAG.getConstant(1, dl, MVT::i32), Neg.getValue(1));
15809         Res = DAG.getNode(ISD::ADDCARRY, dl, VTs, Sub, Neg, Carry);
15810       }
15811     } else if (CC == ARMCC::NE && !isNullConstant(RHS) &&
15812                (!Subtarget->isThumb1Only() || isPowerOf2Constant(TrueVal))) {
15813       // This seems pointless but will allow us to combine it further below.
15814       // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1
15815       SDValue Sub =
15816           DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS);
15817       SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
15818                                           Sub.getValue(1), SDValue());
15819       Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, TrueVal, ARMcc,
15820                         N->getOperand(3), CPSRGlue.getValue(1));
15821       FalseVal = Sub;
15822     }
15823   } else if (isNullConstant(TrueVal)) {
15824     if (CC == ARMCC::EQ && !isNullConstant(RHS) &&
15825         (!Subtarget->isThumb1Only() || isPowerOf2Constant(FalseVal))) {
15826       // This seems pointless but will allow us to combine it further below
15827       // Note that we change == for != as this is the dual for the case above.
15828       // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1
15829       SDValue Sub =
15830           DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS);
15831       SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
15832                                           Sub.getValue(1), SDValue());
15833       Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, FalseVal,
15834                         DAG.getConstant(ARMCC::NE, dl, MVT::i32),
15835                         N->getOperand(3), CPSRGlue.getValue(1));
15836       FalseVal = Sub;
15837     }
15838   }
15839 
15840   // On Thumb1, the DAG above may be further combined if z is a power of 2
15841   // (z == 2 ^ K).
15842   // CMOV (SUBS x, y), z, !=, (SUBS x, y):1 ->
15843   // t1 = (USUBO (SUB x, y), 1)
15844   // t2 = (SUBCARRY (SUB x, y), t1:0, t1:1)
15845   // Result = if K != 0 then (SHL t2:0, K) else t2:0
15846   //
15847   // This also handles the special case of comparing against zero; it's
15848   // essentially, the same pattern, except there's no SUBS:
15849   // CMOV x, z, !=, (CMPZ x, 0) ->
15850   // t1 = (USUBO x, 1)
15851   // t2 = (SUBCARRY x, t1:0, t1:1)
15852   // Result = if K != 0 then (SHL t2:0, K) else t2:0
15853   const APInt *TrueConst;
15854   if (Subtarget->isThumb1Only() && CC == ARMCC::NE &&
15855       ((FalseVal.getOpcode() == ARMISD::SUBS &&
15856         FalseVal.getOperand(0) == LHS && FalseVal.getOperand(1) == RHS) ||
15857        (FalseVal == LHS && isNullConstant(RHS))) &&
15858       (TrueConst = isPowerOf2Constant(TrueVal))) {
15859     SDVTList VTs = DAG.getVTList(VT, MVT::i32);
15860     unsigned ShiftAmount = TrueConst->logBase2();
15861     if (ShiftAmount)
15862       TrueVal = DAG.getConstant(1, dl, VT);
15863     SDValue Subc = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, TrueVal);
15864     Res = DAG.getNode(ISD::SUBCARRY, dl, VTs, FalseVal, Subc, Subc.getValue(1));
15865 
15866     if (ShiftAmount)
15867       Res = DAG.getNode(ISD::SHL, dl, VT, Res,
15868                         DAG.getConstant(ShiftAmount, dl, MVT::i32));
15869   }
15870 
15871   if (Res.getNode()) {
15872     KnownBits Known = DAG.computeKnownBits(SDValue(N,0));
15873     // Capture demanded bits information that would be otherwise lost.
15874     if (Known.Zero == 0xfffffffe)
15875       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
15876                         DAG.getValueType(MVT::i1));
15877     else if (Known.Zero == 0xffffff00)
15878       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
15879                         DAG.getValueType(MVT::i8));
15880     else if (Known.Zero == 0xffff0000)
15881       Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
15882                         DAG.getValueType(MVT::i16));
15883   }
15884 
15885   return Res;
15886 }
15887 
15888 static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG,
15889                                     const ARMSubtarget *ST) {
15890   SDValue Src = N->getOperand(0);
15891   EVT DstVT = N->getValueType(0);
15892 
15893   // Convert v4f32 bitcast (v4i32 vdup (i32)) -> v4f32 vdup (i32) under MVE.
15894   if (ST->hasMVEIntegerOps() && Src.getOpcode() == ARMISD::VDUP) {
15895     EVT SrcVT = Src.getValueType();
15896     if (SrcVT.getScalarSizeInBits() == DstVT.getScalarSizeInBits())
15897       return DAG.getNode(ARMISD::VDUP, SDLoc(N), DstVT, Src.getOperand(0));
15898   }
15899 
15900   // We may have a bitcast of something that has already had this bitcast
15901   // combine performed on it, so skip past any VECTOR_REG_CASTs.
15902   while (Src.getOpcode() == ARMISD::VECTOR_REG_CAST)
15903     Src = Src.getOperand(0);
15904 
15905   // Bitcast from element-wise VMOV or VMVN doesn't need VREV if the VREV that
15906   // would be generated is at least the width of the element type.
15907   EVT SrcVT = Src.getValueType();
15908   if ((Src.getOpcode() == ARMISD::VMOVIMM ||
15909        Src.getOpcode() == ARMISD::VMVNIMM ||
15910        Src.getOpcode() == ARMISD::VMOVFPIMM) &&
15911       SrcVT.getScalarSizeInBits() <= DstVT.getScalarSizeInBits() &&
15912       DAG.getDataLayout().isBigEndian())
15913     return DAG.getNode(ARMISD::VECTOR_REG_CAST, SDLoc(N), DstVT, Src);
15914 
15915   return SDValue();
15916 }
15917 
15918 SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
15919                                              DAGCombinerInfo &DCI) const {
15920   switch (N->getOpcode()) {
15921   default: break;
15922   case ISD::VSELECT:    return PerformVSELECTCombine(N, DCI, Subtarget);
15923   case ISD::ABS:        return PerformABSCombine(N, DCI, Subtarget);
15924   case ARMISD::ADDE:    return PerformADDECombine(N, DCI, Subtarget);
15925   case ARMISD::UMLAL:   return PerformUMLALCombine(N, DCI.DAG, Subtarget);
15926   case ISD::ADD:        return PerformADDCombine(N, DCI, Subtarget);
15927   case ISD::SUB:        return PerformSUBCombine(N, DCI, Subtarget);
15928   case ISD::MUL:        return PerformMULCombine(N, DCI, Subtarget);
15929   case ISD::OR:         return PerformORCombine(N, DCI, Subtarget);
15930   case ISD::XOR:        return PerformXORCombine(N, DCI, Subtarget);
15931   case ISD::AND:        return PerformANDCombine(N, DCI, Subtarget);
15932   case ISD::BRCOND:
15933   case ISD::BR_CC:      return PerformHWLoopCombine(N, DCI, Subtarget);
15934   case ARMISD::ADDC:
15935   case ARMISD::SUBC:    return PerformAddcSubcCombine(N, DCI, Subtarget);
15936   case ARMISD::SUBE:    return PerformAddeSubeCombine(N, DCI, Subtarget);
15937   case ARMISD::BFI:     return PerformBFICombine(N, DCI);
15938   case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget);
15939   case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
15940   case ARMISD::VMOVhr:  return PerformVMOVhrCombine(N, DCI);
15941   case ARMISD::VMOVrh:  return PerformVMOVrhCombine(N, DCI);
15942   case ISD::STORE:      return PerformSTORECombine(N, DCI, Subtarget);
15943   case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget);
15944   case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
15945   case ISD::EXTRACT_VECTOR_ELT: return PerformExtractEltCombine(N, DCI);
15946   case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
15947   case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI, Subtarget);
15948   case ARMISD::VDUP: return PerformVDUPCombine(N, DCI, Subtarget);
15949   case ISD::FP_TO_SINT:
15950   case ISD::FP_TO_UINT:
15951     return PerformVCVTCombine(N, DCI.DAG, Subtarget);
15952   case ISD::FDIV:
15953     return PerformVDIVCombine(N, DCI.DAG, Subtarget);
15954   case ISD::INTRINSIC_WO_CHAIN:
15955     return PerformIntrinsicCombine(N, DCI);
15956   case ISD::SHL:
15957   case ISD::SRA:
15958   case ISD::SRL:
15959     return PerformShiftCombine(N, DCI, Subtarget);
15960   case ISD::SIGN_EXTEND:
15961   case ISD::ZERO_EXTEND:
15962   case ISD::ANY_EXTEND:
15963     return PerformExtendCombine(N, DCI.DAG, Subtarget);
15964   case ISD::FP_EXTEND:
15965     return PerformFPExtendCombine(N, DCI.DAG, Subtarget);
15966   case ISD::SMIN:
15967   case ISD::UMIN:
15968   case ISD::SMAX:
15969   case ISD::UMAX:
15970     return PerformMinMaxCombine(N, DCI.DAG, Subtarget);
15971   case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
15972   case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG);
15973   case ISD::LOAD:       return PerformLOADCombine(N, DCI);
15974   case ARMISD::VLD1DUP:
15975   case ARMISD::VLD2DUP:
15976   case ARMISD::VLD3DUP:
15977   case ARMISD::VLD4DUP:
15978     return PerformVLDCombine(N, DCI);
15979   case ARMISD::BUILD_VECTOR:
15980     return PerformARMBUILD_VECTORCombine(N, DCI);
15981   case ISD::BITCAST:
15982     return PerformBITCASTCombine(N, DCI.DAG, Subtarget);
15983   case ARMISD::PREDICATE_CAST:
15984     return PerformPREDICATE_CASTCombine(N, DCI);
15985   case ARMISD::VECTOR_REG_CAST:
15986     return PerformVECTOR_REG_CASTCombine(N, DCI, Subtarget);
15987   case ARMISD::VCMP:
15988     return PerformVCMPCombine(N, DCI, Subtarget);
15989   case ISD::VECREDUCE_ADD:
15990     return PerformVECREDUCE_ADDCombine(N, DCI.DAG, Subtarget);
15991   case ARMISD::VMOVN:
15992     return PerformVMOVNCombine(N, DCI);
15993   case ARMISD::VQMOVNs:
15994   case ARMISD::VQMOVNu:
15995     return PerformVQMOVNCombine(N, DCI);
15996   case ARMISD::ASRL:
15997   case ARMISD::LSRL:
15998   case ARMISD::LSLL:
15999     return PerformLongShiftCombine(N, DCI.DAG);
16000   case ARMISD::SMULWB: {
16001     unsigned BitWidth = N->getValueType(0).getSizeInBits();
16002     APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
16003     if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
16004       return SDValue();
16005     break;
16006   }
16007   case ARMISD::SMULWT: {
16008     unsigned BitWidth = N->getValueType(0).getSizeInBits();
16009     APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
16010     if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
16011       return SDValue();
16012     break;
16013   }
16014   case ARMISD::SMLALBB:
16015   case ARMISD::QADD16b:
16016   case ARMISD::QSUB16b: {
16017     unsigned BitWidth = N->getValueType(0).getSizeInBits();
16018     APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
16019     if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
16020         (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
16021       return SDValue();
16022     break;
16023   }
16024   case ARMISD::SMLALBT: {
16025     unsigned LowWidth = N->getOperand(0).getValueType().getSizeInBits();
16026     APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
16027     unsigned HighWidth = N->getOperand(1).getValueType().getSizeInBits();
16028     APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
16029     if ((SimplifyDemandedBits(N->getOperand(0), LowMask, DCI)) ||
16030         (SimplifyDemandedBits(N->getOperand(1), HighMask, DCI)))
16031       return SDValue();
16032     break;
16033   }
16034   case ARMISD::SMLALTB: {
16035     unsigned HighWidth = N->getOperand(0).getValueType().getSizeInBits();
16036     APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
16037     unsigned LowWidth = N->getOperand(1).getValueType().getSizeInBits();
16038     APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
16039     if ((SimplifyDemandedBits(N->getOperand(0), HighMask, DCI)) ||
16040         (SimplifyDemandedBits(N->getOperand(1), LowMask, DCI)))
16041       return SDValue();
16042     break;
16043   }
16044   case ARMISD::SMLALTT: {
16045     unsigned BitWidth = N->getValueType(0).getSizeInBits();
16046     APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
16047     if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
16048         (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
16049       return SDValue();
16050     break;
16051   }
16052   case ARMISD::QADD8b:
16053   case ARMISD::QSUB8b: {
16054     unsigned BitWidth = N->getValueType(0).getSizeInBits();
16055     APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
16056     if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
16057         (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
16058       return SDValue();
16059     break;
16060   }
16061   case ISD::INTRINSIC_VOID:
16062   case ISD::INTRINSIC_W_CHAIN:
16063     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
16064     case Intrinsic::arm_neon_vld1:
16065     case Intrinsic::arm_neon_vld1x2:
16066     case Intrinsic::arm_neon_vld1x3:
16067     case Intrinsic::arm_neon_vld1x4:
16068     case Intrinsic::arm_neon_vld2:
16069     case Intrinsic::arm_neon_vld3:
16070     case Intrinsic::arm_neon_vld4:
16071     case Intrinsic::arm_neon_vld2lane:
16072     case Intrinsic::arm_neon_vld3lane:
16073     case Intrinsic::arm_neon_vld4lane:
16074     case Intrinsic::arm_neon_vld2dup:
16075     case Intrinsic::arm_neon_vld3dup:
16076     case Intrinsic::arm_neon_vld4dup:
16077     case Intrinsic::arm_neon_vst1:
16078     case Intrinsic::arm_neon_vst1x2:
16079     case Intrinsic::arm_neon_vst1x3:
16080     case Intrinsic::arm_neon_vst1x4:
16081     case Intrinsic::arm_neon_vst2:
16082     case Intrinsic::arm_neon_vst3:
16083     case Intrinsic::arm_neon_vst4:
16084     case Intrinsic::arm_neon_vst2lane:
16085     case Intrinsic::arm_neon_vst3lane:
16086     case Intrinsic::arm_neon_vst4lane:
16087       return PerformVLDCombine(N, DCI);
16088     case Intrinsic::arm_mve_vld2q:
16089     case Intrinsic::arm_mve_vld4q:
16090     case Intrinsic::arm_mve_vst2q:
16091     case Intrinsic::arm_mve_vst4q:
16092       return PerformMVEVLDCombine(N, DCI);
16093     default: break;
16094     }
16095     break;
16096   }
16097   return SDValue();
16098 }
16099 
16100 bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
16101                                                           EVT VT) const {
16102   return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
16103 }
16104 
16105 bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
16106                                                        unsigned Alignment,
16107                                                        MachineMemOperand::Flags,
16108                                                        bool *Fast) const {
16109   // Depends what it gets converted into if the type is weird.
16110   if (!VT.isSimple())
16111     return false;
16112 
16113   // The AllowsUnaligned flag models the SCTLR.A setting in ARM cpus
16114   bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
16115   auto Ty = VT.getSimpleVT().SimpleTy;
16116 
16117   if (Ty == MVT::i8 || Ty == MVT::i16 || Ty == MVT::i32) {
16118     // Unaligned access can use (for example) LRDB, LRDH, LDR
16119     if (AllowsUnaligned) {
16120       if (Fast)
16121         *Fast = Subtarget->hasV7Ops();
16122       return true;
16123     }
16124   }
16125 
16126   if (Ty == MVT::f64 || Ty == MVT::v2f64) {
16127     // For any little-endian targets with neon, we can support unaligned ld/st
16128     // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
16129     // A big-endian target may also explicitly support unaligned accesses
16130     if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) {
16131       if (Fast)
16132         *Fast = true;
16133       return true;
16134     }
16135   }
16136 
16137   if (!Subtarget->hasMVEIntegerOps())
16138     return false;
16139 
16140   // These are for predicates
16141   if ((Ty == MVT::v16i1 || Ty == MVT::v8i1 || Ty == MVT::v4i1)) {
16142     if (Fast)
16143       *Fast = true;
16144     return true;
16145   }
16146 
16147   // These are for truncated stores/narrowing loads. They are fine so long as
16148   // the alignment is at least the size of the item being loaded
16149   if ((Ty == MVT::v4i8 || Ty == MVT::v8i8 || Ty == MVT::v4i16) &&
16150       Alignment >= VT.getScalarSizeInBits() / 8) {
16151     if (Fast)
16152       *Fast = true;
16153     return true;
16154   }
16155 
16156   // In little-endian MVE, the store instructions VSTRB.U8, VSTRH.U16 and
16157   // VSTRW.U32 all store the vector register in exactly the same format, and
16158   // differ only in the range of their immediate offset field and the required
16159   // alignment. So there is always a store that can be used, regardless of
16160   // actual type.
16161   //
16162   // For big endian, that is not the case. But can still emit a (VSTRB.U8;
16163   // VREV64.8) pair and get the same effect. This will likely be better than
16164   // aligning the vector through the stack.
16165   if (Ty == MVT::v16i8 || Ty == MVT::v8i16 || Ty == MVT::v8f16 ||
16166       Ty == MVT::v4i32 || Ty == MVT::v4f32 || Ty == MVT::v2i64 ||
16167       Ty == MVT::v2f64) {
16168     if (Fast)
16169       *Fast = true;
16170     return true;
16171   }
16172 
16173   return false;
16174 }
16175 
16176 
16177 EVT ARMTargetLowering::getOptimalMemOpType(
16178     const MemOp &Op, const AttributeList &FuncAttributes) const {
16179   // See if we can use NEON instructions for this...
16180   if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->hasNEON() &&
16181       !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
16182     bool Fast;
16183     if (Op.size() >= 16 &&
16184         (Op.isAligned(Align(16)) ||
16185          (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1,
16186                                          MachineMemOperand::MONone, &Fast) &&
16187           Fast))) {
16188       return MVT::v2f64;
16189     } else if (Op.size() >= 8 &&
16190                (Op.isAligned(Align(8)) ||
16191                 (allowsMisalignedMemoryAccesses(
16192                      MVT::f64, 0, 1, MachineMemOperand::MONone, &Fast) &&
16193                  Fast))) {
16194       return MVT::f64;
16195     }
16196   }
16197 
16198   // Let the target-independent logic figure it out.
16199   return MVT::Other;
16200 }
16201 
16202 // 64-bit integers are split into their high and low parts and held in two
16203 // different registers, so the trunc is free since the low register can just
16204 // be used.
16205 bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
16206   if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
16207     return false;
16208   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
16209   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
16210   return (SrcBits == 64 && DestBits == 32);
16211 }
16212 
16213 bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
16214   if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() ||
16215       !DstVT.isInteger())
16216     return false;
16217   unsigned SrcBits = SrcVT.getSizeInBits();
16218   unsigned DestBits = DstVT.getSizeInBits();
16219   return (SrcBits == 64 && DestBits == 32);
16220 }
16221 
16222 bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
16223   if (Val.getOpcode() != ISD::LOAD)
16224     return false;
16225 
16226   EVT VT1 = Val.getValueType();
16227   if (!VT1.isSimple() || !VT1.isInteger() ||
16228       !VT2.isSimple() || !VT2.isInteger())
16229     return false;
16230 
16231   switch (VT1.getSimpleVT().SimpleTy) {
16232   default: break;
16233   case MVT::i1:
16234   case MVT::i8:
16235   case MVT::i16:
16236     // 8-bit and 16-bit loads implicitly zero-extend to 32-bits.
16237     return true;
16238   }
16239 
16240   return false;
16241 }
16242 
16243 bool ARMTargetLowering::isFNegFree(EVT VT) const {
16244   if (!VT.isSimple())
16245     return false;
16246 
16247   // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that
16248   // negate values directly (fneg is free). So, we don't want to let the DAG
16249   // combiner rewrite fneg into xors and some other instructions.  For f16 and
16250   // FullFP16 argument passing, some bitcast nodes may be introduced,
16251   // triggering this DAG combine rewrite, so we are avoiding that with this.
16252   switch (VT.getSimpleVT().SimpleTy) {
16253   default: break;
16254   case MVT::f16:
16255     return Subtarget->hasFullFP16();
16256   }
16257 
16258   return false;
16259 }
16260 
16261 /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth
16262 /// of the vector elements.
16263 static bool areExtractExts(Value *Ext1, Value *Ext2) {
16264   auto areExtDoubled = [](Instruction *Ext) {
16265     return Ext->getType()->getScalarSizeInBits() ==
16266            2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
16267   };
16268 
16269   if (!match(Ext1, m_ZExtOrSExt(m_Value())) ||
16270       !match(Ext2, m_ZExtOrSExt(m_Value())) ||
16271       !areExtDoubled(cast<Instruction>(Ext1)) ||
16272       !areExtDoubled(cast<Instruction>(Ext2)))
16273     return false;
16274 
16275   return true;
16276 }
16277 
16278 /// Check if sinking \p I's operands to I's basic block is profitable, because
16279 /// the operands can be folded into a target instruction, e.g.
16280 /// sext/zext can be folded into vsubl.
16281 bool ARMTargetLowering::shouldSinkOperands(Instruction *I,
16282                                            SmallVectorImpl<Use *> &Ops) const {
16283   if (!I->getType()->isVectorTy())
16284     return false;
16285 
16286   if (Subtarget->hasNEON()) {
16287     switch (I->getOpcode()) {
16288     case Instruction::Sub:
16289     case Instruction::Add: {
16290       if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
16291         return false;
16292       Ops.push_back(&I->getOperandUse(0));
16293       Ops.push_back(&I->getOperandUse(1));
16294       return true;
16295     }
16296     default:
16297       return false;
16298     }
16299   }
16300 
16301   if (!Subtarget->hasMVEIntegerOps())
16302     return false;
16303 
16304   auto IsFMSMul = [&](Instruction *I) {
16305     if (!I->hasOneUse())
16306       return false;
16307     auto *Sub = cast<Instruction>(*I->users().begin());
16308     return Sub->getOpcode() == Instruction::FSub && Sub->getOperand(1) == I;
16309   };
16310   auto IsFMS = [&](Instruction *I) {
16311     if (match(I->getOperand(0), m_FNeg(m_Value())) ||
16312         match(I->getOperand(1), m_FNeg(m_Value())))
16313       return true;
16314     return false;
16315   };
16316 
16317   auto IsSinker = [&](Instruction *I, int Operand) {
16318     switch (I->getOpcode()) {
16319     case Instruction::Add:
16320     case Instruction::Mul:
16321     case Instruction::FAdd:
16322     case Instruction::ICmp:
16323     case Instruction::FCmp:
16324       return true;
16325     case Instruction::FMul:
16326       return !IsFMSMul(I);
16327     case Instruction::Sub:
16328     case Instruction::FSub:
16329     case Instruction::Shl:
16330     case Instruction::LShr:
16331     case Instruction::AShr:
16332       return Operand == 1;
16333     case Instruction::Call:
16334       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
16335         switch (II->getIntrinsicID()) {
16336         case Intrinsic::fma:
16337           return !IsFMS(I);
16338         default:
16339           return false;
16340         }
16341       }
16342       return false;
16343     default:
16344       return false;
16345     }
16346   };
16347 
16348   for (auto OpIdx : enumerate(I->operands())) {
16349     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
16350     // Make sure we are not already sinking this operand
16351     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
16352       continue;
16353 
16354     Instruction *Shuffle = Op;
16355     if (Shuffle->getOpcode() == Instruction::BitCast)
16356       Shuffle = dyn_cast<Instruction>(Shuffle->getOperand(0));
16357     // We are looking for a splat that can be sunk.
16358     if (!Shuffle ||
16359         !match(Shuffle, m_Shuffle(
16360                             m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
16361                             m_Undef(), m_ZeroMask())))
16362       continue;
16363     if (!IsSinker(I, OpIdx.index()))
16364       continue;
16365 
16366     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
16367     // and vector registers
16368     for (Use &U : Op->uses()) {
16369       Instruction *Insn = cast<Instruction>(U.getUser());
16370       if (!IsSinker(Insn, U.getOperandNo()))
16371         return false;
16372     }
16373 
16374     Ops.push_back(&Shuffle->getOperandUse(0));
16375     if (Shuffle != Op)
16376       Ops.push_back(&Op->getOperandUse(0));
16377     Ops.push_back(&OpIdx.value());
16378   }
16379   return true;
16380 }
16381 
16382 Type *ARMTargetLowering::shouldConvertSplatType(ShuffleVectorInst *SVI) const {
16383   if (!Subtarget->hasMVEIntegerOps())
16384     return nullptr;
16385   Type *SVIType = SVI->getType();
16386   Type *ScalarType = SVIType->getScalarType();
16387 
16388   if (ScalarType->isFloatTy())
16389     return Type::getInt32Ty(SVIType->getContext());
16390   if (ScalarType->isHalfTy())
16391     return Type::getInt16Ty(SVIType->getContext());
16392   return nullptr;
16393 }
16394 
16395 bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
16396   EVT VT = ExtVal.getValueType();
16397 
16398   if (!isTypeLegal(VT))
16399     return false;
16400 
16401   if (auto *Ld = dyn_cast<MaskedLoadSDNode>(ExtVal.getOperand(0))) {
16402     if (Ld->isExpandingLoad())
16403       return false;
16404   }
16405 
16406   if (Subtarget->hasMVEIntegerOps())
16407     return true;
16408 
16409   // Don't create a loadext if we can fold the extension into a wide/long
16410   // instruction.
16411   // If there's more than one user instruction, the loadext is desirable no
16412   // matter what.  There can be two uses by the same instruction.
16413   if (ExtVal->use_empty() ||
16414       !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode()))
16415     return true;
16416 
16417   SDNode *U = *ExtVal->use_begin();
16418   if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
16419        U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM))
16420     return false;
16421 
16422   return true;
16423 }
16424 
16425 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
16426   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
16427     return false;
16428 
16429   if (!isTypeLegal(EVT::getEVT(Ty1)))
16430     return false;
16431 
16432   assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
16433 
16434   // Assuming the caller doesn't have a zeroext or signext return parameter,
16435   // truncation all the way down to i1 is valid.
16436   return true;
16437 }
16438 
16439 int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL,
16440                                                 const AddrMode &AM, Type *Ty,
16441                                                 unsigned AS) const {
16442   if (isLegalAddressingMode(DL, AM, Ty, AS)) {
16443     if (Subtarget->hasFPAO())
16444       return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster
16445     return 0;
16446   }
16447   return -1;
16448 }
16449 
16450 /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
16451 /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
16452 /// expanded to FMAs when this method returns true, otherwise fmuladd is
16453 /// expanded to fmul + fadd.
16454 ///
16455 /// ARM supports both fused and unfused multiply-add operations; we already
16456 /// lower a pair of fmul and fadd to the latter so it's not clear that there
16457 /// would be a gain or that the gain would be worthwhile enough to risk
16458 /// correctness bugs.
16459 ///
16460 /// For MVE, we set this to true as it helps simplify the need for some
16461 /// patterns (and we don't have the non-fused floating point instruction).
16462 bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
16463                                                    EVT VT) const {
16464   if (!VT.isSimple())
16465     return false;
16466 
16467   switch (VT.getSimpleVT().SimpleTy) {
16468   case MVT::v4f32:
16469   case MVT::v8f16:
16470     return Subtarget->hasMVEFloatOps();
16471   case MVT::f16:
16472     return Subtarget->useFPVFMx16();
16473   case MVT::f32:
16474     return Subtarget->useFPVFMx();
16475   case MVT::f64:
16476     return Subtarget->useFPVFMx64();
16477   default:
16478     break;
16479   }
16480 
16481   return false;
16482 }
16483 
16484 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
16485   if (V < 0)
16486     return false;
16487 
16488   unsigned Scale = 1;
16489   switch (VT.getSimpleVT().SimpleTy) {
16490   case MVT::i1:
16491   case MVT::i8:
16492     // Scale == 1;
16493     break;
16494   case MVT::i16:
16495     // Scale == 2;
16496     Scale = 2;
16497     break;
16498   default:
16499     // On thumb1 we load most things (i32, i64, floats, etc) with a LDR
16500     // Scale == 4;
16501     Scale = 4;
16502     break;
16503   }
16504 
16505   if ((V & (Scale - 1)) != 0)
16506     return false;
16507   return isUInt<5>(V / Scale);
16508 }
16509 
16510 static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
16511                                       const ARMSubtarget *Subtarget) {
16512   if (!VT.isInteger() && !VT.isFloatingPoint())
16513     return false;
16514   if (VT.isVector() && Subtarget->hasNEON())
16515     return false;
16516   if (VT.isVector() && VT.isFloatingPoint() && Subtarget->hasMVEIntegerOps() &&
16517       !Subtarget->hasMVEFloatOps())
16518     return false;
16519 
16520   bool IsNeg = false;
16521   if (V < 0) {
16522     IsNeg = true;
16523     V = -V;
16524   }
16525 
16526   unsigned NumBytes = std::max((unsigned)VT.getSizeInBits() / 8, 1U);
16527 
16528   // MVE: size * imm7
16529   if (VT.isVector() && Subtarget->hasMVEIntegerOps()) {
16530     switch (VT.getSimpleVT().getVectorElementType().SimpleTy) {
16531     case MVT::i32:
16532     case MVT::f32:
16533       return isShiftedUInt<7,2>(V);
16534     case MVT::i16:
16535     case MVT::f16:
16536       return isShiftedUInt<7,1>(V);
16537     case MVT::i8:
16538       return isUInt<7>(V);
16539     default:
16540       return false;
16541     }
16542   }
16543 
16544   // half VLDR: 2 * imm8
16545   if (VT.isFloatingPoint() && NumBytes == 2 && Subtarget->hasFPRegs16())
16546     return isShiftedUInt<8, 1>(V);
16547   // VLDR and LDRD: 4 * imm8
16548   if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8)
16549     return isShiftedUInt<8, 2>(V);
16550 
16551   if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) {
16552     // + imm12 or - imm8
16553     if (IsNeg)
16554       return isUInt<8>(V);
16555     return isUInt<12>(V);
16556   }
16557 
16558   return false;
16559 }
16560 
16561 /// isLegalAddressImmediate - Return true if the integer value can be used
16562 /// as the offset of the target addressing mode for load / store of the
16563 /// given type.
16564 static bool isLegalAddressImmediate(int64_t V, EVT VT,
16565                                     const ARMSubtarget *Subtarget) {
16566   if (V == 0)
16567     return true;
16568 
16569   if (!VT.isSimple())
16570     return false;
16571 
16572   if (Subtarget->isThumb1Only())
16573     return isLegalT1AddressImmediate(V, VT);
16574   else if (Subtarget->isThumb2())
16575     return isLegalT2AddressImmediate(V, VT, Subtarget);
16576 
16577   // ARM mode.
16578   if (V < 0)
16579     V = - V;
16580   switch (VT.getSimpleVT().SimpleTy) {
16581   default: return false;
16582   case MVT::i1:
16583   case MVT::i8:
16584   case MVT::i32:
16585     // +- imm12
16586     return isUInt<12>(V);
16587   case MVT::i16:
16588     // +- imm8
16589     return isUInt<8>(V);
16590   case MVT::f32:
16591   case MVT::f64:
16592     if (!Subtarget->hasVFP2Base()) // FIXME: NEON?
16593       return false;
16594     return isShiftedUInt<8, 2>(V);
16595   }
16596 }
16597 
16598 bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
16599                                                       EVT VT) const {
16600   int Scale = AM.Scale;
16601   if (Scale < 0)
16602     return false;
16603 
16604   switch (VT.getSimpleVT().SimpleTy) {
16605   default: return false;
16606   case MVT::i1:
16607   case MVT::i8:
16608   case MVT::i16:
16609   case MVT::i32:
16610     if (Scale == 1)
16611       return true;
16612     // r + r << imm
16613     Scale = Scale & ~1;
16614     return Scale == 2 || Scale == 4 || Scale == 8;
16615   case MVT::i64:
16616     // FIXME: What are we trying to model here? ldrd doesn't have an r + r
16617     // version in Thumb mode.
16618     // r + r
16619     if (Scale == 1)
16620       return true;
16621     // r * 2 (this can be lowered to r + r).
16622     if (!AM.HasBaseReg && Scale == 2)
16623       return true;
16624     return false;
16625   case MVT::isVoid:
16626     // Note, we allow "void" uses (basically, uses that aren't loads or
16627     // stores), because arm allows folding a scale into many arithmetic
16628     // operations.  This should be made more precise and revisited later.
16629 
16630     // Allow r << imm, but the imm has to be a multiple of two.
16631     if (Scale & 1) return false;
16632     return isPowerOf2_32(Scale);
16633   }
16634 }
16635 
16636 bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM,
16637                                                       EVT VT) const {
16638   const int Scale = AM.Scale;
16639 
16640   // Negative scales are not supported in Thumb1.
16641   if (Scale < 0)
16642     return false;
16643 
16644   // Thumb1 addressing modes do not support register scaling excepting the
16645   // following cases:
16646   // 1. Scale == 1 means no scaling.
16647   // 2. Scale == 2 this can be lowered to r + r if there is no base register.
16648   return (Scale == 1) || (!AM.HasBaseReg && Scale == 2);
16649 }
16650 
16651 /// isLegalAddressingMode - Return true if the addressing mode represented
16652 /// by AM is legal for this target, for a load/store of the specified type.
16653 bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
16654                                               const AddrMode &AM, Type *Ty,
16655                                               unsigned AS, Instruction *I) const {
16656   EVT VT = getValueType(DL, Ty, true);
16657   if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
16658     return false;
16659 
16660   // Can never fold addr of global into load/store.
16661   if (AM.BaseGV)
16662     return false;
16663 
16664   switch (AM.Scale) {
16665   case 0:  // no scale reg, must be "r+i" or "r", or "i".
16666     break;
16667   default:
16668     // ARM doesn't support any R+R*scale+imm addr modes.
16669     if (AM.BaseOffs)
16670       return false;
16671 
16672     if (!VT.isSimple())
16673       return false;
16674 
16675     if (Subtarget->isThumb1Only())
16676       return isLegalT1ScaledAddressingMode(AM, VT);
16677 
16678     if (Subtarget->isThumb2())
16679       return isLegalT2ScaledAddressingMode(AM, VT);
16680 
16681     int Scale = AM.Scale;
16682     switch (VT.getSimpleVT().SimpleTy) {
16683     default: return false;
16684     case MVT::i1:
16685     case MVT::i8:
16686     case MVT::i32:
16687       if (Scale < 0) Scale = -Scale;
16688       if (Scale == 1)
16689         return true;
16690       // r + r << imm
16691       return isPowerOf2_32(Scale & ~1);
16692     case MVT::i16:
16693     case MVT::i64:
16694       // r +/- r
16695       if (Scale == 1 || (AM.HasBaseReg && Scale == -1))
16696         return true;
16697       // r * 2 (this can be lowered to r + r).
16698       if (!AM.HasBaseReg && Scale == 2)
16699         return true;
16700       return false;
16701 
16702     case MVT::isVoid:
16703       // Note, we allow "void" uses (basically, uses that aren't loads or
16704       // stores), because arm allows folding a scale into many arithmetic
16705       // operations.  This should be made more precise and revisited later.
16706 
16707       // Allow r << imm, but the imm has to be a multiple of two.
16708       if (Scale & 1) return false;
16709       return isPowerOf2_32(Scale);
16710     }
16711   }
16712   return true;
16713 }
16714 
16715 /// isLegalICmpImmediate - Return true if the specified immediate is legal
16716 /// icmp immediate, that is the target has icmp instructions which can compare
16717 /// a register against the immediate without having to materialize the
16718 /// immediate into a register.
16719 bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
16720   // Thumb2 and ARM modes can use cmn for negative immediates.
16721   if (!Subtarget->isThumb())
16722     return ARM_AM::getSOImmVal((uint32_t)Imm) != -1 ||
16723            ARM_AM::getSOImmVal(-(uint32_t)Imm) != -1;
16724   if (Subtarget->isThumb2())
16725     return ARM_AM::getT2SOImmVal((uint32_t)Imm) != -1 ||
16726            ARM_AM::getT2SOImmVal(-(uint32_t)Imm) != -1;
16727   // Thumb1 doesn't have cmn, and only 8-bit immediates.
16728   return Imm >= 0 && Imm <= 255;
16729 }
16730 
16731 /// isLegalAddImmediate - Return true if the specified immediate is a legal add
16732 /// *or sub* immediate, that is the target has add or sub instructions which can
16733 /// add a register with the immediate without having to materialize the
16734 /// immediate into a register.
16735 bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
16736   // Same encoding for add/sub, just flip the sign.
16737   int64_t AbsImm = std::abs(Imm);
16738   if (!Subtarget->isThumb())
16739     return ARM_AM::getSOImmVal(AbsImm) != -1;
16740   if (Subtarget->isThumb2())
16741     return ARM_AM::getT2SOImmVal(AbsImm) != -1;
16742   // Thumb1 only has 8-bit unsigned immediate.
16743   return AbsImm >= 0 && AbsImm <= 255;
16744 }
16745 
16746 static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
16747                                       bool isSEXTLoad, SDValue &Base,
16748                                       SDValue &Offset, bool &isInc,
16749                                       SelectionDAG &DAG) {
16750   if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
16751     return false;
16752 
16753   if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
16754     // AddressingMode 3
16755     Base = Ptr->getOperand(0);
16756     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
16757       int RHSC = (int)RHS->getZExtValue();
16758       if (RHSC < 0 && RHSC > -256) {
16759         assert(Ptr->getOpcode() == ISD::ADD);
16760         isInc = false;
16761         Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
16762         return true;
16763       }
16764     }
16765     isInc = (Ptr->getOpcode() == ISD::ADD);
16766     Offset = Ptr->getOperand(1);
16767     return true;
16768   } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
16769     // AddressingMode 2
16770     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
16771       int RHSC = (int)RHS->getZExtValue();
16772       if (RHSC < 0 && RHSC > -0x1000) {
16773         assert(Ptr->getOpcode() == ISD::ADD);
16774         isInc = false;
16775         Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
16776         Base = Ptr->getOperand(0);
16777         return true;
16778       }
16779     }
16780 
16781     if (Ptr->getOpcode() == ISD::ADD) {
16782       isInc = true;
16783       ARM_AM::ShiftOpc ShOpcVal=
16784         ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
16785       if (ShOpcVal != ARM_AM::no_shift) {
16786         Base = Ptr->getOperand(1);
16787         Offset = Ptr->getOperand(0);
16788       } else {
16789         Base = Ptr->getOperand(0);
16790         Offset = Ptr->getOperand(1);
16791       }
16792       return true;
16793     }
16794 
16795     isInc = (Ptr->getOpcode() == ISD::ADD);
16796     Base = Ptr->getOperand(0);
16797     Offset = Ptr->getOperand(1);
16798     return true;
16799   }
16800 
16801   // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
16802   return false;
16803 }
16804 
16805 static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
16806                                      bool isSEXTLoad, SDValue &Base,
16807                                      SDValue &Offset, bool &isInc,
16808                                      SelectionDAG &DAG) {
16809   if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
16810     return false;
16811 
16812   Base = Ptr->getOperand(0);
16813   if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
16814     int RHSC = (int)RHS->getZExtValue();
16815     if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
16816       assert(Ptr->getOpcode() == ISD::ADD);
16817       isInc = false;
16818       Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
16819       return true;
16820     } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
16821       isInc = Ptr->getOpcode() == ISD::ADD;
16822       Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
16823       return true;
16824     }
16825   }
16826 
16827   return false;
16828 }
16829 
16830 static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment,
16831                                       bool isSEXTLoad, bool IsMasked, bool isLE,
16832                                       SDValue &Base, SDValue &Offset,
16833                                       bool &isInc, SelectionDAG &DAG) {
16834   if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
16835     return false;
16836   if (!isa<ConstantSDNode>(Ptr->getOperand(1)))
16837     return false;
16838 
16839   // We allow LE non-masked loads to change the type (for example use a vldrb.8
16840   // as opposed to a vldrw.32). This can allow extra addressing modes or
16841   // alignments for what is otherwise an equivalent instruction.
16842   bool CanChangeType = isLE && !IsMasked;
16843 
16844   ConstantSDNode *RHS = cast<ConstantSDNode>(Ptr->getOperand(1));
16845   int RHSC = (int)RHS->getZExtValue();
16846 
16847   auto IsInRange = [&](int RHSC, int Limit, int Scale) {
16848     if (RHSC < 0 && RHSC > -Limit * Scale && RHSC % Scale == 0) {
16849       assert(Ptr->getOpcode() == ISD::ADD);
16850       isInc = false;
16851       Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
16852       return true;
16853     } else if (RHSC > 0 && RHSC < Limit * Scale && RHSC % Scale == 0) {
16854       isInc = Ptr->getOpcode() == ISD::ADD;
16855       Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
16856       return true;
16857     }
16858     return false;
16859   };
16860 
16861   // Try to find a matching instruction based on s/zext, Alignment, Offset and
16862   // (in BE/masked) type.
16863   Base = Ptr->getOperand(0);
16864   if (VT == MVT::v4i16) {
16865     if (Alignment >= 2 && IsInRange(RHSC, 0x80, 2))
16866       return true;
16867   } else if (VT == MVT::v4i8 || VT == MVT::v8i8) {
16868     if (IsInRange(RHSC, 0x80, 1))
16869       return true;
16870   } else if (Alignment >= 4 &&
16871              (CanChangeType || VT == MVT::v4i32 || VT == MVT::v4f32) &&
16872              IsInRange(RHSC, 0x80, 4))
16873     return true;
16874   else if (Alignment >= 2 &&
16875            (CanChangeType || VT == MVT::v8i16 || VT == MVT::v8f16) &&
16876            IsInRange(RHSC, 0x80, 2))
16877     return true;
16878   else if ((CanChangeType || VT == MVT::v16i8) && IsInRange(RHSC, 0x80, 1))
16879     return true;
16880   return false;
16881 }
16882 
16883 /// getPreIndexedAddressParts - returns true by value, base pointer and
16884 /// offset pointer and addressing mode by reference if the node's address
16885 /// can be legally represented as pre-indexed load / store address.
16886 bool
16887 ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
16888                                              SDValue &Offset,
16889                                              ISD::MemIndexedMode &AM,
16890                                              SelectionDAG &DAG) const {
16891   if (Subtarget->isThumb1Only())
16892     return false;
16893 
16894   EVT VT;
16895   SDValue Ptr;
16896   Align Alignment;
16897   bool isSEXTLoad = false;
16898   bool IsMasked = false;
16899   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
16900     Ptr = LD->getBasePtr();
16901     VT = LD->getMemoryVT();
16902     Alignment = LD->getAlign();
16903     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
16904   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
16905     Ptr = ST->getBasePtr();
16906     VT = ST->getMemoryVT();
16907     Alignment = ST->getAlign();
16908   } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
16909     Ptr = LD->getBasePtr();
16910     VT = LD->getMemoryVT();
16911     Alignment = LD->getAlign();
16912     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
16913     IsMasked = true;
16914   } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
16915     Ptr = ST->getBasePtr();
16916     VT = ST->getMemoryVT();
16917     Alignment = ST->getAlign();
16918     IsMasked = true;
16919   } else
16920     return false;
16921 
16922   bool isInc;
16923   bool isLegal = false;
16924   if (VT.isVector())
16925     isLegal = Subtarget->hasMVEIntegerOps() &&
16926               getMVEIndexedAddressParts(
16927                   Ptr.getNode(), VT, Alignment, isSEXTLoad, IsMasked,
16928                   Subtarget->isLittle(), Base, Offset, isInc, DAG);
16929   else {
16930     if (Subtarget->isThumb2())
16931       isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
16932                                          Offset, isInc, DAG);
16933     else
16934       isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
16935                                           Offset, isInc, DAG);
16936   }
16937   if (!isLegal)
16938     return false;
16939 
16940   AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
16941   return true;
16942 }
16943 
16944 /// getPostIndexedAddressParts - returns true by value, base pointer and
16945 /// offset pointer and addressing mode by reference if this node can be
16946 /// combined with a load / store to form a post-indexed load / store.
16947 bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
16948                                                    SDValue &Base,
16949                                                    SDValue &Offset,
16950                                                    ISD::MemIndexedMode &AM,
16951                                                    SelectionDAG &DAG) const {
16952   EVT VT;
16953   SDValue Ptr;
16954   Align Alignment;
16955   bool isSEXTLoad = false, isNonExt;
16956   bool IsMasked = false;
16957   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
16958     VT = LD->getMemoryVT();
16959     Ptr = LD->getBasePtr();
16960     Alignment = LD->getAlign();
16961     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
16962     isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
16963   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
16964     VT = ST->getMemoryVT();
16965     Ptr = ST->getBasePtr();
16966     Alignment = ST->getAlign();
16967     isNonExt = !ST->isTruncatingStore();
16968   } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
16969     VT = LD->getMemoryVT();
16970     Ptr = LD->getBasePtr();
16971     Alignment = LD->getAlign();
16972     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
16973     isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
16974     IsMasked = true;
16975   } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
16976     VT = ST->getMemoryVT();
16977     Ptr = ST->getBasePtr();
16978     Alignment = ST->getAlign();
16979     isNonExt = !ST->isTruncatingStore();
16980     IsMasked = true;
16981   } else
16982     return false;
16983 
16984   if (Subtarget->isThumb1Only()) {
16985     // Thumb-1 can do a limited post-inc load or store as an updating LDM. It
16986     // must be non-extending/truncating, i32, with an offset of 4.
16987     assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
16988     if (Op->getOpcode() != ISD::ADD || !isNonExt)
16989       return false;
16990     auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
16991     if (!RHS || RHS->getZExtValue() != 4)
16992       return false;
16993 
16994     Offset = Op->getOperand(1);
16995     Base = Op->getOperand(0);
16996     AM = ISD::POST_INC;
16997     return true;
16998   }
16999 
17000   bool isInc;
17001   bool isLegal = false;
17002   if (VT.isVector())
17003     isLegal = Subtarget->hasMVEIntegerOps() &&
17004               getMVEIndexedAddressParts(Op, VT, Alignment, isSEXTLoad, IsMasked,
17005                                         Subtarget->isLittle(), Base, Offset,
17006                                         isInc, DAG);
17007   else {
17008     if (Subtarget->isThumb2())
17009       isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
17010                                          isInc, DAG);
17011     else
17012       isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
17013                                           isInc, DAG);
17014   }
17015   if (!isLegal)
17016     return false;
17017 
17018   if (Ptr != Base) {
17019     // Swap base ptr and offset to catch more post-index load / store when
17020     // it's legal. In Thumb2 mode, offset must be an immediate.
17021     if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
17022         !Subtarget->isThumb2())
17023       std::swap(Base, Offset);
17024 
17025     // Post-indexed load / store update the base pointer.
17026     if (Ptr != Base)
17027       return false;
17028   }
17029 
17030   AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
17031   return true;
17032 }
17033 
17034 void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
17035                                                       KnownBits &Known,
17036                                                       const APInt &DemandedElts,
17037                                                       const SelectionDAG &DAG,
17038                                                       unsigned Depth) const {
17039   unsigned BitWidth = Known.getBitWidth();
17040   Known.resetAll();
17041   switch (Op.getOpcode()) {
17042   default: break;
17043   case ARMISD::ADDC:
17044   case ARMISD::ADDE:
17045   case ARMISD::SUBC:
17046   case ARMISD::SUBE:
17047     // Special cases when we convert a carry to a boolean.
17048     if (Op.getResNo() == 0) {
17049       SDValue LHS = Op.getOperand(0);
17050       SDValue RHS = Op.getOperand(1);
17051       // (ADDE 0, 0, C) will give us a single bit.
17052       if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(LHS) &&
17053           isNullConstant(RHS)) {
17054         Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
17055         return;
17056       }
17057     }
17058     break;
17059   case ARMISD::CMOV: {
17060     // Bits are known zero/one if known on the LHS and RHS.
17061     Known = DAG.computeKnownBits(Op.getOperand(0), Depth+1);
17062     if (Known.isUnknown())
17063       return;
17064 
17065     KnownBits KnownRHS = DAG.computeKnownBits(Op.getOperand(1), Depth+1);
17066     Known.Zero &= KnownRHS.Zero;
17067     Known.One  &= KnownRHS.One;
17068     return;
17069   }
17070   case ISD::INTRINSIC_W_CHAIN: {
17071     ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
17072     Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
17073     switch (IntID) {
17074     default: return;
17075     case Intrinsic::arm_ldaex:
17076     case Intrinsic::arm_ldrex: {
17077       EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
17078       unsigned MemBits = VT.getScalarSizeInBits();
17079       Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
17080       return;
17081     }
17082     }
17083   }
17084   case ARMISD::BFI: {
17085     // Conservatively, we can recurse down the first operand
17086     // and just mask out all affected bits.
17087     Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
17088 
17089     // The operand to BFI is already a mask suitable for removing the bits it
17090     // sets.
17091     ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
17092     const APInt &Mask = CI->getAPIntValue();
17093     Known.Zero &= Mask;
17094     Known.One &= Mask;
17095     return;
17096   }
17097   case ARMISD::VGETLANEs:
17098   case ARMISD::VGETLANEu: {
17099     const SDValue &SrcSV = Op.getOperand(0);
17100     EVT VecVT = SrcSV.getValueType();
17101     assert(VecVT.isVector() && "VGETLANE expected a vector type");
17102     const unsigned NumSrcElts = VecVT.getVectorNumElements();
17103     ConstantSDNode *Pos = cast<ConstantSDNode>(Op.getOperand(1).getNode());
17104     assert(Pos->getAPIntValue().ult(NumSrcElts) &&
17105            "VGETLANE index out of bounds");
17106     unsigned Idx = Pos->getZExtValue();
17107     APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
17108     Known = DAG.computeKnownBits(SrcSV, DemandedElt, Depth + 1);
17109 
17110     EVT VT = Op.getValueType();
17111     const unsigned DstSz = VT.getScalarSizeInBits();
17112     const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits();
17113     (void)SrcSz;
17114     assert(SrcSz == Known.getBitWidth());
17115     assert(DstSz > SrcSz);
17116     if (Op.getOpcode() == ARMISD::VGETLANEs)
17117       Known = Known.sext(DstSz);
17118     else {
17119       Known = Known.zext(DstSz);
17120     }
17121     assert(DstSz == Known.getBitWidth());
17122     break;
17123   }
17124   case ARMISD::VMOVrh: {
17125     KnownBits KnownOp = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
17126     assert(KnownOp.getBitWidth() == 16);
17127     Known = KnownOp.zext(32);
17128     break;
17129   }
17130   }
17131 }
17132 
17133 bool ARMTargetLowering::targetShrinkDemandedConstant(
17134     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
17135     TargetLoweringOpt &TLO) const {
17136   // Delay optimization, so we don't have to deal with illegal types, or block
17137   // optimizations.
17138   if (!TLO.LegalOps)
17139     return false;
17140 
17141   // Only optimize AND for now.
17142   if (Op.getOpcode() != ISD::AND)
17143     return false;
17144 
17145   EVT VT = Op.getValueType();
17146 
17147   // Ignore vectors.
17148   if (VT.isVector())
17149     return false;
17150 
17151   assert(VT == MVT::i32 && "Unexpected integer type");
17152 
17153   // Make sure the RHS really is a constant.
17154   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
17155   if (!C)
17156     return false;
17157 
17158   unsigned Mask = C->getZExtValue();
17159 
17160   unsigned Demanded = DemandedBits.getZExtValue();
17161   unsigned ShrunkMask = Mask & Demanded;
17162   unsigned ExpandedMask = Mask | ~Demanded;
17163 
17164   // If the mask is all zeros, let the target-independent code replace the
17165   // result with zero.
17166   if (ShrunkMask == 0)
17167     return false;
17168 
17169   // If the mask is all ones, erase the AND. (Currently, the target-independent
17170   // code won't do this, so we have to do it explicitly to avoid an infinite
17171   // loop in obscure cases.)
17172   if (ExpandedMask == ~0U)
17173     return TLO.CombineTo(Op, Op.getOperand(0));
17174 
17175   auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool {
17176     return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0;
17177   };
17178   auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool {
17179     if (NewMask == Mask)
17180       return true;
17181     SDLoc DL(Op);
17182     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
17183     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
17184     return TLO.CombineTo(Op, NewOp);
17185   };
17186 
17187   // Prefer uxtb mask.
17188   if (IsLegalMask(0xFF))
17189     return UseMask(0xFF);
17190 
17191   // Prefer uxth mask.
17192   if (IsLegalMask(0xFFFF))
17193     return UseMask(0xFFFF);
17194 
17195   // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2.
17196   // FIXME: Prefer a contiguous sequence of bits for other optimizations.
17197   if (ShrunkMask < 256)
17198     return UseMask(ShrunkMask);
17199 
17200   // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2.
17201   // FIXME: Prefer a contiguous sequence of bits for other optimizations.
17202   if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256)
17203     return UseMask(ExpandedMask);
17204 
17205   // Potential improvements:
17206   //
17207   // We could try to recognize lsls+lsrs or lsrs+lsls pairs here.
17208   // We could try to prefer Thumb1 immediates which can be lowered to a
17209   // two-instruction sequence.
17210   // We could try to recognize more legal ARM/Thumb2 immediates here.
17211 
17212   return false;
17213 }
17214 
17215 bool ARMTargetLowering::SimplifyDemandedBitsForTargetNode(
17216     SDValue Op, const APInt &OriginalDemandedBits,
17217     const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
17218     unsigned Depth) const {
17219   unsigned Opc = Op.getOpcode();
17220 
17221   switch (Opc) {
17222   case ARMISD::ASRL:
17223   case ARMISD::LSRL: {
17224     // If this is result 0 and the other result is unused, see if the demand
17225     // bits allow us to shrink this long shift into a standard small shift in
17226     // the opposite direction.
17227     if (Op.getResNo() == 0 && !Op->hasAnyUseOfValue(1) &&
17228         isa<ConstantSDNode>(Op->getOperand(2))) {
17229       unsigned ShAmt = Op->getConstantOperandVal(2);
17230       if (ShAmt < 32 && OriginalDemandedBits.isSubsetOf(
17231                             APInt::getAllOnesValue(32) << (32 - ShAmt)))
17232         return TLO.CombineTo(
17233             Op, TLO.DAG.getNode(
17234                     ISD::SHL, SDLoc(Op), MVT::i32, Op.getOperand(1),
17235                     TLO.DAG.getConstant(32 - ShAmt, SDLoc(Op), MVT::i32)));
17236     }
17237     break;
17238   }
17239   }
17240 
17241   return TargetLowering::SimplifyDemandedBitsForTargetNode(
17242       Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
17243 }
17244 
17245 //===----------------------------------------------------------------------===//
17246 //                           ARM Inline Assembly Support
17247 //===----------------------------------------------------------------------===//
17248 
17249 bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
17250   // Looking for "rev" which is V6+.
17251   if (!Subtarget->hasV6Ops())
17252     return false;
17253 
17254   InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
17255   std::string AsmStr = IA->getAsmString();
17256   SmallVector<StringRef, 4> AsmPieces;
17257   SplitString(AsmStr, AsmPieces, ";\n");
17258 
17259   switch (AsmPieces.size()) {
17260   default: return false;
17261   case 1:
17262     AsmStr = std::string(AsmPieces[0]);
17263     AsmPieces.clear();
17264     SplitString(AsmStr, AsmPieces, " \t,");
17265 
17266     // rev $0, $1
17267     if (AsmPieces.size() == 3 &&
17268         AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
17269         IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
17270       IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
17271       if (Ty && Ty->getBitWidth() == 32)
17272         return IntrinsicLowering::LowerToByteSwap(CI);
17273     }
17274     break;
17275   }
17276 
17277   return false;
17278 }
17279 
17280 const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const {
17281   // At this point, we have to lower this constraint to something else, so we
17282   // lower it to an "r" or "w". However, by doing this we will force the result
17283   // to be in register, while the X constraint is much more permissive.
17284   //
17285   // Although we are correct (we are free to emit anything, without
17286   // constraints), we might break use cases that would expect us to be more
17287   // efficient and emit something else.
17288   if (!Subtarget->hasVFP2Base())
17289     return "r";
17290   if (ConstraintVT.isFloatingPoint())
17291     return "w";
17292   if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
17293      (ConstraintVT.getSizeInBits() == 64 ||
17294       ConstraintVT.getSizeInBits() == 128))
17295     return "w";
17296 
17297   return "r";
17298 }
17299 
17300 /// getConstraintType - Given a constraint letter, return the type of
17301 /// constraint it is for this target.
17302 ARMTargetLowering::ConstraintType
17303 ARMTargetLowering::getConstraintType(StringRef Constraint) const {
17304   unsigned S = Constraint.size();
17305   if (S == 1) {
17306     switch (Constraint[0]) {
17307     default:  break;
17308     case 'l': return C_RegisterClass;
17309     case 'w': return C_RegisterClass;
17310     case 'h': return C_RegisterClass;
17311     case 'x': return C_RegisterClass;
17312     case 't': return C_RegisterClass;
17313     case 'j': return C_Immediate; // Constant for movw.
17314     // An address with a single base register. Due to the way we
17315     // currently handle addresses it is the same as an 'r' memory constraint.
17316     case 'Q': return C_Memory;
17317     }
17318   } else if (S == 2) {
17319     switch (Constraint[0]) {
17320     default: break;
17321     case 'T': return C_RegisterClass;
17322     // All 'U+' constraints are addresses.
17323     case 'U': return C_Memory;
17324     }
17325   }
17326   return TargetLowering::getConstraintType(Constraint);
17327 }
17328 
17329 /// Examine constraint type and operand type and determine a weight value.
17330 /// This object must already have been set up with the operand type
17331 /// and the current alternative constraint selected.
17332 TargetLowering::ConstraintWeight
17333 ARMTargetLowering::getSingleConstraintMatchWeight(
17334     AsmOperandInfo &info, const char *constraint) const {
17335   ConstraintWeight weight = CW_Invalid;
17336   Value *CallOperandVal = info.CallOperandVal;
17337     // If we don't have a value, we can't do a match,
17338     // but allow it at the lowest weight.
17339   if (!CallOperandVal)
17340     return CW_Default;
17341   Type *type = CallOperandVal->getType();
17342   // Look at the constraint type.
17343   switch (*constraint) {
17344   default:
17345     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
17346     break;
17347   case 'l':
17348     if (type->isIntegerTy()) {
17349       if (Subtarget->isThumb())
17350         weight = CW_SpecificReg;
17351       else
17352         weight = CW_Register;
17353     }
17354     break;
17355   case 'w':
17356     if (type->isFloatingPointTy())
17357       weight = CW_Register;
17358     break;
17359   }
17360   return weight;
17361 }
17362 
17363 using RCPair = std::pair<unsigned, const TargetRegisterClass *>;
17364 
17365 RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
17366     const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
17367   switch (Constraint.size()) {
17368   case 1:
17369     // GCC ARM Constraint Letters
17370     switch (Constraint[0]) {
17371     case 'l': // Low regs or general regs.
17372       if (Subtarget->isThumb())
17373         return RCPair(0U, &ARM::tGPRRegClass);
17374       return RCPair(0U, &ARM::GPRRegClass);
17375     case 'h': // High regs or no regs.
17376       if (Subtarget->isThumb())
17377         return RCPair(0U, &ARM::hGPRRegClass);
17378       break;
17379     case 'r':
17380       if (Subtarget->isThumb1Only())
17381         return RCPair(0U, &ARM::tGPRRegClass);
17382       return RCPair(0U, &ARM::GPRRegClass);
17383     case 'w':
17384       if (VT == MVT::Other)
17385         break;
17386       if (VT == MVT::f32)
17387         return RCPair(0U, &ARM::SPRRegClass);
17388       if (VT.getSizeInBits() == 64)
17389         return RCPair(0U, &ARM::DPRRegClass);
17390       if (VT.getSizeInBits() == 128)
17391         return RCPair(0U, &ARM::QPRRegClass);
17392       break;
17393     case 'x':
17394       if (VT == MVT::Other)
17395         break;
17396       if (VT == MVT::f32)
17397         return RCPair(0U, &ARM::SPR_8RegClass);
17398       if (VT.getSizeInBits() == 64)
17399         return RCPair(0U, &ARM::DPR_8RegClass);
17400       if (VT.getSizeInBits() == 128)
17401         return RCPair(0U, &ARM::QPR_8RegClass);
17402       break;
17403     case 't':
17404       if (VT == MVT::Other)
17405         break;
17406       if (VT == MVT::f32 || VT == MVT::i32)
17407         return RCPair(0U, &ARM::SPRRegClass);
17408       if (VT.getSizeInBits() == 64)
17409         return RCPair(0U, &ARM::DPR_VFP2RegClass);
17410       if (VT.getSizeInBits() == 128)
17411         return RCPair(0U, &ARM::QPR_VFP2RegClass);
17412       break;
17413     }
17414     break;
17415 
17416   case 2:
17417     if (Constraint[0] == 'T') {
17418       switch (Constraint[1]) {
17419       default:
17420         break;
17421       case 'e':
17422         return RCPair(0U, &ARM::tGPREvenRegClass);
17423       case 'o':
17424         return RCPair(0U, &ARM::tGPROddRegClass);
17425       }
17426     }
17427     break;
17428 
17429   default:
17430     break;
17431   }
17432 
17433   if (StringRef("{cc}").equals_lower(Constraint))
17434     return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);
17435 
17436   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
17437 }
17438 
17439 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
17440 /// vector.  If it is invalid, don't add anything to Ops.
17441 void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
17442                                                      std::string &Constraint,
17443                                                      std::vector<SDValue>&Ops,
17444                                                      SelectionDAG &DAG) const {
17445   SDValue Result;
17446 
17447   // Currently only support length 1 constraints.
17448   if (Constraint.length() != 1) return;
17449 
17450   char ConstraintLetter = Constraint[0];
17451   switch (ConstraintLetter) {
17452   default: break;
17453   case 'j':
17454   case 'I': case 'J': case 'K': case 'L':
17455   case 'M': case 'N': case 'O':
17456     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
17457     if (!C)
17458       return;
17459 
17460     int64_t CVal64 = C->getSExtValue();
17461     int CVal = (int) CVal64;
17462     // None of these constraints allow values larger than 32 bits.  Check
17463     // that the value fits in an int.
17464     if (CVal != CVal64)
17465       return;
17466 
17467     switch (ConstraintLetter) {
17468       case 'j':
17469         // Constant suitable for movw, must be between 0 and
17470         // 65535.
17471         if (Subtarget->hasV6T2Ops() || (Subtarget->hasV8MBaselineOps()))
17472           if (CVal >= 0 && CVal <= 65535)
17473             break;
17474         return;
17475       case 'I':
17476         if (Subtarget->isThumb1Only()) {
17477           // This must be a constant between 0 and 255, for ADD
17478           // immediates.
17479           if (CVal >= 0 && CVal <= 255)
17480             break;
17481         } else if (Subtarget->isThumb2()) {
17482           // A constant that can be used as an immediate value in a
17483           // data-processing instruction.
17484           if (ARM_AM::getT2SOImmVal(CVal) != -1)
17485             break;
17486         } else {
17487           // A constant that can be used as an immediate value in a
17488           // data-processing instruction.
17489           if (ARM_AM::getSOImmVal(CVal) != -1)
17490             break;
17491         }
17492         return;
17493 
17494       case 'J':
17495         if (Subtarget->isThumb1Only()) {
17496           // This must be a constant between -255 and -1, for negated ADD
17497           // immediates. This can be used in GCC with an "n" modifier that
17498           // prints the negated value, for use with SUB instructions. It is
17499           // not useful otherwise but is implemented for compatibility.
17500           if (CVal >= -255 && CVal <= -1)
17501             break;
17502         } else {
17503           // This must be a constant between -4095 and 4095. It is not clear
17504           // what this constraint is intended for. Implemented for
17505           // compatibility with GCC.
17506           if (CVal >= -4095 && CVal <= 4095)
17507             break;
17508         }
17509         return;
17510 
17511       case 'K':
17512         if (Subtarget->isThumb1Only()) {
17513           // A 32-bit value where only one byte has a nonzero value. Exclude
17514           // zero to match GCC. This constraint is used by GCC internally for
17515           // constants that can be loaded with a move/shift combination.
17516           // It is not useful otherwise but is implemented for compatibility.
17517           if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
17518             break;
17519         } else if (Subtarget->isThumb2()) {
17520           // A constant whose bitwise inverse can be used as an immediate
17521           // value in a data-processing instruction. This can be used in GCC
17522           // with a "B" modifier that prints the inverted value, for use with
17523           // BIC and MVN instructions. It is not useful otherwise but is
17524           // implemented for compatibility.
17525           if (ARM_AM::getT2SOImmVal(~CVal) != -1)
17526             break;
17527         } else {
17528           // A constant whose bitwise inverse can be used as an immediate
17529           // value in a data-processing instruction. This can be used in GCC
17530           // with a "B" modifier that prints the inverted value, for use with
17531           // BIC and MVN instructions. It is not useful otherwise but is
17532           // implemented for compatibility.
17533           if (ARM_AM::getSOImmVal(~CVal) != -1)
17534             break;
17535         }
17536         return;
17537 
17538       case 'L':
17539         if (Subtarget->isThumb1Only()) {
17540           // This must be a constant between -7 and 7,
17541           // for 3-operand ADD/SUB immediate instructions.
17542           if (CVal >= -7 && CVal < 7)
17543             break;
17544         } else if (Subtarget->isThumb2()) {
17545           // A constant whose negation can be used as an immediate value in a
17546           // data-processing instruction. This can be used in GCC with an "n"
17547           // modifier that prints the negated value, for use with SUB
17548           // instructions. It is not useful otherwise but is implemented for
17549           // compatibility.
17550           if (ARM_AM::getT2SOImmVal(-CVal) != -1)
17551             break;
17552         } else {
17553           // A constant whose negation can be used as an immediate value in a
17554           // data-processing instruction. This can be used in GCC with an "n"
17555           // modifier that prints the negated value, for use with SUB
17556           // instructions. It is not useful otherwise but is implemented for
17557           // compatibility.
17558           if (ARM_AM::getSOImmVal(-CVal) != -1)
17559             break;
17560         }
17561         return;
17562 
17563       case 'M':
17564         if (Subtarget->isThumb1Only()) {
17565           // This must be a multiple of 4 between 0 and 1020, for
17566           // ADD sp + immediate.
17567           if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
17568             break;
17569         } else {
17570           // A power of two or a constant between 0 and 32.  This is used in
17571           // GCC for the shift amount on shifted register operands, but it is
17572           // useful in general for any shift amounts.
17573           if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
17574             break;
17575         }
17576         return;
17577 
17578       case 'N':
17579         if (Subtarget->isThumb1Only()) {
17580           // This must be a constant between 0 and 31, for shift amounts.
17581           if (CVal >= 0 && CVal <= 31)
17582             break;
17583         }
17584         return;
17585 
17586       case 'O':
17587         if (Subtarget->isThumb1Only()) {
17588           // This must be a multiple of 4 between -508 and 508, for
17589           // ADD/SUB sp = sp + immediate.
17590           if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
17591             break;
17592         }
17593         return;
17594     }
17595     Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType());
17596     break;
17597   }
17598 
17599   if (Result.getNode()) {
17600     Ops.push_back(Result);
17601     return;
17602   }
17603   return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
17604 }
17605 
17606 static RTLIB::Libcall getDivRemLibcall(
17607     const SDNode *N, MVT::SimpleValueType SVT) {
17608   assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
17609           N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
17610          "Unhandled Opcode in getDivRemLibcall");
17611   bool isSigned = N->getOpcode() == ISD::SDIVREM ||
17612                   N->getOpcode() == ISD::SREM;
17613   RTLIB::Libcall LC;
17614   switch (SVT) {
17615   default: llvm_unreachable("Unexpected request for libcall!");
17616   case MVT::i8:  LC = isSigned ? RTLIB::SDIVREM_I8  : RTLIB::UDIVREM_I8;  break;
17617   case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
17618   case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
17619   case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
17620   }
17621   return LC;
17622 }
17623 
17624 static TargetLowering::ArgListTy getDivRemArgList(
17625     const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) {
17626   assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
17627           N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
17628          "Unhandled Opcode in getDivRemArgList");
17629   bool isSigned = N->getOpcode() == ISD::SDIVREM ||
17630                   N->getOpcode() == ISD::SREM;
17631   TargetLowering::ArgListTy Args;
17632   TargetLowering::ArgListEntry Entry;
17633   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
17634     EVT ArgVT = N->getOperand(i).getValueType();
17635     Type *ArgTy = ArgVT.getTypeForEVT(*Context);
17636     Entry.Node = N->getOperand(i);
17637     Entry.Ty = ArgTy;
17638     Entry.IsSExt = isSigned;
17639     Entry.IsZExt = !isSigned;
17640     Args.push_back(Entry);
17641   }
17642   if (Subtarget->isTargetWindows() && Args.size() >= 2)
17643     std::swap(Args[0], Args[1]);
17644   return Args;
17645 }
17646 
17647 SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
17648   assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
17649           Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
17650           Subtarget->isTargetWindows()) &&
17651          "Register-based DivRem lowering only");
17652   unsigned Opcode = Op->getOpcode();
17653   assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
17654          "Invalid opcode for Div/Rem lowering");
17655   bool isSigned = (Opcode == ISD::SDIVREM);
17656   EVT VT = Op->getValueType(0);
17657   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
17658   SDLoc dl(Op);
17659 
17660   // If the target has hardware divide, use divide + multiply + subtract:
17661   //     div = a / b
17662   //     rem = a - b * div
17663   //     return {div, rem}
17664   // This should be lowered into UDIV/SDIV + MLS later on.
17665   bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
17666                                         : Subtarget->hasDivideInARMMode();
17667   if (hasDivide && Op->getValueType(0).isSimple() &&
17668       Op->getSimpleValueType(0) == MVT::i32) {
17669     unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
17670     const SDValue Dividend = Op->getOperand(0);
17671     const SDValue Divisor = Op->getOperand(1);
17672     SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor);
17673     SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor);
17674     SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);
17675 
17676     SDValue Values[2] = {Div, Rem};
17677     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values);
17678   }
17679 
17680   RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(),
17681                                        VT.getSimpleVT().SimpleTy);
17682   SDValue InChain = DAG.getEntryNode();
17683 
17684   TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(),
17685                                                     DAG.getContext(),
17686                                                     Subtarget);
17687 
17688   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
17689                                          getPointerTy(DAG.getDataLayout()));
17690 
17691   Type *RetTy = StructType::get(Ty, Ty);
17692 
17693   if (Subtarget->isTargetWindows())
17694     InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain);
17695 
17696   TargetLowering::CallLoweringInfo CLI(DAG);
17697   CLI.setDebugLoc(dl).setChain(InChain)
17698     .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
17699     .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
17700 
17701   std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
17702   return CallInfo.first;
17703 }
17704 
17705 // Lowers REM using divmod helpers
17706 // see RTABI section 4.2/4.3
17707 SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const {
17708   // Build return types (div and rem)
17709   std::vector<Type*> RetTyParams;
17710   Type *RetTyElement;
17711 
17712   switch (N->getValueType(0).getSimpleVT().SimpleTy) {
17713   default: llvm_unreachable("Unexpected request for libcall!");
17714   case MVT::i8:   RetTyElement = Type::getInt8Ty(*DAG.getContext());  break;
17715   case MVT::i16:  RetTyElement = Type::getInt16Ty(*DAG.getContext()); break;
17716   case MVT::i32:  RetTyElement = Type::getInt32Ty(*DAG.getContext()); break;
17717   case MVT::i64:  RetTyElement = Type::getInt64Ty(*DAG.getContext()); break;
17718   }
17719 
17720   RetTyParams.push_back(RetTyElement);
17721   RetTyParams.push_back(RetTyElement);
17722   ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams);
17723   Type *RetTy = StructType::get(*DAG.getContext(), ret);
17724 
17725   RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT().
17726                                                              SimpleTy);
17727   SDValue InChain = DAG.getEntryNode();
17728   TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(),
17729                                                     Subtarget);
17730   bool isSigned = N->getOpcode() == ISD::SREM;
17731   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
17732                                          getPointerTy(DAG.getDataLayout()));
17733 
17734   if (Subtarget->isTargetWindows())
17735     InChain = WinDBZCheckDenominator(DAG, N, InChain);
17736 
17737   // Lower call
17738   CallLoweringInfo CLI(DAG);
17739   CLI.setChain(InChain)
17740      .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args))
17741      .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N));
17742   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
17743 
17744   // Return second (rem) result operand (first contains div)
17745   SDNode *ResNode = CallResult.first.getNode();
17746   assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
17747   return ResNode->getOperand(1);
17748 }
17749 
17750 SDValue
17751 ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
17752   assert(Subtarget->isTargetWindows() && "unsupported target platform");
17753   SDLoc DL(Op);
17754 
17755   // Get the inputs.
17756   SDValue Chain = Op.getOperand(0);
17757   SDValue Size  = Op.getOperand(1);
17758 
17759   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
17760           "no-stack-arg-probe")) {
17761     MaybeAlign Align =
17762         cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
17763     SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
17764     Chain = SP.getValue(1);
17765     SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size);
17766     if (Align)
17767       SP =
17768           DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0),
17769                       DAG.getConstant(-(uint64_t)Align->value(), DL, MVT::i32));
17770     Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP);
17771     SDValue Ops[2] = { SP, Chain };
17772     return DAG.getMergeValues(Ops, DL);
17773   }
17774 
17775   SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
17776                               DAG.getConstant(2, DL, MVT::i32));
17777 
17778   SDValue Flag;
17779   Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag);
17780   Flag = Chain.getValue(1);
17781 
17782   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
17783   Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag);
17784 
17785   SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
17786   Chain = NewSP.getValue(1);
17787 
17788   SDValue Ops[2] = { NewSP, Chain };
17789   return DAG.getMergeValues(Ops, DL);
17790 }
17791 
17792 SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
17793   bool IsStrict = Op->isStrictFPOpcode();
17794   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
17795   const unsigned DstSz = Op.getValueType().getSizeInBits();
17796   const unsigned SrcSz = SrcVal.getValueType().getSizeInBits();
17797   assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 &&
17798          "Unexpected type for custom-lowering FP_EXTEND");
17799 
17800   assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
17801          "With both FP DP and 16, any FP conversion is legal!");
17802 
17803   assert(!(DstSz == 32 && Subtarget->hasFP16()) &&
17804          "With FP16, 16 to 32 conversion is legal!");
17805 
17806   // Converting from 32 -> 64 is valid if we have FP64.
17807   if (SrcSz == 32 && DstSz == 64 && Subtarget->hasFP64()) {
17808     // FIXME: Remove this when we have strict fp instruction selection patterns
17809     if (IsStrict) {
17810       SDLoc Loc(Op);
17811       SDValue Result = DAG.getNode(ISD::FP_EXTEND,
17812                                    Loc, Op.getValueType(), SrcVal);
17813       return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc);
17814     }
17815     return Op;
17816   }
17817 
17818   // Either we are converting from 16 -> 64, without FP16 and/or
17819   // FP.double-precision or without Armv8-fp. So we must do it in two
17820   // steps.
17821   // Or we are converting from 32 -> 64 without fp.double-precision or 16 -> 32
17822   // without FP16. So we must do a function call.
17823   SDLoc Loc(Op);
17824   RTLIB::Libcall LC;
17825   MakeLibCallOptions CallOptions;
17826   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
17827   for (unsigned Sz = SrcSz; Sz <= 32 && Sz < DstSz; Sz *= 2) {
17828     bool Supported = (Sz == 16 ? Subtarget->hasFP16() : Subtarget->hasFP64());
17829     MVT SrcVT = (Sz == 16 ? MVT::f16 : MVT::f32);
17830     MVT DstVT = (Sz == 16 ? MVT::f32 : MVT::f64);
17831     if (Supported) {
17832       if (IsStrict) {
17833         SrcVal = DAG.getNode(ISD::STRICT_FP_EXTEND, Loc,
17834                              {DstVT, MVT::Other}, {Chain, SrcVal});
17835         Chain = SrcVal.getValue(1);
17836       } else {
17837         SrcVal = DAG.getNode(ISD::FP_EXTEND, Loc, DstVT, SrcVal);
17838       }
17839     } else {
17840       LC = RTLIB::getFPEXT(SrcVT, DstVT);
17841       assert(LC != RTLIB::UNKNOWN_LIBCALL &&
17842              "Unexpected type for custom-lowering FP_EXTEND");
17843       std::tie(SrcVal, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions,
17844                                             Loc, Chain);
17845     }
17846   }
17847 
17848   return IsStrict ? DAG.getMergeValues({SrcVal, Chain}, Loc) : SrcVal;
17849 }
17850 
17851 SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
17852   bool IsStrict = Op->isStrictFPOpcode();
17853 
17854   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
17855   EVT SrcVT = SrcVal.getValueType();
17856   EVT DstVT = Op.getValueType();
17857   const unsigned DstSz = Op.getValueType().getSizeInBits();
17858   const unsigned SrcSz = SrcVT.getSizeInBits();
17859   (void)DstSz;
17860   assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 &&
17861          "Unexpected type for custom-lowering FP_ROUND");
17862 
17863   assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
17864          "With both FP DP and 16, any FP conversion is legal!");
17865 
17866   SDLoc Loc(Op);
17867 
17868   // Instruction from 32 -> 16 if hasFP16 is valid
17869   if (SrcSz == 32 && Subtarget->hasFP16())
17870     return Op;
17871 
17872   // Lib call from 32 -> 16 / 64 -> [32, 16]
17873   RTLIB::Libcall LC = RTLIB::getFPROUND(SrcVT, DstVT);
17874   assert(LC != RTLIB::UNKNOWN_LIBCALL &&
17875          "Unexpected type for custom-lowering FP_ROUND");
17876   MakeLibCallOptions CallOptions;
17877   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
17878   SDValue Result;
17879   std::tie(Result, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions,
17880                                         Loc, Chain);
17881   return IsStrict ? DAG.getMergeValues({Result, Chain}, Loc) : Result;
17882 }
17883 
17884 void ARMTargetLowering::lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results,
17885                                  SelectionDAG &DAG) const {
17886   assert(N->getValueType(0) == MVT::i64 && "Unexpected type (!= i64) on ABS.");
17887   MVT HalfT = MVT::i32;
17888   SDLoc dl(N);
17889   SDValue Hi, Lo, Tmp;
17890 
17891   if (!isOperationLegalOrCustom(ISD::ADDCARRY, HalfT) ||
17892       !isOperationLegalOrCustom(ISD::UADDO, HalfT))
17893     return ;
17894 
17895   unsigned OpTypeBits = HalfT.getScalarSizeInBits();
17896   SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);
17897 
17898   Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
17899                    DAG.getConstant(0, dl, HalfT));
17900   Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
17901                    DAG.getConstant(1, dl, HalfT));
17902 
17903   Tmp = DAG.getNode(ISD::SRA, dl, HalfT, Hi,
17904                     DAG.getConstant(OpTypeBits - 1, dl,
17905                     getShiftAmountTy(HalfT, DAG.getDataLayout())));
17906   Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
17907   Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
17908                    SDValue(Lo.getNode(), 1));
17909   Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
17910   Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);
17911 
17912   Results.push_back(Lo);
17913   Results.push_back(Hi);
17914 }
17915 
17916 bool
17917 ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
17918   // The ARM target isn't yet aware of offsets.
17919   return false;
17920 }
17921 
17922 bool ARM::isBitFieldInvertedMask(unsigned v) {
17923   if (v == 0xffffffff)
17924     return false;
17925 
17926   // there can be 1's on either or both "outsides", all the "inside"
17927   // bits must be 0's
17928   return isShiftedMask_32(~v);
17929 }
17930 
17931 /// isFPImmLegal - Returns true if the target can instruction select the
17932 /// specified FP immediate natively. If false, the legalizer will
17933 /// materialize the FP immediate as a load from a constant pool.
17934 bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
17935                                      bool ForCodeSize) const {
17936   if (!Subtarget->hasVFP3Base())
17937     return false;
17938   if (VT == MVT::f16 && Subtarget->hasFullFP16())
17939     return ARM_AM::getFP16Imm(Imm) != -1;
17940   if (VT == MVT::f32)
17941     return ARM_AM::getFP32Imm(Imm) != -1;
17942   if (VT == MVT::f64 && Subtarget->hasFP64())
17943     return ARM_AM::getFP64Imm(Imm) != -1;
17944   return false;
17945 }
17946 
17947 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
17948 /// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
17949 /// specified in the intrinsic calls.
17950 bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
17951                                            const CallInst &I,
17952                                            MachineFunction &MF,
17953                                            unsigned Intrinsic) const {
17954   switch (Intrinsic) {
17955   case Intrinsic::arm_neon_vld1:
17956   case Intrinsic::arm_neon_vld2:
17957   case Intrinsic::arm_neon_vld3:
17958   case Intrinsic::arm_neon_vld4:
17959   case Intrinsic::arm_neon_vld2lane:
17960   case Intrinsic::arm_neon_vld3lane:
17961   case Intrinsic::arm_neon_vld4lane:
17962   case Intrinsic::arm_neon_vld2dup:
17963   case Intrinsic::arm_neon_vld3dup:
17964   case Intrinsic::arm_neon_vld4dup: {
17965     Info.opc = ISD::INTRINSIC_W_CHAIN;
17966     // Conservatively set memVT to the entire set of vectors loaded.
17967     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
17968     uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
17969     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
17970     Info.ptrVal = I.getArgOperand(0);
17971     Info.offset = 0;
17972     Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
17973     Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue();
17974     // volatile loads with NEON intrinsics not supported
17975     Info.flags = MachineMemOperand::MOLoad;
17976     return true;
17977   }
17978   case Intrinsic::arm_neon_vld1x2:
17979   case Intrinsic::arm_neon_vld1x3:
17980   case Intrinsic::arm_neon_vld1x4: {
17981     Info.opc = ISD::INTRINSIC_W_CHAIN;
17982     // Conservatively set memVT to the entire set of vectors loaded.
17983     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
17984     uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
17985     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
17986     Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1);
17987     Info.offset = 0;
17988     Info.align.reset();
17989     // volatile loads with NEON intrinsics not supported
17990     Info.flags = MachineMemOperand::MOLoad;
17991     return true;
17992   }
17993   case Intrinsic::arm_neon_vst1:
17994   case Intrinsic::arm_neon_vst2:
17995   case Intrinsic::arm_neon_vst3:
17996   case Intrinsic::arm_neon_vst4:
17997   case Intrinsic::arm_neon_vst2lane:
17998   case Intrinsic::arm_neon_vst3lane:
17999   case Intrinsic::arm_neon_vst4lane: {
18000     Info.opc = ISD::INTRINSIC_VOID;
18001     // Conservatively set memVT to the entire set of vectors stored.
18002     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
18003     unsigned NumElts = 0;
18004     for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
18005       Type *ArgTy = I.getArgOperand(ArgI)->getType();
18006       if (!ArgTy->isVectorTy())
18007         break;
18008       NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
18009     }
18010     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
18011     Info.ptrVal = I.getArgOperand(0);
18012     Info.offset = 0;
18013     Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
18014     Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue();
18015     // volatile stores with NEON intrinsics not supported
18016     Info.flags = MachineMemOperand::MOStore;
18017     return true;
18018   }
18019   case Intrinsic::arm_neon_vst1x2:
18020   case Intrinsic::arm_neon_vst1x3:
18021   case Intrinsic::arm_neon_vst1x4: {
18022     Info.opc = ISD::INTRINSIC_VOID;
18023     // Conservatively set memVT to the entire set of vectors stored.
18024     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
18025     unsigned NumElts = 0;
18026     for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
18027       Type *ArgTy = I.getArgOperand(ArgI)->getType();
18028       if (!ArgTy->isVectorTy())
18029         break;
18030       NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
18031     }
18032     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
18033     Info.ptrVal = I.getArgOperand(0);
18034     Info.offset = 0;
18035     Info.align.reset();
18036     // volatile stores with NEON intrinsics not supported
18037     Info.flags = MachineMemOperand::MOStore;
18038     return true;
18039   }
18040   case Intrinsic::arm_mve_vld2q:
18041   case Intrinsic::arm_mve_vld4q: {
18042     Info.opc = ISD::INTRINSIC_W_CHAIN;
18043     // Conservatively set memVT to the entire set of vectors loaded.
18044     Type *VecTy = cast<StructType>(I.getType())->getElementType(1);
18045     unsigned Factor = Intrinsic == Intrinsic::arm_mve_vld2q ? 2 : 4;
18046     Info.memVT = EVT::getVectorVT(VecTy->getContext(), MVT::i64, Factor * 2);
18047     Info.ptrVal = I.getArgOperand(0);
18048     Info.offset = 0;
18049     Info.align = Align(VecTy->getScalarSizeInBits() / 8);
18050     // volatile loads with MVE intrinsics not supported
18051     Info.flags = MachineMemOperand::MOLoad;
18052     return true;
18053   }
18054   case Intrinsic::arm_mve_vst2q:
18055   case Intrinsic::arm_mve_vst4q: {
18056     Info.opc = ISD::INTRINSIC_VOID;
18057     // Conservatively set memVT to the entire set of vectors stored.
18058     Type *VecTy = I.getArgOperand(1)->getType();
18059     unsigned Factor = Intrinsic == Intrinsic::arm_mve_vst2q ? 2 : 4;
18060     Info.memVT = EVT::getVectorVT(VecTy->getContext(), MVT::i64, Factor * 2);
18061     Info.ptrVal = I.getArgOperand(0);
18062     Info.offset = 0;
18063     Info.align = Align(VecTy->getScalarSizeInBits() / 8);
18064     // volatile stores with MVE intrinsics not supported
18065     Info.flags = MachineMemOperand::MOStore;
18066     return true;
18067   }
18068   case Intrinsic::arm_ldaex:
18069   case Intrinsic::arm_ldrex: {
18070     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
18071     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
18072     Info.opc = ISD::INTRINSIC_W_CHAIN;
18073     Info.memVT = MVT::getVT(PtrTy->getElementType());
18074     Info.ptrVal = I.getArgOperand(0);
18075     Info.offset = 0;
18076     Info.align = DL.getABITypeAlign(PtrTy->getElementType());
18077     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
18078     return true;
18079   }
18080   case Intrinsic::arm_stlex:
18081   case Intrinsic::arm_strex: {
18082     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
18083     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
18084     Info.opc = ISD::INTRINSIC_W_CHAIN;
18085     Info.memVT = MVT::getVT(PtrTy->getElementType());
18086     Info.ptrVal = I.getArgOperand(1);
18087     Info.offset = 0;
18088     Info.align = DL.getABITypeAlign(PtrTy->getElementType());
18089     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
18090     return true;
18091   }
18092   case Intrinsic::arm_stlexd:
18093   case Intrinsic::arm_strexd:
18094     Info.opc = ISD::INTRINSIC_W_CHAIN;
18095     Info.memVT = MVT::i64;
18096     Info.ptrVal = I.getArgOperand(2);
18097     Info.offset = 0;
18098     Info.align = Align(8);
18099     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
18100     return true;
18101 
18102   case Intrinsic::arm_ldaexd:
18103   case Intrinsic::arm_ldrexd:
18104     Info.opc = ISD::INTRINSIC_W_CHAIN;
18105     Info.memVT = MVT::i64;
18106     Info.ptrVal = I.getArgOperand(0);
18107     Info.offset = 0;
18108     Info.align = Align(8);
18109     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
18110     return true;
18111 
18112   default:
18113     break;
18114   }
18115 
18116   return false;
18117 }
18118 
18119 /// Returns true if it is beneficial to convert a load of a constant
18120 /// to just the constant itself.
18121 bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
18122                                                           Type *Ty) const {
18123   assert(Ty->isIntegerTy());
18124 
18125   unsigned Bits = Ty->getPrimitiveSizeInBits();
18126   if (Bits == 0 || Bits > 32)
18127     return false;
18128   return true;
18129 }
18130 
18131 bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
18132                                                 unsigned Index) const {
18133   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
18134     return false;
18135 
18136   return (Index == 0 || Index == ResVT.getVectorNumElements());
18137 }
18138 
18139 Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
18140                                         ARM_MB::MemBOpt Domain) const {
18141   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
18142 
18143   // First, if the target has no DMB, see what fallback we can use.
18144   if (!Subtarget->hasDataBarrier()) {
18145     // Some ARMv6 cpus can support data barriers with an mcr instruction.
18146     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
18147     // here.
18148     if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) {
18149       Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr);
18150       Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0),
18151                         Builder.getInt32(0), Builder.getInt32(7),
18152                         Builder.getInt32(10), Builder.getInt32(5)};
18153       return Builder.CreateCall(MCR, args);
18154     } else {
18155       // Instead of using barriers, atomic accesses on these subtargets use
18156       // libcalls.
18157       llvm_unreachable("makeDMB on a target so old that it has no barriers");
18158     }
18159   } else {
18160     Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb);
18161     // Only a full system barrier exists in the M-class architectures.
18162     Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain;
18163     Constant *CDomain = Builder.getInt32(Domain);
18164     return Builder.CreateCall(DMB, CDomain);
18165   }
18166 }
18167 
18168 // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
18169 Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
18170                                                  Instruction *Inst,
18171                                                  AtomicOrdering Ord) const {
18172   switch (Ord) {
18173   case AtomicOrdering::NotAtomic:
18174   case AtomicOrdering::Unordered:
18175     llvm_unreachable("Invalid fence: unordered/non-atomic");
18176   case AtomicOrdering::Monotonic:
18177   case AtomicOrdering::Acquire:
18178     return nullptr; // Nothing to do
18179   case AtomicOrdering::SequentiallyConsistent:
18180     if (!Inst->hasAtomicStore())
18181       return nullptr; // Nothing to do
18182     LLVM_FALLTHROUGH;
18183   case AtomicOrdering::Release:
18184   case AtomicOrdering::AcquireRelease:
18185     if (Subtarget->preferISHSTBarriers())
18186       return makeDMB(Builder, ARM_MB::ISHST);
18187     // FIXME: add a comment with a link to documentation justifying this.
18188     else
18189       return makeDMB(Builder, ARM_MB::ISH);
18190   }
18191   llvm_unreachable("Unknown fence ordering in emitLeadingFence");
18192 }
18193 
18194 Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
18195                                                   Instruction *Inst,
18196                                                   AtomicOrdering Ord) const {
18197   switch (Ord) {
18198   case AtomicOrdering::NotAtomic:
18199   case AtomicOrdering::Unordered:
18200     llvm_unreachable("Invalid fence: unordered/not-atomic");
18201   case AtomicOrdering::Monotonic:
18202   case AtomicOrdering::Release:
18203     return nullptr; // Nothing to do
18204   case AtomicOrdering::Acquire:
18205   case AtomicOrdering::AcquireRelease:
18206   case AtomicOrdering::SequentiallyConsistent:
18207     return makeDMB(Builder, ARM_MB::ISH);
18208   }
18209   llvm_unreachable("Unknown fence ordering in emitTrailingFence");
18210 }
18211 
18212 // Loads and stores less than 64-bits are already atomic; ones above that
18213 // are doomed anyway, so defer to the default libcall and blame the OS when
18214 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
18215 // anything for those.
18216 bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
18217   unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
18218   return (Size == 64) && !Subtarget->isMClass();
18219 }
18220 
18221 // Loads and stores less than 64-bits are already atomic; ones above that
18222 // are doomed anyway, so defer to the default libcall and blame the OS when
18223 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
18224 // anything for those.
18225 // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that
18226 // guarantee, see DDI0406C ARM architecture reference manual,
18227 // sections A8.8.72-74 LDRD)
18228 TargetLowering::AtomicExpansionKind
18229 ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
18230   unsigned Size = LI->getType()->getPrimitiveSizeInBits();
18231   return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly
18232                                                   : AtomicExpansionKind::None;
18233 }
18234 
18235 // For the real atomic operations, we have ldrex/strex up to 32 bits,
18236 // and up to 64 bits on the non-M profiles
18237 TargetLowering::AtomicExpansionKind
18238 ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
18239   if (AI->isFloatingPointOperation())
18240     return AtomicExpansionKind::CmpXChg;
18241 
18242   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
18243   bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
18244   return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW)
18245              ? AtomicExpansionKind::LLSC
18246              : AtomicExpansionKind::None;
18247 }
18248 
18249 TargetLowering::AtomicExpansionKind
18250 ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
18251   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
18252   // implement cmpxchg without spilling. If the address being exchanged is also
18253   // on the stack and close enough to the spill slot, this can lead to a
18254   // situation where the monitor always gets cleared and the atomic operation
18255   // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
18256   bool HasAtomicCmpXchg =
18257       !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
18258   if (getTargetMachine().getOptLevel() != 0 && HasAtomicCmpXchg)
18259     return AtomicExpansionKind::LLSC;
18260   return AtomicExpansionKind::None;
18261 }
18262 
18263 bool ARMTargetLowering::shouldInsertFencesForAtomic(
18264     const Instruction *I) const {
18265   return InsertFencesForAtomic;
18266 }
18267 
18268 // This has so far only been implemented for MachO.
18269 bool ARMTargetLowering::useLoadStackGuardNode() const {
18270   return Subtarget->isTargetMachO();
18271 }
18272 
18273 void ARMTargetLowering::insertSSPDeclarations(Module &M) const {
18274   if (!Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
18275     return TargetLowering::insertSSPDeclarations(M);
18276 
18277   // MSVC CRT has a global variable holding security cookie.
18278   M.getOrInsertGlobal("__security_cookie",
18279                       Type::getInt8PtrTy(M.getContext()));
18280 
18281   // MSVC CRT has a function to validate security cookie.
18282   FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
18283       "__security_check_cookie", Type::getVoidTy(M.getContext()),
18284       Type::getInt8PtrTy(M.getContext()));
18285   if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee()))
18286     F->addAttribute(1, Attribute::AttrKind::InReg);
18287 }
18288 
18289 Value *ARMTargetLowering::getSDagStackGuard(const Module &M) const {
18290   // MSVC CRT has a global variable holding security cookie.
18291   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
18292     return M.getGlobalVariable("__security_cookie");
18293   return TargetLowering::getSDagStackGuard(M);
18294 }
18295 
18296 Function *ARMTargetLowering::getSSPStackGuardCheck(const Module &M) const {
18297   // MSVC CRT has a function to validate security cookie.
18298   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
18299     return M.getFunction("__security_check_cookie");
18300   return TargetLowering::getSSPStackGuardCheck(M);
18301 }
18302 
18303 bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
18304                                                   unsigned &Cost) const {
18305   // If we do not have NEON, vector types are not natively supported.
18306   if (!Subtarget->hasNEON())
18307     return false;
18308 
18309   // Floating point values and vector values map to the same register file.
18310   // Therefore, although we could do a store extract of a vector type, this is
18311   // better to leave at float as we have more freedom in the addressing mode for
18312   // those.
18313   if (VectorTy->isFPOrFPVectorTy())
18314     return false;
18315 
18316   // If the index is unknown at compile time, this is very expensive to lower
18317   // and it is not possible to combine the store with the extract.
18318   if (!isa<ConstantInt>(Idx))
18319     return false;
18320 
18321   assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
18322   unsigned BitWidth = VectorTy->getPrimitiveSizeInBits().getFixedSize();
18323   // We can do a store + vector extract on any vector that fits perfectly in a D
18324   // or Q register.
18325   if (BitWidth == 64 || BitWidth == 128) {
18326     Cost = 0;
18327     return true;
18328   }
18329   return false;
18330 }
18331 
18332 bool ARMTargetLowering::isCheapToSpeculateCttz() const {
18333   return Subtarget->hasV6T2Ops();
18334 }
18335 
18336 bool ARMTargetLowering::isCheapToSpeculateCtlz() const {
18337   return Subtarget->hasV6T2Ops();
18338 }
18339 
18340 bool ARMTargetLowering::shouldExpandShift(SelectionDAG &DAG, SDNode *N) const {
18341   return !Subtarget->hasMinSize() || Subtarget->isTargetWindows();
18342 }
18343 
18344 Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
18345                                          AtomicOrdering Ord) const {
18346   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
18347   Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
18348   bool IsAcquire = isAcquireOrStronger(Ord);
18349 
18350   // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
18351   // intrinsic must return {i32, i32} and we have to recombine them into a
18352   // single i64 here.
18353   if (ValTy->getPrimitiveSizeInBits() == 64) {
18354     Intrinsic::ID Int =
18355         IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
18356     Function *Ldrex = Intrinsic::getDeclaration(M, Int);
18357 
18358     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
18359     Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");
18360 
18361     Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
18362     Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
18363     if (!Subtarget->isLittle())
18364       std::swap (Lo, Hi);
18365     Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
18366     Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
18367     return Builder.CreateOr(
18368         Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
18369   }
18370 
18371   Type *Tys[] = { Addr->getType() };
18372   Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
18373   Function *Ldrex = Intrinsic::getDeclaration(M, Int, Tys);
18374 
18375   return Builder.CreateTruncOrBitCast(
18376       Builder.CreateCall(Ldrex, Addr),
18377       cast<PointerType>(Addr->getType())->getElementType());
18378 }
18379 
18380 void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
18381     IRBuilder<> &Builder) const {
18382   if (!Subtarget->hasV7Ops())
18383     return;
18384   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
18385   Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::arm_clrex));
18386 }
18387 
18388 Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
18389                                                Value *Addr,
18390                                                AtomicOrdering Ord) const {
18391   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
18392   bool IsRelease = isReleaseOrStronger(Ord);
18393 
18394   // Since the intrinsics must have legal type, the i64 intrinsics take two
18395   // parameters: "i32, i32". We must marshal Val into the appropriate form
18396   // before the call.
18397   if (Val->getType()->getPrimitiveSizeInBits() == 64) {
18398     Intrinsic::ID Int =
18399         IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
18400     Function *Strex = Intrinsic::getDeclaration(M, Int);
18401     Type *Int32Ty = Type::getInt32Ty(M->getContext());
18402 
18403     Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
18404     Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
18405     if (!Subtarget->isLittle())
18406       std::swap(Lo, Hi);
18407     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
18408     return Builder.CreateCall(Strex, {Lo, Hi, Addr});
18409   }
18410 
18411   Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
18412   Type *Tys[] = { Addr->getType() };
18413   Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);
18414 
18415   return Builder.CreateCall(
18416       Strex, {Builder.CreateZExtOrBitCast(
18417                   Val, Strex->getFunctionType()->getParamType(0)),
18418               Addr});
18419 }
18420 
18421 
18422 bool ARMTargetLowering::alignLoopsWithOptSize() const {
18423   return Subtarget->isMClass();
18424 }
18425 
18426 /// A helper function for determining the number of interleaved accesses we
18427 /// will generate when lowering accesses of the given type.
18428 unsigned
18429 ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy,
18430                                              const DataLayout &DL) const {
18431   return (DL.getTypeSizeInBits(VecTy) + 127) / 128;
18432 }
18433 
18434 bool ARMTargetLowering::isLegalInterleavedAccessType(
18435     unsigned Factor, FixedVectorType *VecTy, const DataLayout &DL) const {
18436 
18437   unsigned VecSize = DL.getTypeSizeInBits(VecTy);
18438   unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());
18439 
18440   if (!Subtarget->hasNEON() && !Subtarget->hasMVEIntegerOps())
18441     return false;
18442 
18443   // Ensure the vector doesn't have f16 elements. Even though we could do an
18444   // i16 vldN, we can't hold the f16 vectors and will end up converting via
18445   // f32.
18446   if (Subtarget->hasNEON() && VecTy->getElementType()->isHalfTy())
18447     return false;
18448   if (Subtarget->hasMVEIntegerOps() && Factor == 3)
18449     return false;
18450 
18451   // Ensure the number of vector elements is greater than 1.
18452   if (VecTy->getNumElements() < 2)
18453     return false;
18454 
18455   // Ensure the element type is legal.
18456   if (ElSize != 8 && ElSize != 16 && ElSize != 32)
18457     return false;
18458 
18459   // Ensure the total vector size is 64 or a multiple of 128. Types larger than
18460   // 128 will be split into multiple interleaved accesses.
18461   if (Subtarget->hasNEON() && VecSize == 64)
18462     return true;
18463   return VecSize % 128 == 0;
18464 }
18465 
18466 unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const {
18467   if (Subtarget->hasNEON())
18468     return 4;
18469   if (Subtarget->hasMVEIntegerOps())
18470     return MVEMaxSupportedInterleaveFactor;
18471   return TargetLoweringBase::getMaxSupportedInterleaveFactor();
18472 }
18473 
18474 /// Lower an interleaved load into a vldN intrinsic.
18475 ///
18476 /// E.g. Lower an interleaved load (Factor = 2):
18477 ///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4
18478 ///        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
18479 ///        %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
18480 ///
18481 ///      Into:
18482 ///        %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4)
18483 ///        %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0
18484 ///        %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1
18485 bool ARMTargetLowering::lowerInterleavedLoad(
18486     LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
18487     ArrayRef<unsigned> Indices, unsigned Factor) const {
18488   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
18489          "Invalid interleave factor");
18490   assert(!Shuffles.empty() && "Empty shufflevector input");
18491   assert(Shuffles.size() == Indices.size() &&
18492          "Unmatched number of shufflevectors and indices");
18493 
18494   auto *VecTy = cast<FixedVectorType>(Shuffles[0]->getType());
18495   Type *EltTy = VecTy->getElementType();
18496 
18497   const DataLayout &DL = LI->getModule()->getDataLayout();
18498 
18499   // Skip if we do not have NEON and skip illegal vector types. We can
18500   // "legalize" wide vector types into multiple interleaved accesses as long as
18501   // the vector types are divisible by 128.
18502   if (!isLegalInterleavedAccessType(Factor, VecTy, DL))
18503     return false;
18504 
18505   unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL);
18506 
18507   // A pointer vector can not be the return type of the ldN intrinsics. Need to
18508   // load integer vectors first and then convert to pointer vectors.
18509   if (EltTy->isPointerTy())
18510     VecTy = FixedVectorType::get(DL.getIntPtrType(EltTy), VecTy);
18511 
18512   IRBuilder<> Builder(LI);
18513 
18514   // The base address of the load.
18515   Value *BaseAddr = LI->getPointerOperand();
18516 
18517   if (NumLoads > 1) {
18518     // If we're going to generate more than one load, reset the sub-vector type
18519     // to something legal.
18520     VecTy = FixedVectorType::get(VecTy->getElementType(),
18521                                  VecTy->getNumElements() / NumLoads);
18522 
18523     // We will compute the pointer operand of each load from the original base
18524     // address using GEPs. Cast the base address to a pointer to the scalar
18525     // element type.
18526     BaseAddr = Builder.CreateBitCast(
18527         BaseAddr,
18528         VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()));
18529   }
18530 
18531   assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!");
18532 
18533   auto createLoadIntrinsic = [&](Value *BaseAddr) {
18534     if (Subtarget->hasNEON()) {
18535       Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace());
18536       Type *Tys[] = {VecTy, Int8Ptr};
18537       static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
18538                                                 Intrinsic::arm_neon_vld3,
18539                                                 Intrinsic::arm_neon_vld4};
18540       Function *VldnFunc =
18541           Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);
18542 
18543       SmallVector<Value *, 2> Ops;
18544       Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
18545       Ops.push_back(Builder.getInt32(LI->getAlignment()));
18546 
18547       return Builder.CreateCall(VldnFunc, Ops, "vldN");
18548     } else {
18549       assert((Factor == 2 || Factor == 4) &&
18550              "expected interleave factor of 2 or 4 for MVE");
18551       Intrinsic::ID LoadInts =
18552           Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q;
18553       Type *VecEltTy =
18554           VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace());
18555       Type *Tys[] = {VecTy, VecEltTy};
18556       Function *VldnFunc =
18557           Intrinsic::getDeclaration(LI->getModule(), LoadInts, Tys);
18558 
18559       SmallVector<Value *, 2> Ops;
18560       Ops.push_back(Builder.CreateBitCast(BaseAddr, VecEltTy));
18561       return Builder.CreateCall(VldnFunc, Ops, "vldN");
18562     }
18563   };
18564 
18565   // Holds sub-vectors extracted from the load intrinsic return values. The
18566   // sub-vectors are associated with the shufflevector instructions they will
18567   // replace.
18568   DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;
18569 
18570   for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
18571     // If we're generating more than one load, compute the base address of
18572     // subsequent loads as an offset from the previous.
18573     if (LoadCount > 0)
18574       BaseAddr = Builder.CreateConstGEP1_32(VecTy->getElementType(), BaseAddr,
18575                                             VecTy->getNumElements() * Factor);
18576 
18577     CallInst *VldN = createLoadIntrinsic(BaseAddr);
18578 
18579     // Replace uses of each shufflevector with the corresponding vector loaded
18580     // by ldN.
18581     for (unsigned i = 0; i < Shuffles.size(); i++) {
18582       ShuffleVectorInst *SV = Shuffles[i];
18583       unsigned Index = Indices[i];
18584 
18585       Value *SubVec = Builder.CreateExtractValue(VldN, Index);
18586 
18587       // Convert the integer vector to pointer vector if the element is pointer.
18588       if (EltTy->isPointerTy())
18589         SubVec = Builder.CreateIntToPtr(
18590             SubVec,
18591             FixedVectorType::get(SV->getType()->getElementType(), VecTy));
18592 
18593       SubVecs[SV].push_back(SubVec);
18594     }
18595   }
18596 
18597   // Replace uses of the shufflevector instructions with the sub-vectors
18598   // returned by the load intrinsic. If a shufflevector instruction is
18599   // associated with more than one sub-vector, those sub-vectors will be
18600   // concatenated into a single wide vector.
18601   for (ShuffleVectorInst *SVI : Shuffles) {
18602     auto &SubVec = SubVecs[SVI];
18603     auto *WideVec =
18604         SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
18605     SVI->replaceAllUsesWith(WideVec);
18606   }
18607 
18608   return true;
18609 }
18610 
18611 /// Lower an interleaved store into a vstN intrinsic.
18612 ///
18613 /// E.g. Lower an interleaved store (Factor = 3):
18614 ///        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
18615 ///                                  <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
18616 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4
18617 ///
18618 ///      Into:
18619 ///        %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
18620 ///        %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
18621 ///        %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
18622 ///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
18623 ///
18624 /// Note that the new shufflevectors will be removed and we'll only generate one
18625 /// vst3 instruction in CodeGen.
18626 ///
18627 /// Example for a more general valid mask (Factor 3). Lower:
18628 ///        %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
18629 ///                 <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
18630 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr
18631 ///
18632 ///      Into:
18633 ///        %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7>
18634 ///        %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
18635 ///        %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
18636 ///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
18637 bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
18638                                               ShuffleVectorInst *SVI,
18639                                               unsigned Factor) const {
18640   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
18641          "Invalid interleave factor");
18642 
18643   auto *VecTy = cast<FixedVectorType>(SVI->getType());
18644   assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
18645 
18646   unsigned LaneLen = VecTy->getNumElements() / Factor;
18647   Type *EltTy = VecTy->getElementType();
18648   auto *SubVecTy = FixedVectorType::get(EltTy, LaneLen);
18649 
18650   const DataLayout &DL = SI->getModule()->getDataLayout();
18651 
18652   // Skip if we do not have NEON and skip illegal vector types. We can
18653   // "legalize" wide vector types into multiple interleaved accesses as long as
18654   // the vector types are divisible by 128.
18655   if (!isLegalInterleavedAccessType(Factor, SubVecTy, DL))
18656     return false;
18657 
18658   unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL);
18659 
18660   Value *Op0 = SVI->getOperand(0);
18661   Value *Op1 = SVI->getOperand(1);
18662   IRBuilder<> Builder(SI);
18663 
18664   // StN intrinsics don't support pointer vectors as arguments. Convert pointer
18665   // vectors to integer vectors.
18666   if (EltTy->isPointerTy()) {
18667     Type *IntTy = DL.getIntPtrType(EltTy);
18668 
18669     // Convert to the corresponding integer vector.
18670     auto *IntVecTy =
18671         FixedVectorType::get(IntTy, cast<FixedVectorType>(Op0->getType()));
18672     Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
18673     Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);
18674 
18675     SubVecTy = FixedVectorType::get(IntTy, LaneLen);
18676   }
18677 
18678   // The base address of the store.
18679   Value *BaseAddr = SI->getPointerOperand();
18680 
18681   if (NumStores > 1) {
18682     // If we're going to generate more than one store, reset the lane length
18683     // and sub-vector type to something legal.
18684     LaneLen /= NumStores;
18685     SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen);
18686 
18687     // We will compute the pointer operand of each store from the original base
18688     // address using GEPs. Cast the base address to a pointer to the scalar
18689     // element type.
18690     BaseAddr = Builder.CreateBitCast(
18691         BaseAddr,
18692         SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace()));
18693   }
18694 
18695   assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!");
18696 
18697   auto Mask = SVI->getShuffleMask();
18698 
18699   auto createStoreIntrinsic = [&](Value *BaseAddr,
18700                                   SmallVectorImpl<Value *> &Shuffles) {
18701     if (Subtarget->hasNEON()) {
18702       static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
18703                                                  Intrinsic::arm_neon_vst3,
18704                                                  Intrinsic::arm_neon_vst4};
18705       Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace());
18706       Type *Tys[] = {Int8Ptr, SubVecTy};
18707 
18708       Function *VstNFunc = Intrinsic::getDeclaration(
18709           SI->getModule(), StoreInts[Factor - 2], Tys);
18710 
18711       SmallVector<Value *, 6> Ops;
18712       Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
18713       for (auto S : Shuffles)
18714         Ops.push_back(S);
18715       Ops.push_back(Builder.getInt32(SI->getAlignment()));
18716       Builder.CreateCall(VstNFunc, Ops);
18717     } else {
18718       assert((Factor == 2 || Factor == 4) &&
18719              "expected interleave factor of 2 or 4 for MVE");
18720       Intrinsic::ID StoreInts =
18721           Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q;
18722       Type *EltPtrTy = SubVecTy->getElementType()->getPointerTo(
18723           SI->getPointerAddressSpace());
18724       Type *Tys[] = {EltPtrTy, SubVecTy};
18725       Function *VstNFunc =
18726           Intrinsic::getDeclaration(SI->getModule(), StoreInts, Tys);
18727 
18728       SmallVector<Value *, 6> Ops;
18729       Ops.push_back(Builder.CreateBitCast(BaseAddr, EltPtrTy));
18730       for (auto S : Shuffles)
18731         Ops.push_back(S);
18732       for (unsigned F = 0; F < Factor; F++) {
18733         Ops.push_back(Builder.getInt32(F));
18734         Builder.CreateCall(VstNFunc, Ops);
18735         Ops.pop_back();
18736       }
18737     }
18738   };
18739 
18740   for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
18741     // If we generating more than one store, we compute the base address of
18742     // subsequent stores as an offset from the previous.
18743     if (StoreCount > 0)
18744       BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(),
18745                                             BaseAddr, LaneLen * Factor);
18746 
18747     SmallVector<Value *, 4> Shuffles;
18748 
18749     // Split the shufflevector operands into sub vectors for the new vstN call.
18750     for (unsigned i = 0; i < Factor; i++) {
18751       unsigned IdxI = StoreCount * LaneLen * Factor + i;
18752       if (Mask[IdxI] >= 0) {
18753         Shuffles.push_back(Builder.CreateShuffleVector(
18754             Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0)));
18755       } else {
18756         unsigned StartMask = 0;
18757         for (unsigned j = 1; j < LaneLen; j++) {
18758           unsigned IdxJ = StoreCount * LaneLen * Factor + j;
18759           if (Mask[IdxJ * Factor + IdxI] >= 0) {
18760             StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
18761             break;
18762           }
18763         }
18764         // Note: If all elements in a chunk are undefs, StartMask=0!
18765         // Note: Filling undef gaps with random elements is ok, since
18766         // those elements were being written anyway (with undefs).
18767         // In the case of all undefs we're defaulting to using elems from 0
18768         // Note: StartMask cannot be negative, it's checked in
18769         // isReInterleaveMask
18770         Shuffles.push_back(Builder.CreateShuffleVector(
18771             Op0, Op1, createSequentialMask(StartMask, LaneLen, 0)));
18772       }
18773     }
18774 
18775     createStoreIntrinsic(BaseAddr, Shuffles);
18776   }
18777   return true;
18778 }
18779 
18780 enum HABaseType {
18781   HA_UNKNOWN = 0,
18782   HA_FLOAT,
18783   HA_DOUBLE,
18784   HA_VECT64,
18785   HA_VECT128
18786 };
18787 
18788 static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
18789                                    uint64_t &Members) {
18790   if (auto *ST = dyn_cast<StructType>(Ty)) {
18791     for (unsigned i = 0; i < ST->getNumElements(); ++i) {
18792       uint64_t SubMembers = 0;
18793       if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
18794         return false;
18795       Members += SubMembers;
18796     }
18797   } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
18798     uint64_t SubMembers = 0;
18799     if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
18800       return false;
18801     Members += SubMembers * AT->getNumElements();
18802   } else if (Ty->isFloatTy()) {
18803     if (Base != HA_UNKNOWN && Base != HA_FLOAT)
18804       return false;
18805     Members = 1;
18806     Base = HA_FLOAT;
18807   } else if (Ty->isDoubleTy()) {
18808     if (Base != HA_UNKNOWN && Base != HA_DOUBLE)
18809       return false;
18810     Members = 1;
18811     Base = HA_DOUBLE;
18812   } else if (auto *VT = dyn_cast<VectorType>(Ty)) {
18813     Members = 1;
18814     switch (Base) {
18815     case HA_FLOAT:
18816     case HA_DOUBLE:
18817       return false;
18818     case HA_VECT64:
18819       return VT->getPrimitiveSizeInBits().getFixedSize() == 64;
18820     case HA_VECT128:
18821       return VT->getPrimitiveSizeInBits().getFixedSize() == 128;
18822     case HA_UNKNOWN:
18823       switch (VT->getPrimitiveSizeInBits().getFixedSize()) {
18824       case 64:
18825         Base = HA_VECT64;
18826         return true;
18827       case 128:
18828         Base = HA_VECT128;
18829         return true;
18830       default:
18831         return false;
18832       }
18833     }
18834   }
18835 
18836   return (Members > 0 && Members <= 4);
18837 }
18838 
18839 /// Return the correct alignment for the current calling convention.
18840 Align ARMTargetLowering::getABIAlignmentForCallingConv(Type *ArgTy,
18841                                                        DataLayout DL) const {
18842   const Align ABITypeAlign = DL.getABITypeAlign(ArgTy);
18843   if (!ArgTy->isVectorTy())
18844     return ABITypeAlign;
18845 
18846   // Avoid over-aligning vector parameters. It would require realigning the
18847   // stack and waste space for no real benefit.
18848   return std::min(ABITypeAlign, DL.getStackAlignment());
18849 }
18850 
18851 /// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
18852 /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
18853 /// passing according to AAPCS rules.
18854 bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
18855     Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
18856   if (getEffectiveCallingConv(CallConv, isVarArg) !=
18857       CallingConv::ARM_AAPCS_VFP)
18858     return false;
18859 
18860   HABaseType Base = HA_UNKNOWN;
18861   uint64_t Members = 0;
18862   bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
18863   LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());
18864 
18865   bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
18866   return IsHA || IsIntArray;
18867 }
18868 
18869 Register ARMTargetLowering::getExceptionPointerRegister(
18870     const Constant *PersonalityFn) const {
18871   // Platforms which do not use SjLj EH may return values in these registers
18872   // via the personality function.
18873   return Subtarget->useSjLjEH() ? Register() : ARM::R0;
18874 }
18875 
18876 Register ARMTargetLowering::getExceptionSelectorRegister(
18877     const Constant *PersonalityFn) const {
18878   // Platforms which do not use SjLj EH may return values in these registers
18879   // via the personality function.
18880   return Subtarget->useSjLjEH() ? Register() : ARM::R1;
18881 }
18882 
18883 void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
18884   // Update IsSplitCSR in ARMFunctionInfo.
18885   ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>();
18886   AFI->setIsSplitCSR(true);
18887 }
18888 
18889 void ARMTargetLowering::insertCopiesSplitCSR(
18890     MachineBasicBlock *Entry,
18891     const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
18892   const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
18893   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
18894   if (!IStart)
18895     return;
18896 
18897   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
18898   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
18899   MachineBasicBlock::iterator MBBI = Entry->begin();
18900   for (const MCPhysReg *I = IStart; *I; ++I) {
18901     const TargetRegisterClass *RC = nullptr;
18902     if (ARM::GPRRegClass.contains(*I))
18903       RC = &ARM::GPRRegClass;
18904     else if (ARM::DPRRegClass.contains(*I))
18905       RC = &ARM::DPRRegClass;
18906     else
18907       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
18908 
18909     Register NewVR = MRI->createVirtualRegister(RC);
18910     // Create copy from CSR to a virtual register.
18911     // FIXME: this currently does not emit CFI pseudo-instructions, it works
18912     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
18913     // nounwind. If we want to generalize this later, we may need to emit
18914     // CFI pseudo-instructions.
18915     assert(Entry->getParent()->getFunction().hasFnAttribute(
18916                Attribute::NoUnwind) &&
18917            "Function should be nounwind in insertCopiesSplitCSR!");
18918     Entry->addLiveIn(*I);
18919     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
18920         .addReg(*I);
18921 
18922     // Insert the copy-back instructions right before the terminator.
18923     for (auto *Exit : Exits)
18924       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
18925               TII->get(TargetOpcode::COPY), *I)
18926           .addReg(NewVR);
18927   }
18928 }
18929 
18930 void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
18931   MF.getFrameInfo().computeMaxCallFrameSize(MF);
18932   TargetLoweringBase::finalizeLowering(MF);
18933 }
18934