1 //===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation  ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the AArch64TargetLowering class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "AArch64ISelLowering.h"
14 #include "AArch64CallingConvention.h"
15 #include "AArch64ExpandImm.h"
16 #include "AArch64MachineFunctionInfo.h"
17 #include "AArch64PerfectShuffle.h"
18 #include "AArch64RegisterInfo.h"
19 #include "AArch64Subtarget.h"
20 #include "MCTargetDesc/AArch64AddressingModes.h"
21 #include "Utils/AArch64BaseInfo.h"
22 #include "llvm/ADT/APFloat.h"
23 #include "llvm/ADT/APInt.h"
24 #include "llvm/ADT/ArrayRef.h"
25 #include "llvm/ADT/STLExtras.h"
26 #include "llvm/ADT/SmallSet.h"
27 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/ADT/Statistic.h"
29 #include "llvm/ADT/StringRef.h"
30 #include "llvm/ADT/Triple.h"
31 #include "llvm/ADT/Twine.h"
32 #include "llvm/Analysis/MemoryLocation.h"
33 #include "llvm/Analysis/ObjCARCUtil.h"
34 #include "llvm/Analysis/VectorUtils.h"
35 #include "llvm/CodeGen/Analysis.h"
36 #include "llvm/CodeGen/CallingConvLower.h"
37 #include "llvm/CodeGen/ISDOpcodes.h"
38 #include "llvm/CodeGen/MachineBasicBlock.h"
39 #include "llvm/CodeGen/MachineFrameInfo.h"
40 #include "llvm/CodeGen/MachineFunction.h"
41 #include "llvm/CodeGen/MachineInstr.h"
42 #include "llvm/CodeGen/MachineInstrBuilder.h"
43 #include "llvm/CodeGen/MachineMemOperand.h"
44 #include "llvm/CodeGen/MachineRegisterInfo.h"
45 #include "llvm/CodeGen/RuntimeLibcalls.h"
46 #include "llvm/CodeGen/SelectionDAG.h"
47 #include "llvm/CodeGen/SelectionDAGNodes.h"
48 #include "llvm/CodeGen/TargetCallingConv.h"
49 #include "llvm/CodeGen/TargetInstrInfo.h"
50 #include "llvm/CodeGen/ValueTypes.h"
51 #include "llvm/IR/Attributes.h"
52 #include "llvm/IR/Constants.h"
53 #include "llvm/IR/DataLayout.h"
54 #include "llvm/IR/DebugLoc.h"
55 #include "llvm/IR/DerivedTypes.h"
56 #include "llvm/IR/Function.h"
57 #include "llvm/IR/GetElementPtrTypeIterator.h"
58 #include "llvm/IR/GlobalValue.h"
59 #include "llvm/IR/IRBuilder.h"
60 #include "llvm/IR/Instruction.h"
61 #include "llvm/IR/Instructions.h"
62 #include "llvm/IR/IntrinsicInst.h"
63 #include "llvm/IR/Intrinsics.h"
64 #include "llvm/IR/IntrinsicsAArch64.h"
65 #include "llvm/IR/Module.h"
66 #include "llvm/IR/OperandTraits.h"
67 #include "llvm/IR/PatternMatch.h"
68 #include "llvm/IR/Type.h"
69 #include "llvm/IR/Use.h"
70 #include "llvm/IR/Value.h"
71 #include "llvm/MC/MCRegisterInfo.h"
72 #include "llvm/Support/Casting.h"
73 #include "llvm/Support/CodeGen.h"
74 #include "llvm/Support/CommandLine.h"
75 #include "llvm/Support/Compiler.h"
76 #include "llvm/Support/Debug.h"
77 #include "llvm/Support/ErrorHandling.h"
78 #include "llvm/Support/KnownBits.h"
79 #include "llvm/Support/MachineValueType.h"
80 #include "llvm/Support/MathExtras.h"
81 #include "llvm/Support/raw_ostream.h"
82 #include "llvm/Target/TargetMachine.h"
83 #include "llvm/Target/TargetOptions.h"
84 #include <algorithm>
85 #include <bitset>
86 #include <cassert>
87 #include <cctype>
88 #include <cstdint>
89 #include <cstdlib>
90 #include <iterator>
91 #include <limits>
92 #include <tuple>
93 #include <utility>
94 #include <vector>
95 
96 using namespace llvm;
97 using namespace llvm::PatternMatch;
98 
99 #define DEBUG_TYPE "aarch64-lower"
100 
101 STATISTIC(NumTailCalls, "Number of tail calls");
102 STATISTIC(NumShiftInserts, "Number of vector shift inserts");
103 STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");
104 
105 // FIXME: The necessary dtprel relocations don't seem to be supported
106 // well in the GNU bfd and gold linkers at the moment. Therefore, by
107 // default, for now, fall back to GeneralDynamic code generation.
108 cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration(
109     "aarch64-elf-ldtls-generation", cl::Hidden,
110     cl::desc("Allow AArch64 Local Dynamic TLS code generation"),
111     cl::init(false));
112 
113 static cl::opt<bool>
114 EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden,
115                          cl::desc("Enable AArch64 logical imm instruction "
116                                   "optimization"),
117                          cl::init(true));
118 
// Temporary option added for the purpose of testing functionality added
// to DAGCombiner.cpp in D92230. It is expected that this can be removed in
// the future once both implementations are based on MGATHER rather than the
// GLD1 nodes added for the SVE gather load intrinsics.
123 static cl::opt<bool>
124 EnableCombineMGatherIntrinsics("aarch64-enable-mgather-combine", cl::Hidden,
125                                 cl::desc("Combine extends of AArch64 masked "
126                                          "gather intrinsics"),
127                                 cl::init(true));
128 
129 /// Value type used for condition codes.
130 static const MVT MVT_CC = MVT::i32;
131 
132 static inline EVT getPackedSVEVectorVT(EVT VT) {
133   switch (VT.getSimpleVT().SimpleTy) {
134   default:
135     llvm_unreachable("unexpected element type for vector");
136   case MVT::i8:
137     return MVT::nxv16i8;
138   case MVT::i16:
139     return MVT::nxv8i16;
140   case MVT::i32:
141     return MVT::nxv4i32;
142   case MVT::i64:
143     return MVT::nxv2i64;
144   case MVT::f16:
145     return MVT::nxv8f16;
146   case MVT::f32:
147     return MVT::nxv4f32;
148   case MVT::f64:
149     return MVT::nxv2f64;
150   case MVT::bf16:
151     return MVT::nxv8bf16;
152   }
153 }
154 
155 // NOTE: Currently there's only a need to return integer vector types. If this
156 // changes then just add an extra "type" parameter.
157 static inline EVT getPackedSVEVectorVT(ElementCount EC) {
158   switch (EC.getKnownMinValue()) {
159   default:
160     llvm_unreachable("unexpected element count for vector");
161   case 16:
162     return MVT::nxv16i8;
163   case 8:
164     return MVT::nxv8i16;
165   case 4:
166     return MVT::nxv4i32;
167   case 2:
168     return MVT::nxv2i64;
169   }
170 }
171 
172 static inline EVT getPromotedVTForPredicate(EVT VT) {
173   assert(VT.isScalableVector() && (VT.getVectorElementType() == MVT::i1) &&
174          "Expected scalable predicate vector type!");
175   switch (VT.getVectorMinNumElements()) {
176   default:
177     llvm_unreachable("unexpected element count for vector");
178   case 2:
179     return MVT::nxv2i64;
180   case 4:
181     return MVT::nxv4i32;
182   case 8:
183     return MVT::nxv8i16;
184   case 16:
185     return MVT::nxv16i8;
186   }
187 }
188 
189 /// Returns true if VT's elements occupy the lowest bit positions of its
190 /// associated register class without any intervening space.
191 ///
192 /// For example, nxv2f16, nxv4f16 and nxv8f16 are legal types that belong to the
193 /// same register class, but only nxv8f16 can be treated as a packed vector.
194 static inline bool isPackedVectorType(EVT VT, SelectionDAG &DAG) {
195   assert(VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
196          "Expected legal vector type!");
197   return VT.isFixedLengthVector() ||
198          VT.getSizeInBits().getKnownMinSize() == AArch64::SVEBitsPerBlock;
199 }
200 
// Returns true for *_MERGE_PASSTHRU opcodes, whose operand list begins with a
// governing predicate and ends with a passthru value matching the result type.
203 static bool isMergePassthruOpcode(unsigned Opc) {
204   switch (Opc) {
205   default:
206     return false;
207   case AArch64ISD::BITREVERSE_MERGE_PASSTHRU:
208   case AArch64ISD::BSWAP_MERGE_PASSTHRU:
209   case AArch64ISD::REVH_MERGE_PASSTHRU:
210   case AArch64ISD::REVW_MERGE_PASSTHRU:
211   case AArch64ISD::REVD_MERGE_PASSTHRU:
212   case AArch64ISD::CTLZ_MERGE_PASSTHRU:
213   case AArch64ISD::CTPOP_MERGE_PASSTHRU:
214   case AArch64ISD::DUP_MERGE_PASSTHRU:
215   case AArch64ISD::ABS_MERGE_PASSTHRU:
216   case AArch64ISD::NEG_MERGE_PASSTHRU:
217   case AArch64ISD::FNEG_MERGE_PASSTHRU:
218   case AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU:
219   case AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU:
220   case AArch64ISD::FCEIL_MERGE_PASSTHRU:
221   case AArch64ISD::FFLOOR_MERGE_PASSTHRU:
222   case AArch64ISD::FNEARBYINT_MERGE_PASSTHRU:
223   case AArch64ISD::FRINT_MERGE_PASSTHRU:
224   case AArch64ISD::FROUND_MERGE_PASSTHRU:
225   case AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU:
226   case AArch64ISD::FTRUNC_MERGE_PASSTHRU:
227   case AArch64ISD::FP_ROUND_MERGE_PASSTHRU:
228   case AArch64ISD::FP_EXTEND_MERGE_PASSTHRU:
229   case AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU:
230   case AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU:
231   case AArch64ISD::FCVTZU_MERGE_PASSTHRU:
232   case AArch64ISD::FCVTZS_MERGE_PASSTHRU:
233   case AArch64ISD::FSQRT_MERGE_PASSTHRU:
234   case AArch64ISD::FRECPX_MERGE_PASSTHRU:
235   case AArch64ISD::FABS_MERGE_PASSTHRU:
236     return true;
237   }
238 }
239 
240 // Returns true if inactive lanes are known to be zeroed by construction.
241 static bool isZeroingInactiveLanes(SDValue Op) {
242   switch (Op.getOpcode()) {
243   default:
244     // We guarantee i1 splat_vectors to zero the other lanes by
245     // implementing it with ptrue and possibly a punpklo for nxv1i1.
246     if (ISD::isConstantSplatVectorAllOnes(Op.getNode()))
247       return true;
248     return false;
249   case AArch64ISD::PTRUE:
250   case AArch64ISD::SETCC_MERGE_ZERO:
251     return true;
252   case ISD::INTRINSIC_WO_CHAIN:
253     switch (Op.getConstantOperandVal(0)) {
254     default:
255       return false;
256     case Intrinsic::aarch64_sve_ptrue:
257     case Intrinsic::aarch64_sve_pnext:
258     case Intrinsic::aarch64_sve_cmpeq_wide:
259     case Intrinsic::aarch64_sve_cmpne_wide:
260     case Intrinsic::aarch64_sve_cmpge_wide:
261     case Intrinsic::aarch64_sve_cmpgt_wide:
262     case Intrinsic::aarch64_sve_cmplt_wide:
263     case Intrinsic::aarch64_sve_cmple_wide:
264     case Intrinsic::aarch64_sve_cmphs_wide:
265     case Intrinsic::aarch64_sve_cmphi_wide:
266     case Intrinsic::aarch64_sve_cmplo_wide:
267     case Intrinsic::aarch64_sve_cmpls_wide:
268       return true;
269     }
270   }
271 }
272 
273 AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
274                                              const AArch64Subtarget &STI)
275     : TargetLowering(TM), Subtarget(&STI) {
276   // AArch64 doesn't have comparisons which set GPRs or setcc instructions, so
277   // we have to make something up. Arbitrarily, choose ZeroOrOne.
278   setBooleanContents(ZeroOrOneBooleanContent);
  // When comparing vectors, each element of the result is set to all-ones or
  // all-zeros.
281   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
282 
283   // Set up the register classes.
284   addRegisterClass(MVT::i32, &AArch64::GPR32allRegClass);
285   addRegisterClass(MVT::i64, &AArch64::GPR64allRegClass);
286 
287   if (Subtarget->hasLS64()) {
288     addRegisterClass(MVT::i64x8, &AArch64::GPR64x8ClassRegClass);
289     setOperationAction(ISD::LOAD, MVT::i64x8, Custom);
290     setOperationAction(ISD::STORE, MVT::i64x8, Custom);
291   }
292 
293   if (Subtarget->hasFPARMv8()) {
294     addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
295     addRegisterClass(MVT::bf16, &AArch64::FPR16RegClass);
296     addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
297     addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
298     addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
299   }
300 
301   if (Subtarget->hasNEON()) {
302     addRegisterClass(MVT::v16i8, &AArch64::FPR8RegClass);
303     addRegisterClass(MVT::v8i16, &AArch64::FPR16RegClass);
304     // Someone set us up the NEON.
305     addDRTypeForNEON(MVT::v2f32);
306     addDRTypeForNEON(MVT::v8i8);
307     addDRTypeForNEON(MVT::v4i16);
308     addDRTypeForNEON(MVT::v2i32);
309     addDRTypeForNEON(MVT::v1i64);
310     addDRTypeForNEON(MVT::v1f64);
311     addDRTypeForNEON(MVT::v4f16);
312     if (Subtarget->hasBF16())
313       addDRTypeForNEON(MVT::v4bf16);
314 
315     addQRTypeForNEON(MVT::v4f32);
316     addQRTypeForNEON(MVT::v2f64);
317     addQRTypeForNEON(MVT::v16i8);
318     addQRTypeForNEON(MVT::v8i16);
319     addQRTypeForNEON(MVT::v4i32);
320     addQRTypeForNEON(MVT::v2i64);
321     addQRTypeForNEON(MVT::v8f16);
322     if (Subtarget->hasBF16())
323       addQRTypeForNEON(MVT::v8bf16);
324   }
325 
326   if (Subtarget->hasSVE() || Subtarget->hasSME()) {
    // Add legal SVE predicate types
328     addRegisterClass(MVT::nxv1i1, &AArch64::PPRRegClass);
329     addRegisterClass(MVT::nxv2i1, &AArch64::PPRRegClass);
330     addRegisterClass(MVT::nxv4i1, &AArch64::PPRRegClass);
331     addRegisterClass(MVT::nxv8i1, &AArch64::PPRRegClass);
332     addRegisterClass(MVT::nxv16i1, &AArch64::PPRRegClass);
333 
    // Add legal SVE data types
335     addRegisterClass(MVT::nxv16i8, &AArch64::ZPRRegClass);
336     addRegisterClass(MVT::nxv8i16, &AArch64::ZPRRegClass);
337     addRegisterClass(MVT::nxv4i32, &AArch64::ZPRRegClass);
338     addRegisterClass(MVT::nxv2i64, &AArch64::ZPRRegClass);
339 
340     addRegisterClass(MVT::nxv2f16, &AArch64::ZPRRegClass);
341     addRegisterClass(MVT::nxv4f16, &AArch64::ZPRRegClass);
342     addRegisterClass(MVT::nxv8f16, &AArch64::ZPRRegClass);
343     addRegisterClass(MVT::nxv2f32, &AArch64::ZPRRegClass);
344     addRegisterClass(MVT::nxv4f32, &AArch64::ZPRRegClass);
345     addRegisterClass(MVT::nxv2f64, &AArch64::ZPRRegClass);
346 
347     if (Subtarget->hasBF16()) {
348       addRegisterClass(MVT::nxv2bf16, &AArch64::ZPRRegClass);
349       addRegisterClass(MVT::nxv4bf16, &AArch64::ZPRRegClass);
350       addRegisterClass(MVT::nxv8bf16, &AArch64::ZPRRegClass);
351     }
352 
353     if (Subtarget->useSVEForFixedLengthVectors()) {
354       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
355         if (useSVEForFixedLengthVectorVT(VT))
356           addRegisterClass(VT, &AArch64::ZPRRegClass);
357 
358       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
359         if (useSVEForFixedLengthVectorVT(VT))
360           addRegisterClass(VT, &AArch64::ZPRRegClass);
361     }
362   }
363 
364   // Compute derived properties from the register classes
365   computeRegisterProperties(Subtarget->getRegisterInfo());
366 
367   // Provide all sorts of operation actions
368   setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
369   setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
370   setOperationAction(ISD::SETCC, MVT::i32, Custom);
371   setOperationAction(ISD::SETCC, MVT::i64, Custom);
372   setOperationAction(ISD::SETCC, MVT::f16, Custom);
373   setOperationAction(ISD::SETCC, MVT::f32, Custom);
374   setOperationAction(ISD::SETCC, MVT::f64, Custom);
375   setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom);
376   setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom);
377   setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom);
378   setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom);
379   setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom);
380   setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom);
381   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
382   setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
383   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
384   setOperationAction(ISD::BR_CC, MVT::i32, Custom);
385   setOperationAction(ISD::BR_CC, MVT::i64, Custom);
386   setOperationAction(ISD::BR_CC, MVT::f16, Custom);
387   setOperationAction(ISD::BR_CC, MVT::f32, Custom);
388   setOperationAction(ISD::BR_CC, MVT::f64, Custom);
389   setOperationAction(ISD::SELECT, MVT::i32, Custom);
390   setOperationAction(ISD::SELECT, MVT::i64, Custom);
391   setOperationAction(ISD::SELECT, MVT::f16, Custom);
392   setOperationAction(ISD::SELECT, MVT::f32, Custom);
393   setOperationAction(ISD::SELECT, MVT::f64, Custom);
394   setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
395   setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
396   setOperationAction(ISD::SELECT_CC, MVT::f16, Custom);
397   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
398   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
399   setOperationAction(ISD::BR_JT, MVT::Other, Custom);
400   setOperationAction(ISD::JumpTable, MVT::i64, Custom);
401 
402   setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
403   setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
404   setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
405 
406   setOperationAction(ISD::FREM, MVT::f32, Expand);
407   setOperationAction(ISD::FREM, MVT::f64, Expand);
408   setOperationAction(ISD::FREM, MVT::f80, Expand);
409 
410   setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
411 
412   // Custom lowering hooks are needed for XOR
413   // to fold it into CSINC/CSINV.
414   setOperationAction(ISD::XOR, MVT::i32, Custom);
415   setOperationAction(ISD::XOR, MVT::i64, Custom);
416 
417   // Virtually no operation on f128 is legal, but LLVM can't expand them when
418   // there's a valid register class, so we need custom operations in most cases.
419   setOperationAction(ISD::FABS, MVT::f128, Expand);
420   setOperationAction(ISD::FADD, MVT::f128, LibCall);
421   setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
422   setOperationAction(ISD::FCOS, MVT::f128, Expand);
423   setOperationAction(ISD::FDIV, MVT::f128, LibCall);
424   setOperationAction(ISD::FMA, MVT::f128, Expand);
425   setOperationAction(ISD::FMUL, MVT::f128, LibCall);
426   setOperationAction(ISD::FNEG, MVT::f128, Expand);
427   setOperationAction(ISD::FPOW, MVT::f128, Expand);
428   setOperationAction(ISD::FREM, MVT::f128, Expand);
429   setOperationAction(ISD::FRINT, MVT::f128, Expand);
430   setOperationAction(ISD::FSIN, MVT::f128, Expand);
431   setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
432   setOperationAction(ISD::FSQRT, MVT::f128, Expand);
433   setOperationAction(ISD::FSUB, MVT::f128, LibCall);
434   setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
435   setOperationAction(ISD::SETCC, MVT::f128, Custom);
436   setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom);
437   setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom);
438   setOperationAction(ISD::BR_CC, MVT::f128, Custom);
439   setOperationAction(ISD::SELECT, MVT::f128, Custom);
440   setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
441   setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
442   // FIXME: f128 FMINIMUM and FMAXIMUM (including STRICT versions) currently
443   // aren't handled.
444 
445   // Lowering for many of the conversions is actually specified by the non-f128
446   // type. The LowerXXX function will be trivial when f128 isn't involved.
447   setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
448   setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
449   setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
450   setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
451   setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
452   setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom);
453   setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
454   setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
455   setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
456   setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
457   setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
458   setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom);
459   setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
460   setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
461   setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
462   setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
463   setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
464   setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom);
465   setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
466   setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
467   setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
468   setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
469   setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
470   setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom);
471   setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
472   setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
473   setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
474   setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
475   setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
476   setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
477 
478   setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i32, Custom);
479   setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
480   setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Custom);
481   setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
482 
483   // Variable arguments.
484   setOperationAction(ISD::VASTART, MVT::Other, Custom);
485   setOperationAction(ISD::VAARG, MVT::Other, Custom);
486   setOperationAction(ISD::VACOPY, MVT::Other, Custom);
487   setOperationAction(ISD::VAEND, MVT::Other, Expand);
488 
489   // Variable-sized objects.
490   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
491   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
492 
493   if (Subtarget->isTargetWindows())
494     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
495   else
496     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
497 
498   // Constant pool entries
499   setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
500 
501   // BlockAddress
502   setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
503 
504   // AArch64 lacks both left-rotate and popcount instructions.
505   setOperationAction(ISD::ROTL, MVT::i32, Expand);
506   setOperationAction(ISD::ROTL, MVT::i64, Expand);
507   for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
508     setOperationAction(ISD::ROTL, VT, Expand);
509     setOperationAction(ISD::ROTR, VT, Expand);
510   }
511 
512   // AArch64 doesn't have i32 MULH{S|U}.
513   setOperationAction(ISD::MULHU, MVT::i32, Expand);
514   setOperationAction(ISD::MULHS, MVT::i32, Expand);
515 
516   // AArch64 doesn't have {U|S}MUL_LOHI.
517   setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
518   setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
519 
520   setOperationAction(ISD::CTPOP, MVT::i32, Custom);
521   setOperationAction(ISD::CTPOP, MVT::i64, Custom);
522   setOperationAction(ISD::CTPOP, MVT::i128, Custom);
523 
524   setOperationAction(ISD::PARITY, MVT::i64, Custom);
525   setOperationAction(ISD::PARITY, MVT::i128, Custom);
526 
527   setOperationAction(ISD::ABS, MVT::i32, Custom);
528   setOperationAction(ISD::ABS, MVT::i64, Custom);
529 
530   setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
531   setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
532   for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
533     setOperationAction(ISD::SDIVREM, VT, Expand);
534     setOperationAction(ISD::UDIVREM, VT, Expand);
535   }
536   setOperationAction(ISD::SREM, MVT::i32, Expand);
537   setOperationAction(ISD::SREM, MVT::i64, Expand);
538   setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
539   setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
540   setOperationAction(ISD::UREM, MVT::i32, Expand);
541   setOperationAction(ISD::UREM, MVT::i64, Expand);
542 
543   // Custom lower Add/Sub/Mul with overflow.
544   setOperationAction(ISD::SADDO, MVT::i32, Custom);
545   setOperationAction(ISD::SADDO, MVT::i64, Custom);
546   setOperationAction(ISD::UADDO, MVT::i32, Custom);
547   setOperationAction(ISD::UADDO, MVT::i64, Custom);
548   setOperationAction(ISD::SSUBO, MVT::i32, Custom);
549   setOperationAction(ISD::SSUBO, MVT::i64, Custom);
550   setOperationAction(ISD::USUBO, MVT::i32, Custom);
551   setOperationAction(ISD::USUBO, MVT::i64, Custom);
552   setOperationAction(ISD::SMULO, MVT::i32, Custom);
553   setOperationAction(ISD::SMULO, MVT::i64, Custom);
554   setOperationAction(ISD::UMULO, MVT::i32, Custom);
555   setOperationAction(ISD::UMULO, MVT::i64, Custom);
556 
557   setOperationAction(ISD::ADDCARRY, MVT::i32, Custom);
558   setOperationAction(ISD::ADDCARRY, MVT::i64, Custom);
559   setOperationAction(ISD::SUBCARRY, MVT::i32, Custom);
560   setOperationAction(ISD::SUBCARRY, MVT::i64, Custom);
561   setOperationAction(ISD::SADDO_CARRY, MVT::i32, Custom);
562   setOperationAction(ISD::SADDO_CARRY, MVT::i64, Custom);
563   setOperationAction(ISD::SSUBO_CARRY, MVT::i32, Custom);
564   setOperationAction(ISD::SSUBO_CARRY, MVT::i64, Custom);
565 
566   setOperationAction(ISD::FSIN, MVT::f32, Expand);
567   setOperationAction(ISD::FSIN, MVT::f64, Expand);
568   setOperationAction(ISD::FCOS, MVT::f32, Expand);
569   setOperationAction(ISD::FCOS, MVT::f64, Expand);
570   setOperationAction(ISD::FPOW, MVT::f32, Expand);
571   setOperationAction(ISD::FPOW, MVT::f64, Expand);
572   setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
573   setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
574   if (Subtarget->hasFullFP16())
575     setOperationAction(ISD::FCOPYSIGN, MVT::f16, Custom);
576   else
577     setOperationAction(ISD::FCOPYSIGN, MVT::f16, Promote);
578 
579   for (auto Op : {ISD::FREM,        ISD::FPOW,         ISD::FPOWI,
580                   ISD::FCOS,        ISD::FSIN,         ISD::FSINCOS,
581                   ISD::FEXP,        ISD::FEXP2,        ISD::FLOG,
582                   ISD::FLOG2,       ISD::FLOG10,       ISD::STRICT_FREM,
583                   ISD::STRICT_FPOW, ISD::STRICT_FPOWI, ISD::STRICT_FCOS,
584                   ISD::STRICT_FSIN, ISD::STRICT_FEXP,  ISD::STRICT_FEXP2,
585                   ISD::STRICT_FLOG, ISD::STRICT_FLOG2, ISD::STRICT_FLOG10}) {
586     setOperationAction(Op, MVT::f16, Promote);
587     setOperationAction(Op, MVT::v4f16, Expand);
588     setOperationAction(Op, MVT::v8f16, Expand);
589   }
590 
591   if (!Subtarget->hasFullFP16()) {
592     for (auto Op :
593          {ISD::SELECT,         ISD::SELECT_CC,      ISD::SETCC,
594           ISD::BR_CC,          ISD::FADD,           ISD::FSUB,
595           ISD::FMUL,           ISD::FDIV,           ISD::FMA,
596           ISD::FNEG,           ISD::FABS,           ISD::FCEIL,
597           ISD::FSQRT,          ISD::FFLOOR,         ISD::FNEARBYINT,
598           ISD::FRINT,          ISD::FROUND,         ISD::FROUNDEVEN,
599           ISD::FTRUNC,         ISD::FMINNUM,        ISD::FMAXNUM,
600           ISD::FMINIMUM,       ISD::FMAXIMUM,       ISD::STRICT_FADD,
601           ISD::STRICT_FSUB,    ISD::STRICT_FMUL,    ISD::STRICT_FDIV,
602           ISD::STRICT_FMA,     ISD::STRICT_FCEIL,   ISD::STRICT_FFLOOR,
603           ISD::STRICT_FSQRT,   ISD::STRICT_FRINT,   ISD::STRICT_FNEARBYINT,
604           ISD::STRICT_FROUND,  ISD::STRICT_FTRUNC,  ISD::STRICT_FROUNDEVEN,
605           ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM, ISD::STRICT_FMINIMUM,
606           ISD::STRICT_FMAXIMUM})
607       setOperationAction(Op, MVT::f16, Promote);
608 
    // Round-to-integer operations need custom lowering for fp16, as Promote
    // doesn't work because the result type is integer.
611     for (auto Op : {ISD::STRICT_LROUND, ISD::STRICT_LLROUND, ISD::STRICT_LRINT,
612                     ISD::STRICT_LLRINT})
613       setOperationAction(Op, MVT::f16, Custom);
614 
    // Promote v4f16 to v4f32 when that is known to be safe.
616     setOperationAction(ISD::FADD,        MVT::v4f16, Promote);
617     setOperationAction(ISD::FSUB,        MVT::v4f16, Promote);
618     setOperationAction(ISD::FMUL,        MVT::v4f16, Promote);
619     setOperationAction(ISD::FDIV,        MVT::v4f16, Promote);
620     AddPromotedToType(ISD::FADD,         MVT::v4f16, MVT::v4f32);
621     AddPromotedToType(ISD::FSUB,         MVT::v4f16, MVT::v4f32);
622     AddPromotedToType(ISD::FMUL,         MVT::v4f16, MVT::v4f32);
623     AddPromotedToType(ISD::FDIV,         MVT::v4f16, MVT::v4f32);
624 
625     setOperationAction(ISD::FABS,        MVT::v4f16, Expand);
626     setOperationAction(ISD::FNEG,        MVT::v4f16, Expand);
627     setOperationAction(ISD::FROUND,      MVT::v4f16, Expand);
628     setOperationAction(ISD::FROUNDEVEN,  MVT::v4f16, Expand);
629     setOperationAction(ISD::FMA,         MVT::v4f16, Expand);
630     setOperationAction(ISD::SETCC,       MVT::v4f16, Expand);
631     setOperationAction(ISD::BR_CC,       MVT::v4f16, Expand);
632     setOperationAction(ISD::SELECT,      MVT::v4f16, Expand);
633     setOperationAction(ISD::SELECT_CC,   MVT::v4f16, Expand);
634     setOperationAction(ISD::FTRUNC,      MVT::v4f16, Expand);
635     setOperationAction(ISD::FCOPYSIGN,   MVT::v4f16, Expand);
636     setOperationAction(ISD::FFLOOR,      MVT::v4f16, Expand);
637     setOperationAction(ISD::FCEIL,       MVT::v4f16, Expand);
638     setOperationAction(ISD::FRINT,       MVT::v4f16, Expand);
639     setOperationAction(ISD::FNEARBYINT,  MVT::v4f16, Expand);
640     setOperationAction(ISD::FSQRT,       MVT::v4f16, Expand);
641 
642     setOperationAction(ISD::FABS,        MVT::v8f16, Expand);
643     setOperationAction(ISD::FADD,        MVT::v8f16, Expand);
644     setOperationAction(ISD::FCEIL,       MVT::v8f16, Expand);
645     setOperationAction(ISD::FCOPYSIGN,   MVT::v8f16, Expand);
646     setOperationAction(ISD::FDIV,        MVT::v8f16, Expand);
647     setOperationAction(ISD::FFLOOR,      MVT::v8f16, Expand);
648     setOperationAction(ISD::FMA,         MVT::v8f16, Expand);
649     setOperationAction(ISD::FMUL,        MVT::v8f16, Expand);
650     setOperationAction(ISD::FNEARBYINT,  MVT::v8f16, Expand);
651     setOperationAction(ISD::FNEG,        MVT::v8f16, Expand);
652     setOperationAction(ISD::FROUND,      MVT::v8f16, Expand);
653     setOperationAction(ISD::FROUNDEVEN,  MVT::v8f16, Expand);
654     setOperationAction(ISD::FRINT,       MVT::v8f16, Expand);
655     setOperationAction(ISD::FSQRT,       MVT::v8f16, Expand);
656     setOperationAction(ISD::FSUB,        MVT::v8f16, Expand);
657     setOperationAction(ISD::FTRUNC,      MVT::v8f16, Expand);
658     setOperationAction(ISD::SETCC,       MVT::v8f16, Expand);
659     setOperationAction(ISD::BR_CC,       MVT::v8f16, Expand);
660     setOperationAction(ISD::SELECT,      MVT::v8f16, Expand);
661     setOperationAction(ISD::SELECT_CC,   MVT::v8f16, Expand);
662     setOperationAction(ISD::FP_EXTEND,   MVT::v8f16, Expand);
663   }
664 
665   // AArch64 has implementations of a lot of rounding-like FP operations.
666   for (auto Op :
667        {ISD::FFLOOR,          ISD::FNEARBYINT,      ISD::FCEIL,
668         ISD::FRINT,           ISD::FTRUNC,          ISD::FROUND,
669         ISD::FROUNDEVEN,      ISD::FMINNUM,         ISD::FMAXNUM,
670         ISD::FMINIMUM,        ISD::FMAXIMUM,        ISD::LROUND,
671         ISD::LLROUND,         ISD::LRINT,           ISD::LLRINT,
672         ISD::STRICT_FFLOOR,   ISD::STRICT_FCEIL,    ISD::STRICT_FNEARBYINT,
673         ISD::STRICT_FRINT,    ISD::STRICT_FTRUNC,   ISD::STRICT_FROUNDEVEN,
674         ISD::STRICT_FROUND,   ISD::STRICT_FMINNUM,  ISD::STRICT_FMAXNUM,
675         ISD::STRICT_FMINIMUM, ISD::STRICT_FMAXIMUM, ISD::STRICT_LROUND,
676         ISD::STRICT_LLROUND,  ISD::STRICT_LRINT,    ISD::STRICT_LLRINT}) {
677     for (MVT Ty : {MVT::f32, MVT::f64})
678       setOperationAction(Op, Ty, Legal);
679     if (Subtarget->hasFullFP16())
680       setOperationAction(Op, MVT::f16, Legal);
681   }
682 
683   // Basic strict FP operations are legal
684   for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
685                   ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT}) {
686     for (MVT Ty : {MVT::f32, MVT::f64})
687       setOperationAction(Op, Ty, Legal);
688     if (Subtarget->hasFullFP16())
689       setOperationAction(Op, MVT::f16, Legal);
690   }
691 
692   // Strict conversion to a larger type is legal
693   for (auto VT : {MVT::f32, MVT::f64})
694     setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
695 
696   setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
697 
698   setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
699   setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
700 
701   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom);
702   setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
703   setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
704   setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
705   setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
706 
  // Generate outline atomics library calls only if LSE was not specified for
  // the subtarget.
709   if (Subtarget->outlineAtomics() && !Subtarget->hasLSE()) {
710     setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, LibCall);
711     setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, LibCall);
712     setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, LibCall);
713     setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, LibCall);
714     setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, LibCall);
715     setOperationAction(ISD::ATOMIC_SWAP, MVT::i8, LibCall);
716     setOperationAction(ISD::ATOMIC_SWAP, MVT::i16, LibCall);
717     setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, LibCall);
718     setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, LibCall);
719     setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i8, LibCall);
720     setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i16, LibCall);
721     setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, LibCall);
722     setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, LibCall);
723     setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i8, LibCall);
724     setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i16, LibCall);
725     setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, LibCall);
726     setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, LibCall);
727     setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i8, LibCall);
728     setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i16, LibCall);
729     setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i32, LibCall);
730     setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i64, LibCall);
731     setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i8, LibCall);
732     setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i16, LibCall);
733     setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, LibCall);
734     setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, LibCall);
735 #define LCALLNAMES(A, B, N)                                                    \
736   setLibcallName(A##N##_RELAX, #B #N "_relax");                                \
737   setLibcallName(A##N##_ACQ, #B #N "_acq");                                    \
738   setLibcallName(A##N##_REL, #B #N "_rel");                                    \
739   setLibcallName(A##N##_ACQ_REL, #B #N "_acq_rel");
740 #define LCALLNAME4(A, B)                                                       \
741   LCALLNAMES(A, B, 1)                                                          \
742   LCALLNAMES(A, B, 2) LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8)
743 #define LCALLNAME5(A, B)                                                       \
744   LCALLNAMES(A, B, 1)                                                          \
745   LCALLNAMES(A, B, 2)                                                          \
746   LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8) LCALLNAMES(A, B, 16)
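    // As an illustration, LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDADD, __aarch64_ldadd)
    // expands to calls such as
    //   setLibcallName(RTLIB::OUTLINE_ATOMIC_LDADD4_ACQ_REL, "__aarch64_ldadd4_acq_rel");
    // i.e. one helper per access size (1/2/4/8 bytes) and memory ordering.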
747     LCALLNAME5(RTLIB::OUTLINE_ATOMIC_CAS, __aarch64_cas)
748     LCALLNAME4(RTLIB::OUTLINE_ATOMIC_SWP, __aarch64_swp)
749     LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDADD, __aarch64_ldadd)
750     LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDSET, __aarch64_ldset)
751     LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDCLR, __aarch64_ldclr)
752     LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDEOR, __aarch64_ldeor)
753 #undef LCALLNAMES
754 #undef LCALLNAME4
755 #undef LCALLNAME5
756   }
757 
758   // 128-bit loads and stores can be done without expanding
759   setOperationAction(ISD::LOAD, MVT::i128, Custom);
760   setOperationAction(ISD::STORE, MVT::i128, Custom);
761 
762   // Aligned 128-bit loads and stores are single-copy atomic according to the
763   // v8.4a spec.
764   if (Subtarget->hasLSE2()) {
765     setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
766     setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);
767   }
768 
  // 256-bit non-temporal stores can be lowered to STNP. Do this as part of the
  // custom lowering, as there are no unpaired non-temporal stores and
  // legalization will break up 256-bit inputs.
772   setOperationAction(ISD::STORE, MVT::v32i8, Custom);
773   setOperationAction(ISD::STORE, MVT::v16i16, Custom);
774   setOperationAction(ISD::STORE, MVT::v16f16, Custom);
775   setOperationAction(ISD::STORE, MVT::v8i32, Custom);
776   setOperationAction(ISD::STORE, MVT::v8f32, Custom);
777   setOperationAction(ISD::STORE, MVT::v4f64, Custom);
778   setOperationAction(ISD::STORE, MVT::v4i64, Custom);
779 
780   // Lower READCYCLECOUNTER using an mrs from PMCCNTR_EL0.
781   // This requires the Performance Monitors extension.
782   if (Subtarget->hasPerfMon())
783     setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
784 
785   if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
786       getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
787     // Issue __sincos_stret if available.
788     setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
789     setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
790   } else {
791     setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
792     setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
793   }
794 
795   if (Subtarget->getTargetTriple().isOSMSVCRT()) {
796     // MSVCRT doesn't have powi; fall back to pow
797     setLibcallName(RTLIB::POWI_F32, nullptr);
798     setLibcallName(RTLIB::POWI_F64, nullptr);
799   }
800 
801   // Make floating-point constants legal for the large code model, so they don't
802   // become loads from the constant pool.
803   if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) {
804     setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
805     setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
806   }
807 
  // AArch64 does not have floating-point extending loads, i1 sign-extending
  // loads, floating-point truncating stores, or v2i32->v2i16 truncating stores.
810   for (MVT VT : MVT::fp_valuetypes()) {
811     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
812     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
813     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
814     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);
815   }
816   for (MVT VT : MVT::integer_valuetypes())
817     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Expand);
818 
819   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
820   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
821   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
822   setTruncStoreAction(MVT::f128, MVT::f80, Expand);
823   setTruncStoreAction(MVT::f128, MVT::f64, Expand);
824   setTruncStoreAction(MVT::f128, MVT::f32, Expand);
825   setTruncStoreAction(MVT::f128, MVT::f16, Expand);
826 
827   setOperationAction(ISD::BITCAST, MVT::i16, Custom);
828   setOperationAction(ISD::BITCAST, MVT::f16, Custom);
829   setOperationAction(ISD::BITCAST, MVT::bf16, Custom);
830 
831   // Indexed loads and stores are supported.
832   for (unsigned im = (unsigned)ISD::PRE_INC;
833        im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
834     setIndexedLoadAction(im, MVT::i8, Legal);
835     setIndexedLoadAction(im, MVT::i16, Legal);
836     setIndexedLoadAction(im, MVT::i32, Legal);
837     setIndexedLoadAction(im, MVT::i64, Legal);
838     setIndexedLoadAction(im, MVT::f64, Legal);
839     setIndexedLoadAction(im, MVT::f32, Legal);
840     setIndexedLoadAction(im, MVT::f16, Legal);
841     setIndexedLoadAction(im, MVT::bf16, Legal);
842     setIndexedStoreAction(im, MVT::i8, Legal);
843     setIndexedStoreAction(im, MVT::i16, Legal);
844     setIndexedStoreAction(im, MVT::i32, Legal);
845     setIndexedStoreAction(im, MVT::i64, Legal);
846     setIndexedStoreAction(im, MVT::f64, Legal);
847     setIndexedStoreAction(im, MVT::f32, Legal);
848     setIndexedStoreAction(im, MVT::f16, Legal);
849     setIndexedStoreAction(im, MVT::bf16, Legal);
850   }
851 
852   // Trap.
853   setOperationAction(ISD::TRAP, MVT::Other, Legal);
854   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
855   setOperationAction(ISD::UBSANTRAP, MVT::Other, Legal);
856 
857   // We combine OR nodes for bitfield operations.
858   setTargetDAGCombine(ISD::OR);
859   // Try to create BICs for vector ANDs.
860   setTargetDAGCombine(ISD::AND);
861 
862   // Vector add and sub nodes may conceal a high-half opportunity.
  // Also, try to fold ADD into CSINC/CSINV.
864   setTargetDAGCombine({ISD::ADD, ISD::ABS, ISD::SUB, ISD::XOR, ISD::SINT_TO_FP,
865                        ISD::UINT_TO_FP});
866 
867   setTargetDAGCombine({ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FP_TO_SINT_SAT,
868                        ISD::FP_TO_UINT_SAT, ISD::FDIV});
869 
  // Try to combine setcc with csel.
871   setTargetDAGCombine(ISD::SETCC);
872 
873   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
874 
875   setTargetDAGCombine({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND,
876                        ISD::VECTOR_SPLICE, ISD::SIGN_EXTEND_INREG,
877                        ISD::CONCAT_VECTORS, ISD::EXTRACT_SUBVECTOR,
878                        ISD::INSERT_SUBVECTOR, ISD::STORE});
879   if (Subtarget->supportsAddressTopByteIgnored())
880     setTargetDAGCombine(ISD::LOAD);
881 
882   setTargetDAGCombine(ISD::MUL);
883 
884   setTargetDAGCombine({ISD::SELECT, ISD::VSELECT});
885 
886   setTargetDAGCombine({ISD::INTRINSIC_VOID, ISD::INTRINSIC_W_CHAIN,
887                        ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
888                        ISD::VECREDUCE_ADD, ISD::STEP_VECTOR});
889 
890   setTargetDAGCombine({ISD::MGATHER, ISD::MSCATTER});
891 
892   setTargetDAGCombine(ISD::FP_EXTEND);
893 
894   setTargetDAGCombine(ISD::GlobalAddress);
895 
  // In case of strict alignment, avoid an excessive number of byte-wide stores.
897   MaxStoresPerMemsetOptSize = 8;
898   MaxStoresPerMemset =
899       Subtarget->requiresStrictAlign() ? MaxStoresPerMemsetOptSize : 32;
900 
901   MaxGluedStoresPerMemcpy = 4;
902   MaxStoresPerMemcpyOptSize = 4;
903   MaxStoresPerMemcpy =
904       Subtarget->requiresStrictAlign() ? MaxStoresPerMemcpyOptSize : 16;
905 
906   MaxStoresPerMemmoveOptSize = 4;
907   MaxStoresPerMemmove = 4;
908 
909   MaxLoadsPerMemcmpOptSize = 4;
910   MaxLoadsPerMemcmp =
911       Subtarget->requiresStrictAlign() ? MaxLoadsPerMemcmpOptSize : 8;
912 
913   setStackPointerRegisterToSaveRestore(AArch64::SP);
914 
915   setSchedulingPreference(Sched::Hybrid);
916 
917   EnableExtLdPromotion = true;
918 
919   // Set required alignment.
920   setMinFunctionAlignment(Align(4));
921   // Set preferred alignments.
922   setPrefLoopAlignment(Align(1ULL << STI.getPrefLoopLogAlignment()));
923   setMaxBytesForAlignment(STI.getMaxBytesForLoopAlignment());
924   setPrefFunctionAlignment(Align(1ULL << STI.getPrefFunctionLogAlignment()));
925 
926   // Only change the limit for entries in a jump table if specified by
  // the subtarget, but not at the command line.
928   unsigned MaxJT = STI.getMaximumJumpTableSize();
929   if (MaxJT && getMaximumJumpTableSize() == UINT_MAX)
930     setMaximumJumpTableSize(MaxJT);
931 
932   setHasExtractBitsInsn(true);
933 
934   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
935 
936   if (Subtarget->hasNEON()) {
937     // FIXME: v1f64 shouldn't be legal if we can avoid it, because it leads to
938     // silliness like this:
939     for (auto Op :
940          {ISD::SELECT,         ISD::SELECT_CC,      ISD::SETCC,
941           ISD::BR_CC,          ISD::FADD,           ISD::FSUB,
942           ISD::FMUL,           ISD::FDIV,           ISD::FMA,
943           ISD::FNEG,           ISD::FABS,           ISD::FCEIL,
944           ISD::FSQRT,          ISD::FFLOOR,         ISD::FNEARBYINT,
945           ISD::FRINT,          ISD::FROUND,         ISD::FROUNDEVEN,
946           ISD::FTRUNC,         ISD::FMINNUM,        ISD::FMAXNUM,
947           ISD::FMINIMUM,       ISD::FMAXIMUM,       ISD::STRICT_FADD,
948           ISD::STRICT_FSUB,    ISD::STRICT_FMUL,    ISD::STRICT_FDIV,
949           ISD::STRICT_FMA,     ISD::STRICT_FCEIL,   ISD::STRICT_FFLOOR,
950           ISD::STRICT_FSQRT,   ISD::STRICT_FRINT,   ISD::STRICT_FNEARBYINT,
951           ISD::STRICT_FROUND,  ISD::STRICT_FTRUNC,  ISD::STRICT_FROUNDEVEN,
952           ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM, ISD::STRICT_FMINIMUM,
953           ISD::STRICT_FMAXIMUM})
954       setOperationAction(Op, MVT::v1f64, Expand);
955 
956     for (auto Op :
957          {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, ISD::UINT_TO_FP,
958           ISD::FP_ROUND, ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT, ISD::MUL,
959           ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT,
960           ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP, ISD::STRICT_FP_ROUND})
961       setOperationAction(Op, MVT::v1i64, Expand);
962 
    // AArch64 doesn't have direct vector->f32 conversion instructions for
    // elements smaller than i32, so promote the input to i32 first.
965     setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i8, MVT::v4i32);
966     setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i8, MVT::v4i32);
967 
    // Similarly, there is no direct i32 -> f64 vector conversion instruction,
    // nor a direct i32 -> f16 vector conversion. Set these to Custom so the
    // conversion happens in two steps: v4i32 -> v4f32 -> v4f16.
971     for (auto Op : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
972                     ISD::STRICT_UINT_TO_FP})
973       for (auto VT : {MVT::v2i32, MVT::v2i64, MVT::v4i32})
974         setOperationAction(Op, VT, Custom);
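    // E.g. (roughly) a v4i32 -> v4f16 sitofp is emitted as an SCVTF to v4f32
    // followed by an FCVTN down to v4f16.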
975 
976     if (Subtarget->hasFullFP16()) {
977       setOperationAction(ISD::SINT_TO_FP, MVT::v8i8, Custom);
978       setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
979       setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Custom);
980       setOperationAction(ISD::UINT_TO_FP, MVT::v16i8, Custom);
981       setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
982       setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
983       setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
984       setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
985     } else {
      // When AArch64 doesn't have full fp16 support, promote the input
      // to i32 first.
988       setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i8, MVT::v8i32);
989       setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i8, MVT::v8i32);
990       setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v16i8, MVT::v16i32);
991       setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v16i8, MVT::v16i32);
992       setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i16, MVT::v4i32);
993       setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i16, MVT::v4i32);
994       setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i16, MVT::v8i32);
995       setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i16, MVT::v8i32);
996     }
997 
998     setOperationAction(ISD::CTLZ,       MVT::v1i64, Expand);
999     setOperationAction(ISD::CTLZ,       MVT::v2i64, Expand);
1000     setOperationAction(ISD::BITREVERSE, MVT::v8i8, Legal);
1001     setOperationAction(ISD::BITREVERSE, MVT::v16i8, Legal);
1002     setOperationAction(ISD::BITREVERSE, MVT::v2i32, Custom);
1003     setOperationAction(ISD::BITREVERSE, MVT::v4i32, Custom);
1004     setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom);
1005     setOperationAction(ISD::BITREVERSE, MVT::v2i64, Custom);
1006     for (auto VT : {MVT::v1i64, MVT::v2i64}) {
1007       setOperationAction(ISD::UMAX, VT, Custom);
1008       setOperationAction(ISD::SMAX, VT, Custom);
1009       setOperationAction(ISD::UMIN, VT, Custom);
1010       setOperationAction(ISD::SMIN, VT, Custom);
1011     }
1012 
1013     // AArch64 doesn't have MUL.2d:
1014     setOperationAction(ISD::MUL, MVT::v2i64, Expand);
1015     // Custom handling for some quad-vector types to detect MULL.
1016     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
1017     setOperationAction(ISD::MUL, MVT::v4i32, Custom);
1018     setOperationAction(ISD::MUL, MVT::v2i64, Custom);
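    // E.g. (roughly) a v4i32 multiply whose operands are sign-extended from
    // v4i16 can be selected as a single SMULL instead of two extends and a
    // full-width multiply.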
1019 
1020     // Saturates
1021     for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32,
1022                     MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1023       setOperationAction(ISD::SADDSAT, VT, Legal);
1024       setOperationAction(ISD::UADDSAT, VT, Legal);
1025       setOperationAction(ISD::SSUBSAT, VT, Legal);
1026       setOperationAction(ISD::USUBSAT, VT, Legal);
1027     }
1028 
1029     for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16,
1030                    MVT::v4i32}) {
1031       setOperationAction(ISD::AVGFLOORS, VT, Legal);
1032       setOperationAction(ISD::AVGFLOORU, VT, Legal);
1033       setOperationAction(ISD::AVGCEILS, VT, Legal);
1034       setOperationAction(ISD::AVGCEILU, VT, Legal);
1035       setOperationAction(ISD::ABDS, VT, Legal);
1036       setOperationAction(ISD::ABDU, VT, Legal);
1037     }
1038 
1039     // Vector reductions
1040     for (MVT VT : { MVT::v4f16, MVT::v2f32,
1041                     MVT::v8f16, MVT::v4f32, MVT::v2f64 }) {
1042       if (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16()) {
1043         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
1044         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
1045 
1046         setOperationAction(ISD::VECREDUCE_FADD, VT, Legal);
1047       }
1048     }
1049     for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32,
1050                     MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
1051       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
1052       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
1053       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
1054       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
1055       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
1056     }
1057     setOperationAction(ISD::VECREDUCE_ADD, MVT::v2i64, Custom);
1058 
1059     setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Legal);
1060     setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
1061     // Likewise, narrowing and extending vector loads/stores aren't handled
1062     // directly.
1063     for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
1064       setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
1065 
1066       if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32) {
1067         setOperationAction(ISD::MULHS, VT, Legal);
1068         setOperationAction(ISD::MULHU, VT, Legal);
1069       } else {
1070         setOperationAction(ISD::MULHS, VT, Expand);
1071         setOperationAction(ISD::MULHU, VT, Expand);
1072       }
1073       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
1074       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
1075 
1076       setOperationAction(ISD::BSWAP, VT, Expand);
1077       setOperationAction(ISD::CTTZ, VT, Expand);
1078 
1079       for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
1080         setTruncStoreAction(VT, InnerVT, Expand);
1081         setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
1082         setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
1083         setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
1084       }
1085     }
1086 
1087     // AArch64 has implementations of a lot of rounding-like FP operations.
1088     for (auto Op :
1089          {ISD::FFLOOR, ISD::FNEARBYINT, ISD::FCEIL, ISD::FRINT, ISD::FTRUNC,
1090           ISD::FROUND, ISD::FROUNDEVEN, ISD::STRICT_FFLOOR,
1091           ISD::STRICT_FNEARBYINT, ISD::STRICT_FCEIL, ISD::STRICT_FRINT,
1092           ISD::STRICT_FTRUNC, ISD::STRICT_FROUND, ISD::STRICT_FROUNDEVEN}) {
1093       for (MVT Ty : {MVT::v2f32, MVT::v4f32, MVT::v2f64})
1094         setOperationAction(Op, Ty, Legal);
1095       if (Subtarget->hasFullFP16())
1096         for (MVT Ty : {MVT::v4f16, MVT::v8f16})
1097           setOperationAction(Op, Ty, Legal);
1098     }
1099 
1100     setTruncStoreAction(MVT::v4i16, MVT::v4i8, Custom);
1101 
1102     setLoadExtAction(ISD::EXTLOAD,  MVT::v4i16, MVT::v4i8, Custom);
1103     setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Custom);
1104     setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Custom);
1105     setLoadExtAction(ISD::EXTLOAD,  MVT::v4i32, MVT::v4i8, Custom);
1106     setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Custom);
1107     setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Custom);
1108 
1109     // ADDP custom lowering
1110     for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1111       setOperationAction(ISD::ADD, VT, Custom);
1112     // FADDP custom lowering
1113     for (MVT VT : { MVT::v16f16, MVT::v8f32, MVT::v4f64 })
1114       setOperationAction(ISD::FADD, VT, Custom);
1115   }
1116 
1117   if (Subtarget->hasSME()) {
1118     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1119   }
1120 
  // FIXME: Move lowering for more nodes here if they are common between
  // SVE and SME.
1123   if (Subtarget->hasSVE() || Subtarget->hasSME()) {
1124     for (auto VT :
1125          {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1, MVT::nxv1i1}) {
1126       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
1127       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1128     }
1129   }
1130 
1131   if (Subtarget->hasSVE()) {
1132     for (auto VT : {MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64}) {
1133       setOperationAction(ISD::BITREVERSE, VT, Custom);
1134       setOperationAction(ISD::BSWAP, VT, Custom);
1135       setOperationAction(ISD::CTLZ, VT, Custom);
1136       setOperationAction(ISD::CTPOP, VT, Custom);
1137       setOperationAction(ISD::CTTZ, VT, Custom);
1138       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1139       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
1140       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
1141       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
1142       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
1143       setOperationAction(ISD::MGATHER, VT, Custom);
1144       setOperationAction(ISD::MSCATTER, VT, Custom);
1145       setOperationAction(ISD::MLOAD, VT, Custom);
1146       setOperationAction(ISD::MUL, VT, Custom);
1147       setOperationAction(ISD::MULHS, VT, Custom);
1148       setOperationAction(ISD::MULHU, VT, Custom);
1149       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
1150       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
1151       setOperationAction(ISD::SELECT, VT, Custom);
1152       setOperationAction(ISD::SETCC, VT, Custom);
1153       setOperationAction(ISD::SDIV, VT, Custom);
1154       setOperationAction(ISD::UDIV, VT, Custom);
1155       setOperationAction(ISD::SMIN, VT, Custom);
1156       setOperationAction(ISD::UMIN, VT, Custom);
1157       setOperationAction(ISD::SMAX, VT, Custom);
1158       setOperationAction(ISD::UMAX, VT, Custom);
1159       setOperationAction(ISD::SHL, VT, Custom);
1160       setOperationAction(ISD::SRL, VT, Custom);
1161       setOperationAction(ISD::SRA, VT, Custom);
1162       setOperationAction(ISD::ABS, VT, Custom);
1163       setOperationAction(ISD::ABDS, VT, Custom);
1164       setOperationAction(ISD::ABDU, VT, Custom);
1165       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
1166       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
1167       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
1168       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
1169       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
1170       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
1171       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
1172       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
1173 
1174       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
1175       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
1176       setOperationAction(ISD::SELECT_CC, VT, Expand);
1177       setOperationAction(ISD::ROTL, VT, Expand);
1178       setOperationAction(ISD::ROTR, VT, Expand);
1179 
1180       setOperationAction(ISD::SADDSAT, VT, Legal);
1181       setOperationAction(ISD::UADDSAT, VT, Legal);
1182       setOperationAction(ISD::SSUBSAT, VT, Legal);
1183       setOperationAction(ISD::USUBSAT, VT, Legal);
1184       setOperationAction(ISD::UREM, VT, Expand);
1185       setOperationAction(ISD::SREM, VT, Expand);
1186       setOperationAction(ISD::SDIVREM, VT, Expand);
1187       setOperationAction(ISD::UDIVREM, VT, Expand);
1188     }
1189 
1190     // Illegal unpacked integer vector types.
1191     for (auto VT : {MVT::nxv8i8, MVT::nxv4i16, MVT::nxv2i32}) {
1192       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1193       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1194     }
1195 
1196     // Legalize unpacked bitcasts to REINTERPRET_CAST.
1197     for (auto VT : {MVT::nxv2i16, MVT::nxv4i16, MVT::nxv2i32, MVT::nxv2bf16,
1198                     MVT::nxv4bf16, MVT::nxv2f16, MVT::nxv4f16, MVT::nxv2f32})
1199       setOperationAction(ISD::BITCAST, VT, Custom);
1200 
1201     for (auto VT :
1202          { MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64, MVT::nxv4i8,
1203            MVT::nxv4i16, MVT::nxv4i32, MVT::nxv8i8, MVT::nxv8i16 })
1204       setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Legal);
1205 
1206     for (auto VT :
1207          {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1, MVT::nxv1i1}) {
1208       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1209       setOperationAction(ISD::SELECT, VT, Custom);
1210       setOperationAction(ISD::SETCC, VT, Custom);
1211       setOperationAction(ISD::TRUNCATE, VT, Custom);
1212       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
1213       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
1214       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
1215 
1216       setOperationAction(ISD::SELECT_CC, VT, Expand);
1217       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1218       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1219 
1220       // There are no legal MVT::nxv16f## based types.
1221       if (VT != MVT::nxv16i1) {
1222         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
1223         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
1224       }
1225     }
1226 
1227     // NEON doesn't support masked loads/stores/gathers/scatters, but SVE does
1228     for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v1f64,
1229                     MVT::v2f64, MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
1230                     MVT::v2i32, MVT::v4i32, MVT::v1i64, MVT::v2i64}) {
1231       setOperationAction(ISD::MLOAD, VT, Custom);
1232       setOperationAction(ISD::MSTORE, VT, Custom);
1233       setOperationAction(ISD::MGATHER, VT, Custom);
1234       setOperationAction(ISD::MSCATTER, VT, Custom);
1235     }
1236 
    // First, mark all scalable vector extending loads and truncating stores as
    // Expand, for both integer and floating-point scalable vectors.
1239     for (MVT VT : MVT::scalable_vector_valuetypes()) {
1240       for (MVT InnerVT : MVT::scalable_vector_valuetypes()) {
1241         setTruncStoreAction(VT, InnerVT, Expand);
1242         setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
1243         setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
1244         setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
1245       }
1246     }
1247 
1248     // Then, selectively enable those which we directly support.
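    // Roughly speaking, these map onto the SVE LD1B/LD1H/LD1W extending loads
    // (and their signed LD1SB/LD1SH/LD1SW forms) and the ST1B/ST1H/ST1W
    // truncating stores; e.g. the nxv2i64 -> nxv2i8 truncating store is an
    // ST1B on .d elements (illustrative note).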
1249     setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i8, Legal);
1250     setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i16, Legal);
1251     setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i32, Legal);
1252     setTruncStoreAction(MVT::nxv4i32, MVT::nxv4i8, Legal);
1253     setTruncStoreAction(MVT::nxv4i32, MVT::nxv4i16, Legal);
1254     setTruncStoreAction(MVT::nxv8i16, MVT::nxv8i8, Legal);
1255     for (auto Op : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) {
1256       setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i8, Legal);
1257       setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i16, Legal);
1258       setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i32, Legal);
1259       setLoadExtAction(Op, MVT::nxv4i32, MVT::nxv4i8, Legal);
1260       setLoadExtAction(Op, MVT::nxv4i32, MVT::nxv4i16, Legal);
1261       setLoadExtAction(Op, MVT::nxv8i16, MVT::nxv8i8, Legal);
1262     }
1263 
    // SVE supports truncating stores of 64-bit and 128-bit vectors
1265     setTruncStoreAction(MVT::v2i64, MVT::v2i8, Custom);
1266     setTruncStoreAction(MVT::v2i64, MVT::v2i16, Custom);
1267     setTruncStoreAction(MVT::v2i64, MVT::v2i32, Custom);
1268     setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
1269     setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
1270 
1271     for (auto VT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32,
1272                     MVT::nxv4f32, MVT::nxv2f64}) {
1273       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1274       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1275       setOperationAction(ISD::MGATHER, VT, Custom);
1276       setOperationAction(ISD::MSCATTER, VT, Custom);
1277       setOperationAction(ISD::MLOAD, VT, Custom);
1278       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
1279       setOperationAction(ISD::SELECT, VT, Custom);
1280       setOperationAction(ISD::FADD, VT, Custom);
1281       setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1282       setOperationAction(ISD::FDIV, VT, Custom);
1283       setOperationAction(ISD::FMA, VT, Custom);
1284       setOperationAction(ISD::FMAXIMUM, VT, Custom);
1285       setOperationAction(ISD::FMAXNUM, VT, Custom);
1286       setOperationAction(ISD::FMINIMUM, VT, Custom);
1287       setOperationAction(ISD::FMINNUM, VT, Custom);
1288       setOperationAction(ISD::FMUL, VT, Custom);
1289       setOperationAction(ISD::FNEG, VT, Custom);
1290       setOperationAction(ISD::FSUB, VT, Custom);
1291       setOperationAction(ISD::FCEIL, VT, Custom);
1292       setOperationAction(ISD::FFLOOR, VT, Custom);
1293       setOperationAction(ISD::FNEARBYINT, VT, Custom);
1294       setOperationAction(ISD::FRINT, VT, Custom);
1295       setOperationAction(ISD::FROUND, VT, Custom);
1296       setOperationAction(ISD::FROUNDEVEN, VT, Custom);
1297       setOperationAction(ISD::FTRUNC, VT, Custom);
1298       setOperationAction(ISD::FSQRT, VT, Custom);
1299       setOperationAction(ISD::FABS, VT, Custom);
1300       setOperationAction(ISD::FP_EXTEND, VT, Custom);
1301       setOperationAction(ISD::FP_ROUND, VT, Custom);
1302       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1303       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
1304       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
1305       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1306       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
1307 
1308       setOperationAction(ISD::SELECT_CC, VT, Expand);
1309       setOperationAction(ISD::FREM, VT, Expand);
1310       setOperationAction(ISD::FPOW, VT, Expand);
1311       setOperationAction(ISD::FPOWI, VT, Expand);
1312       setOperationAction(ISD::FCOS, VT, Expand);
1313       setOperationAction(ISD::FSIN, VT, Expand);
1314       setOperationAction(ISD::FSINCOS, VT, Expand);
1315       setOperationAction(ISD::FEXP, VT, Expand);
1316       setOperationAction(ISD::FEXP2, VT, Expand);
1317       setOperationAction(ISD::FLOG, VT, Expand);
1318       setOperationAction(ISD::FLOG2, VT, Expand);
1319       setOperationAction(ISD::FLOG10, VT, Expand);
1320 
1321       setCondCodeAction(ISD::SETO, VT, Expand);
1322       setCondCodeAction(ISD::SETOLT, VT, Expand);
1323       setCondCodeAction(ISD::SETLT, VT, Expand);
1324       setCondCodeAction(ISD::SETOLE, VT, Expand);
1325       setCondCodeAction(ISD::SETLE, VT, Expand);
1326       setCondCodeAction(ISD::SETULT, VT, Expand);
1327       setCondCodeAction(ISD::SETULE, VT, Expand);
1328       setCondCodeAction(ISD::SETUGE, VT, Expand);
1329       setCondCodeAction(ISD::SETUGT, VT, Expand);
1330       setCondCodeAction(ISD::SETUEQ, VT, Expand);
1331       setCondCodeAction(ISD::SETONE, VT, Expand);
1332     }
1333 
1334     for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16}) {
1335       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1336       setOperationAction(ISD::MGATHER, VT, Custom);
1337       setOperationAction(ISD::MSCATTER, VT, Custom);
1338       setOperationAction(ISD::MLOAD, VT, Custom);
1339       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1340       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
1341     }
1342 
1343     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
1344     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
1345 
1346     // NEON doesn't support integer divides, but SVE does
1347     for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32,
1348                     MVT::v4i32, MVT::v1i64, MVT::v2i64}) {
1349       setOperationAction(ISD::SDIV, VT, Custom);
1350       setOperationAction(ISD::UDIV, VT, Custom);
1351     }
1352 
1353     // NEON doesn't support 64-bit vector integer muls, but SVE does.
1354     setOperationAction(ISD::MUL, MVT::v1i64, Custom);
1355     setOperationAction(ISD::MUL, MVT::v2i64, Custom);
1356 
1357     // NOTE: Currently this has to happen after computeRegisterProperties rather
1358     // than the preferred option of combining it with the addRegisterClass call.
1359     if (Subtarget->useSVEForFixedLengthVectors()) {
1360       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
1361         if (useSVEForFixedLengthVectorVT(VT))
1362           addTypeForFixedLengthSVE(VT);
1363       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
1364         if (useSVEForFixedLengthVectorVT(VT))
1365           addTypeForFixedLengthSVE(VT);
1366 
      // A 64-bit result can come from an input wider than a NEON register.
1368       for (auto VT : {MVT::v8i8, MVT::v4i16})
1369         setOperationAction(ISD::TRUNCATE, VT, Custom);
1370       setOperationAction(ISD::FP_ROUND, MVT::v4f16, Custom);
1371 
      // A 128-bit result implies an input wider than a NEON register.
1373       for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
1374         setOperationAction(ISD::TRUNCATE, VT, Custom);
1375       for (auto VT : {MVT::v8f16, MVT::v4f32})
1376         setOperationAction(ISD::FP_ROUND, VT, Custom);
1377 
1378       // These operations are not supported on NEON but SVE can do them.
1379       setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom);
1380       setOperationAction(ISD::CTLZ, MVT::v1i64, Custom);
1381       setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
1382       setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);
1383       setOperationAction(ISD::MULHS, MVT::v1i64, Custom);
1384       setOperationAction(ISD::MULHS, MVT::v2i64, Custom);
1385       setOperationAction(ISD::MULHU, MVT::v1i64, Custom);
1386       setOperationAction(ISD::MULHU, MVT::v2i64, Custom);
1387       setOperationAction(ISD::SMAX, MVT::v1i64, Custom);
1388       setOperationAction(ISD::SMAX, MVT::v2i64, Custom);
1389       setOperationAction(ISD::SMIN, MVT::v1i64, Custom);
1390       setOperationAction(ISD::SMIN, MVT::v2i64, Custom);
1391       setOperationAction(ISD::UMAX, MVT::v1i64, Custom);
1392       setOperationAction(ISD::UMAX, MVT::v2i64, Custom);
1393       setOperationAction(ISD::UMIN, MVT::v1i64, Custom);
1394       setOperationAction(ISD::UMIN, MVT::v2i64, Custom);
1395       setOperationAction(ISD::VECREDUCE_SMAX, MVT::v2i64, Custom);
1396       setOperationAction(ISD::VECREDUCE_SMIN, MVT::v2i64, Custom);
1397       setOperationAction(ISD::VECREDUCE_UMAX, MVT::v2i64, Custom);
1398       setOperationAction(ISD::VECREDUCE_UMIN, MVT::v2i64, Custom);
1399 
1400       // Int operations with no NEON support.
1401       for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
1402                       MVT::v2i32, MVT::v4i32, MVT::v2i64}) {
1403         setOperationAction(ISD::BITREVERSE, VT, Custom);
1404         setOperationAction(ISD::CTTZ, VT, Custom);
1405         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
1406         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
1407         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
1408       }
1409 
1410       // FP operations with no NEON support.
1411       for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32,
1412                       MVT::v1f64, MVT::v2f64})
1413         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1414 
1415       // Use SVE for vectors with more than 2 elements.
1416       for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v4f32})
1417         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1418     }
1419 
1420     setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv2i1, MVT::nxv2i64);
1421     setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv4i1, MVT::nxv4i32);
1422     setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv8i1, MVT::nxv8i16);
1423     setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv16i1, MVT::nxv16i8);
1424 
1425     setOperationAction(ISD::VSCALE, MVT::i32, Custom);
1426   }
1427 
1428   if (Subtarget->hasMOPS() && Subtarget->hasMTE()) {
1429     // Only required for llvm.aarch64.mops.memset.tag
1430     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
1431   }
1432 
1433   PredictableSelectIsExpensive = Subtarget->predictableSelectIsExpensive();
1434 
1435   IsStrictFPEnabled = true;
1436 }
1437 
1438 void AArch64TargetLowering::addTypeForNEON(MVT VT) {
1439   assert(VT.isVector() && "VT should be a vector type");
1440 
1441   if (VT.isFloatingPoint()) {
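    // Promote loads and stores of FP vectors to the same-sized integer vector
    // type, e.g. a v4f32 load is performed as a v4i32 load and bitcast back
    // to v4f32 (illustrative example).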
1442     MVT PromoteTo = EVT(VT).changeVectorElementTypeToInteger().getSimpleVT();
1443     setOperationPromotedToType(ISD::LOAD, VT, PromoteTo);
1444     setOperationPromotedToType(ISD::STORE, VT, PromoteTo);
1445   }
1446 
1447   // Mark vector float intrinsics as expand.
1448   if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64) {
1449     setOperationAction(ISD::FSIN, VT, Expand);
1450     setOperationAction(ISD::FCOS, VT, Expand);
1451     setOperationAction(ISD::FPOW, VT, Expand);
1452     setOperationAction(ISD::FLOG, VT, Expand);
1453     setOperationAction(ISD::FLOG2, VT, Expand);
1454     setOperationAction(ISD::FLOG10, VT, Expand);
1455     setOperationAction(ISD::FEXP, VT, Expand);
1456     setOperationAction(ISD::FEXP2, VT, Expand);
1457   }
1458 
1459   // But we do support custom-lowering for FCOPYSIGN.
1460   if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
1461       ((VT == MVT::v4f16 || VT == MVT::v8f16) && Subtarget->hasFullFP16()))
1462     setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1463 
1464   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1465   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1466   setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1467   setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1468   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1469   setOperationAction(ISD::SRA, VT, Custom);
1470   setOperationAction(ISD::SRL, VT, Custom);
1471   setOperationAction(ISD::SHL, VT, Custom);
1472   setOperationAction(ISD::OR, VT, Custom);
1473   setOperationAction(ISD::SETCC, VT, Custom);
1474   setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
1475 
1476   setOperationAction(ISD::SELECT, VT, Expand);
1477   setOperationAction(ISD::SELECT_CC, VT, Expand);
1478   setOperationAction(ISD::VSELECT, VT, Expand);
1479   for (MVT InnerVT : MVT::all_valuetypes())
1480     setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
1481 
  // CNT only supports byte (B) element sizes; wider element types use UADDLP
  // to widen the result afterwards.
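  // For example, a v4i32 CTPOP can be lowered as a CNT over the v16i8 bytes
  // followed by two UADDLP widening steps (v16i8 -> v8i16 -> v4i32); this is
  // an illustrative sketch of the intended lowering.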
1483   if (VT != MVT::v8i8 && VT != MVT::v16i8)
1484     setOperationAction(ISD::CTPOP, VT, Custom);
1485 
1486   setOperationAction(ISD::UDIV, VT, Expand);
1487   setOperationAction(ISD::SDIV, VT, Expand);
1488   setOperationAction(ISD::UREM, VT, Expand);
1489   setOperationAction(ISD::SREM, VT, Expand);
1490   setOperationAction(ISD::FREM, VT, Expand);
1491 
1492   for (unsigned Opcode :
1493        {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FP_TO_SINT_SAT,
1494         ISD::FP_TO_UINT_SAT, ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT})
1495     setOperationAction(Opcode, VT, Custom);
1496 
1497   if (!VT.isFloatingPoint())
1498     setOperationAction(ISD::ABS, VT, Legal);
1499 
1500   // [SU][MIN|MAX] are available for all NEON types apart from i64.
1501   if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64)
1502     for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
1503       setOperationAction(Opcode, VT, Legal);
1504 
1505   // F[MIN|MAX][NUM|NAN] and simple strict operations are available for all FP
1506   // NEON types.
1507   if (VT.isFloatingPoint() &&
1508       VT.getVectorElementType() != MVT::bf16 &&
1509       (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16()))
1510     for (unsigned Opcode :
1511          {ISD::FMINIMUM, ISD::FMAXIMUM, ISD::FMINNUM, ISD::FMAXNUM,
1512           ISD::STRICT_FMINIMUM, ISD::STRICT_FMAXIMUM, ISD::STRICT_FMINNUM,
1513           ISD::STRICT_FMAXNUM, ISD::STRICT_FADD, ISD::STRICT_FSUB,
1514           ISD::STRICT_FMUL, ISD::STRICT_FDIV, ISD::STRICT_FMA,
1515           ISD::STRICT_FSQRT})
1516       setOperationAction(Opcode, VT, Legal);
1517 
1518   // Strict fp extend and trunc are legal
1519   if (VT.isFloatingPoint() && VT.getScalarSizeInBits() != 16)
1520     setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
1521   if (VT.isFloatingPoint() && VT.getScalarSizeInBits() != 64)
1522     setOperationAction(ISD::STRICT_FP_ROUND, VT, Legal);
1523 
  // FIXME: We could potentially make use of the vector comparison instructions
  // for STRICT_FSETCC and STRICT_FSETCCS, but there are a number of
  // complications:
  //  * FCMPEQ/NE are quiet comparisons while the rest are signalling
  //    comparisons, so we would need to expand when the condition code doesn't
  //    match the kind of comparison.
1530   //  * Some kinds of comparison require more than one FCMXY instruction so
1531   //    would need to be expanded instead.
1532   //  * The lowering of the non-strict versions involves target-specific ISD
1533   //    nodes so we would likely need to add strict versions of all of them and
1534   //    handle them appropriately.
1535   setOperationAction(ISD::STRICT_FSETCC, VT, Expand);
1536   setOperationAction(ISD::STRICT_FSETCCS, VT, Expand);
1537 
1538   if (Subtarget->isLittleEndian()) {
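    // Mark every pre/post-indexed addressing mode as legal for this vector
    // type; e.g. this allows a post-indexed load such as "ldr d0, [x0], #8"
    // to be selected (illustrative example).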
1539     for (unsigned im = (unsigned)ISD::PRE_INC;
1540          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
1541       setIndexedLoadAction(im, VT, Legal);
1542       setIndexedStoreAction(im, VT, Legal);
1543     }
1544   }
1545 }
1546 
1547 bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
1548                                                           EVT OpVT) const {
1549   // Only SVE has a 1:1 mapping from intrinsic -> instruction (whilelo).
1550   if (!Subtarget->hasSVE())
1551     return true;
1552 
1553   // We can only support legal predicate result types. We can use the SVE
1554   // whilelo instruction for generating fixed-width predicates too.
1555   if (ResVT != MVT::nxv2i1 && ResVT != MVT::nxv4i1 && ResVT != MVT::nxv8i1 &&
1556       ResVT != MVT::nxv16i1 && ResVT != MVT::v2i1 && ResVT != MVT::v4i1 &&
1557       ResVT != MVT::v8i1 && ResVT != MVT::v16i1)
1558     return true;
1559 
1560   // The whilelo instruction only works with i32 or i64 scalar inputs.
1561   if (OpVT != MVT::i32 && OpVT != MVT::i64)
1562     return true;
1563 
1564   return false;
1565 }
1566 
1567 void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
1568   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
1569 
1570   // By default everything must be expanded.
1571   for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
1572     setOperationAction(Op, VT, Expand);
1573 
1574   // We use EXTRACT_SUBVECTOR to "cast" a scalable vector to a fixed length one.
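  // For example, a fixed-length v4i32 result is produced by extracting
  // subvector 0 from its scalable nxv4i32 container (illustrative example).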
1575   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1576 
1577   if (VT.isFloatingPoint()) {
1578     setCondCodeAction(ISD::SETO, VT, Expand);
1579     setCondCodeAction(ISD::SETOLT, VT, Expand);
1580     setCondCodeAction(ISD::SETLT, VT, Expand);
1581     setCondCodeAction(ISD::SETOLE, VT, Expand);
1582     setCondCodeAction(ISD::SETLE, VT, Expand);
1583     setCondCodeAction(ISD::SETULT, VT, Expand);
1584     setCondCodeAction(ISD::SETULE, VT, Expand);
1585     setCondCodeAction(ISD::SETUGE, VT, Expand);
1586     setCondCodeAction(ISD::SETUGT, VT, Expand);
1587     setCondCodeAction(ISD::SETUEQ, VT, Expand);
1588     setCondCodeAction(ISD::SETONE, VT, Expand);
1589   }
1590 
1591   // Mark integer truncating stores/extending loads as having custom lowering
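  // For example, with VT == v4i32 this marks the v4i8 and v4i16 truncating
  // stores and the matching sign/zero-extending loads (illustrative example).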
1592   if (VT.isInteger()) {
1593     MVT InnerVT = VT.changeVectorElementType(MVT::i8);
1594     while (InnerVT != VT) {
1595       setTruncStoreAction(VT, InnerVT, Custom);
1596       setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Custom);
1597       setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Custom);
1598       InnerVT = InnerVT.changeVectorElementType(
1599           MVT::getIntegerVT(2 * InnerVT.getScalarSizeInBits()));
1600     }
1601   }
1602 
1603   // Mark floating-point truncating stores/extending loads as having custom
1604   // lowering
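  // For example, with VT == v4f64 this marks the v4f16 and v4f32 truncating
  // stores and the matching extending loads (illustrative example).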
1605   if (VT.isFloatingPoint()) {
1606     MVT InnerVT = VT.changeVectorElementType(MVT::f16);
1607     while (InnerVT != VT) {
1608       setTruncStoreAction(VT, InnerVT, Custom);
1609       setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Custom);
1610       InnerVT = InnerVT.changeVectorElementType(
1611           MVT::getFloatingPointVT(2 * InnerVT.getScalarSizeInBits()));
1612     }
1613   }
1614 
1615   // Lower fixed length vector operations to scalable equivalents.
1616   setOperationAction(ISD::ABS, VT, Custom);
1617   setOperationAction(ISD::ADD, VT, Custom);
1618   setOperationAction(ISD::AND, VT, Custom);
1619   setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1620   setOperationAction(ISD::BITCAST, VT, Custom);
1621   setOperationAction(ISD::BITREVERSE, VT, Custom);
1622   setOperationAction(ISD::BSWAP, VT, Custom);
1623   setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1624   setOperationAction(ISD::CTLZ, VT, Custom);
1625   setOperationAction(ISD::CTPOP, VT, Custom);
1626   setOperationAction(ISD::CTTZ, VT, Custom);
1627   setOperationAction(ISD::FABS, VT, Custom);
1628   setOperationAction(ISD::FADD, VT, Custom);
1629   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1630   setOperationAction(ISD::FCEIL, VT, Custom);
1631   setOperationAction(ISD::FDIV, VT, Custom);
1632   setOperationAction(ISD::FFLOOR, VT, Custom);
1633   setOperationAction(ISD::FMA, VT, Custom);
1634   setOperationAction(ISD::FMAXIMUM, VT, Custom);
1635   setOperationAction(ISD::FMAXNUM, VT, Custom);
1636   setOperationAction(ISD::FMINIMUM, VT, Custom);
1637   setOperationAction(ISD::FMINNUM, VT, Custom);
1638   setOperationAction(ISD::FMUL, VT, Custom);
1639   setOperationAction(ISD::FNEARBYINT, VT, Custom);
1640   setOperationAction(ISD::FNEG, VT, Custom);
1641   setOperationAction(ISD::FP_EXTEND, VT, Custom);
1642   setOperationAction(ISD::FP_ROUND, VT, Custom);
1643   setOperationAction(ISD::FP_TO_SINT, VT, Custom);
1644   setOperationAction(ISD::FP_TO_UINT, VT, Custom);
1645   setOperationAction(ISD::FRINT, VT, Custom);
1646   setOperationAction(ISD::FROUND, VT, Custom);
1647   setOperationAction(ISD::FROUNDEVEN, VT, Custom);
1648   setOperationAction(ISD::FSQRT, VT, Custom);
1649   setOperationAction(ISD::FSUB, VT, Custom);
1650   setOperationAction(ISD::FTRUNC, VT, Custom);
1651   setOperationAction(ISD::LOAD, VT, Custom);
1652   setOperationAction(ISD::MGATHER, VT, Custom);
1653   setOperationAction(ISD::MLOAD, VT, Custom);
1654   setOperationAction(ISD::MSCATTER, VT, Custom);
1655   setOperationAction(ISD::MSTORE, VT, Custom);
1656   setOperationAction(ISD::MUL, VT, Custom);
1657   setOperationAction(ISD::MULHS, VT, Custom);
1658   setOperationAction(ISD::MULHU, VT, Custom);
1659   setOperationAction(ISD::OR, VT, Custom);
1660   setOperationAction(ISD::SDIV, VT, Custom);
1661   setOperationAction(ISD::SELECT, VT, Custom);
1662   setOperationAction(ISD::SETCC, VT, Custom);
1663   setOperationAction(ISD::SHL, VT, Custom);
1664   setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1665   setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1666   setOperationAction(ISD::SINT_TO_FP, VT, Custom);
1667   setOperationAction(ISD::SMAX, VT, Custom);
1668   setOperationAction(ISD::SMIN, VT, Custom);
1669   setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
1670   setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
1671   setOperationAction(ISD::SRA, VT, Custom);
1672   setOperationAction(ISD::SRL, VT, Custom);
1673   setOperationAction(ISD::STORE, VT, Custom);
1674   setOperationAction(ISD::SUB, VT, Custom);
1675   setOperationAction(ISD::TRUNCATE, VT, Custom);
1676   setOperationAction(ISD::UDIV, VT, Custom);
1677   setOperationAction(ISD::UINT_TO_FP, VT, Custom);
1678   setOperationAction(ISD::UMAX, VT, Custom);
1679   setOperationAction(ISD::UMIN, VT, Custom);
1680   setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
1681   setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
1682   setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1683   setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1684   setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
1685   setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
1686   setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
1687   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1688   setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
1689   setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
1690   setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
1691   setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
1692   setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
1693   setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1694   setOperationAction(ISD::VSELECT, VT, Custom);
1695   setOperationAction(ISD::XOR, VT, Custom);
1696   setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1697 }
1698 
1699 void AArch64TargetLowering::addDRTypeForNEON(MVT VT) {
1700   addRegisterClass(VT, &AArch64::FPR64RegClass);
1701   addTypeForNEON(VT);
1702 }
1703 
1704 void AArch64TargetLowering::addQRTypeForNEON(MVT VT) {
1705   addRegisterClass(VT, &AArch64::FPR128RegClass);
1706   addTypeForNEON(VT);
1707 }
1708 
1709 EVT AArch64TargetLowering::getSetCCResultType(const DataLayout &,
1710                                               LLVMContext &C, EVT VT) const {
1711   if (!VT.isVector())
1712     return MVT::i32;
1713   if (VT.isScalableVector())
1714     return EVT::getVectorVT(C, MVT::i1, VT.getVectorElementCount());
1715   return VT.changeVectorElementTypeToInteger();
1716 }
1717 
1718 static bool optimizeLogicalImm(SDValue Op, unsigned Size, uint64_t Imm,
1719                                const APInt &Demanded,
1720                                TargetLowering::TargetLoweringOpt &TLO,
1721                                unsigned NewOpc) {
1722   uint64_t OldImm = Imm, NewImm, Enc;
1723   uint64_t Mask = ((uint64_t)(-1LL) >> (64 - Size)), OrigMask = Mask;
1724 
1725   // Return if the immediate is already all zeros, all ones, a bimm32 or a
1726   // bimm64.
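  // (A bitmask immediate is a rotated run of contiguous ones replicated at an
  // element size of 2, 4, 8, 16, 32 or 64 bits, e.g. 0x00ff00ff00ff00ff;
  // illustrative reminder.)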
1727   if (Imm == 0 || Imm == Mask ||
1728       AArch64_AM::isLogicalImmediate(Imm & Mask, Size))
1729     return false;
1730 
1731   unsigned EltSize = Size;
1732   uint64_t DemandedBits = Demanded.getZExtValue();
1733 
1734   // Clear bits that are not demanded.
1735   Imm &= DemandedBits;
1736 
1737   while (true) {
    // The goal here is to set the non-demanded bits in a way that minimizes
    // the number of transitions between 0 and 1. In order to achieve this goal,
1740     // we set the non-demanded bits to the value of the preceding demanded bits.
1741     // For example, if we have an immediate 0bx10xx0x1 ('x' indicates a
1742     // non-demanded bit), we copy bit0 (1) to the least significant 'x',
1743     // bit2 (0) to 'xx', and bit6 (1) to the most significant 'x'.
1744     // The final result is 0b11000011.
1745     uint64_t NonDemandedBits = ~DemandedBits;
1746     uint64_t InvertedImm = ~Imm & DemandedBits;
1747     uint64_t RotatedImm =
1748         ((InvertedImm << 1) | (InvertedImm >> (EltSize - 1) & 1)) &
1749         NonDemandedBits;
1750     uint64_t Sum = RotatedImm + NonDemandedBits;
1751     bool Carry = NonDemandedBits & ~Sum & (1ULL << (EltSize - 1));
1752     uint64_t Ones = (Sum + Carry) & NonDemandedBits;
1753     NewImm = (Imm | Ones) & Mask;
1754 
1755     // If NewImm or its bitwise NOT is a shifted mask, it is a bitmask immediate
1756     // or all-ones or all-zeros, in which case we can stop searching. Otherwise,
1757     // we halve the element size and continue the search.
1758     if (isShiftedMask_64(NewImm) || isShiftedMask_64(~(NewImm | ~Mask)))
1759       break;
1760 
    // We cannot shrink the element size any further if it is 2 bits.
1762     if (EltSize == 2)
1763       return false;
1764 
1765     EltSize /= 2;
1766     Mask >>= EltSize;
1767     uint64_t Hi = Imm >> EltSize, DemandedBitsHi = DemandedBits >> EltSize;
1768 
    // Return if there is a mismatch in any of the demanded bits of Imm and Hi.
1770     if (((Imm ^ Hi) & (DemandedBits & DemandedBitsHi) & Mask) != 0)
1771       return false;
1772 
1773     // Merge the upper and lower halves of Imm and DemandedBits.
1774     Imm |= Hi;
1775     DemandedBits |= DemandedBitsHi;
1776   }
1777 
1778   ++NumOptimizedImms;
1779 
1780   // Replicate the element across the register width.
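  // For example, if the search settled on a 16-bit element such as 0x00ff,
  // the final 64-bit immediate becomes 0x00ff00ff00ff00ff (illustrative
  // example).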
1781   while (EltSize < Size) {
1782     NewImm |= NewImm << EltSize;
1783     EltSize *= 2;
1784   }
1785 
1786   (void)OldImm;
1787   assert(((OldImm ^ NewImm) & Demanded.getZExtValue()) == 0 &&
1788          "demanded bits should never be altered");
1789   assert(OldImm != NewImm && "the new imm shouldn't be equal to the old imm");
1790 
1791   // Create the new constant immediate node.
1792   EVT VT = Op.getValueType();
1793   SDLoc DL(Op);
1794   SDValue New;
1795 
1796   // If the new constant immediate is all-zeros or all-ones, let the target
1797   // independent DAG combine optimize this node.
1798   if (NewImm == 0 || NewImm == OrigMask) {
1799     New = TLO.DAG.getNode(Op.getOpcode(), DL, VT, Op.getOperand(0),
1800                           TLO.DAG.getConstant(NewImm, DL, VT));
1801   // Otherwise, create a machine node so that target independent DAG combine
1802   // doesn't undo this optimization.
1803   } else {
1804     Enc = AArch64_AM::encodeLogicalImmediate(NewImm, Size);
1805     SDValue EncConst = TLO.DAG.getTargetConstant(Enc, DL, VT);
1806     New = SDValue(
1807         TLO.DAG.getMachineNode(NewOpc, DL, VT, Op.getOperand(0), EncConst), 0);
1808   }
1809 
1810   return TLO.CombineTo(Op, New);
1811 }
1812 
1813 bool AArch64TargetLowering::targetShrinkDemandedConstant(
1814     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
1815     TargetLoweringOpt &TLO) const {
1816   // Delay this optimization to as late as possible.
1817   if (!TLO.LegalOps)
1818     return false;
1819 
1820   if (!EnableOptimizeLogicalImm)
1821     return false;
1822 
1823   EVT VT = Op.getValueType();
1824   if (VT.isVector())
1825     return false;
1826 
1827   unsigned Size = VT.getSizeInBits();
1828   assert((Size == 32 || Size == 64) &&
1829          "i32 or i64 is expected after legalization.");
1830 
1831   // Exit early if we demand all bits.
1832   if (DemandedBits.countPopulation() == Size)
1833     return false;
1834 
1835   unsigned NewOpc;
1836   switch (Op.getOpcode()) {
1837   default:
1838     return false;
1839   case ISD::AND:
1840     NewOpc = Size == 32 ? AArch64::ANDWri : AArch64::ANDXri;
1841     break;
1842   case ISD::OR:
1843     NewOpc = Size == 32 ? AArch64::ORRWri : AArch64::ORRXri;
1844     break;
1845   case ISD::XOR:
1846     NewOpc = Size == 32 ? AArch64::EORWri : AArch64::EORXri;
1847     break;
1848   }
1849   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
1850   if (!C)
1851     return false;
1852   uint64_t Imm = C->getZExtValue();
1853   return optimizeLogicalImm(Op, Size, Imm, DemandedBits, TLO, NewOpc);
1854 }
1855 
/// computeKnownBitsForTargetNode - Determine which of the bits specified in
/// Mask are known to be either zero or one and return them in Known.
1858 void AArch64TargetLowering::computeKnownBitsForTargetNode(
1859     const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
1860     const SelectionDAG &DAG, unsigned Depth) const {
1861   switch (Op.getOpcode()) {
1862   default:
1863     break;
1864   case AArch64ISD::DUP: {
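    // DUP from a GPR may implicitly truncate the scalar source; e.g. a v8i8
    // DUP takes an i32 operand and uses only its low 8 bits (illustrative
    // example).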
1865     SDValue SrcOp = Op.getOperand(0);
1866     Known = DAG.computeKnownBits(SrcOp, Depth + 1);
1867     if (SrcOp.getValueSizeInBits() != Op.getScalarValueSizeInBits()) {
1868       assert(SrcOp.getValueSizeInBits() > Op.getScalarValueSizeInBits() &&
1869              "Expected DUP implicit truncation");
1870       Known = Known.trunc(Op.getScalarValueSizeInBits());
1871     }
1872     break;
1873   }
1874   case AArch64ISD::CSEL: {
1875     KnownBits Known2;
1876     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1877     Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
1878     Known = KnownBits::commonBits(Known, Known2);
1879     break;
1880   }
1881   case AArch64ISD::BICi: {
1882     // Compute the bit cleared value.
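    // Operand 1 is the immediate byte and operand 2 the left-shift amount;
    // e.g. imm 0xff shifted by 8 clears bits [15:8] of each element
    // (illustrative example).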
1883     uint64_t Mask =
1884         ~(Op->getConstantOperandVal(1) << Op->getConstantOperandVal(2));
1885     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1886     Known &= KnownBits::makeConstant(APInt(Known.getBitWidth(), Mask));
1887     break;
1888   }
1889   case AArch64ISD::VLSHR: {
1890     KnownBits Known2;
1891     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1892     Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
1893     Known = KnownBits::lshr(Known, Known2);
1894     break;
1895   }
1896   case AArch64ISD::VASHR: {
1897     KnownBits Known2;
1898     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1899     Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
1900     Known = KnownBits::ashr(Known, Known2);
1901     break;
1902   }
1903   case AArch64ISD::LOADgot:
1904   case AArch64ISD::ADDlow: {
1905     if (!Subtarget->isTargetILP32())
1906       break;
1907     // In ILP32 mode all valid pointers are in the low 4GB of the address-space.
1908     Known.Zero = APInt::getHighBitsSet(64, 32);
1909     break;
1910   }
1911   case AArch64ISD::ASSERT_ZEXT_BOOL: {
1912     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1913     Known.Zero |= APInt(Known.getBitWidth(), 0xFE);
1914     break;
1915   }
1916   case ISD::INTRINSIC_W_CHAIN: {
1917     ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
1918     Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
1919     switch (IntID) {
1920     default: return;
1921     case Intrinsic::aarch64_ldaxr:
1922     case Intrinsic::aarch64_ldxr: {
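      // The exclusive loads zero-extend the loaded value, so every bit above
      // the memory width is known to be zero; e.g. an i8 LDXRB into a 64-bit
      // register leaves the top 56 bits zero (illustrative example).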
1923       unsigned BitWidth = Known.getBitWidth();
1924       EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
1925       unsigned MemBits = VT.getScalarSizeInBits();
1926       Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
1927       return;
1928     }
1929     }
1930     break;
1931   }
1932   case ISD::INTRINSIC_WO_CHAIN:
1933   case ISD::INTRINSIC_VOID: {
1934     unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1935     switch (IntNo) {
1936     default:
1937       break;
1938     case Intrinsic::aarch64_neon_umaxv:
1939     case Intrinsic::aarch64_neon_uminv: {
      // Figure out the datatype of the vector operand. The UMINV instruction
      // will zero extend the result, so we can mark as known zero all the
      // bits larger than the element datatype. 32-bit or larger elements don't
      // need this as those are legal types and will be handled by isel
      // directly.
1944       MVT VT = Op.getOperand(1).getValueType().getSimpleVT();
1945       unsigned BitWidth = Known.getBitWidth();
1946       if (VT == MVT::v8i8 || VT == MVT::v16i8) {
1947         assert(BitWidth >= 8 && "Unexpected width!");
1948         APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 8);
1949         Known.Zero |= Mask;
1950       } else if (VT == MVT::v4i16 || VT == MVT::v8i16) {
1951         assert(BitWidth >= 16 && "Unexpected width!");
1952         APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
1953         Known.Zero |= Mask;
1954       }
1955       break;
    }
1957     }
1958   }
1959   }
1960 }
1961 
1962 MVT AArch64TargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
1963                                                   EVT) const {
1964   return MVT::i64;
1965 }
1966 
1967 bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
1968     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
1969     bool *Fast) const {
1970   if (Subtarget->requiresStrictAlign())
1971     return false;
1972 
1973   if (Fast) {
1974     // Some CPUs are fine with unaligned stores except for 128-bit ones.
1975     *Fast = !Subtarget->isMisaligned128StoreSlow() || VT.getStoreSize() != 16 ||
1976             // See comments in performSTORECombine() for more details about
1977             // these conditions.
1978 
1979             // Code that uses clang vector extensions can mark that it
1980             // wants unaligned accesses to be treated as fast by
1981             // underspecifying alignment to be 1 or 2.
1982             Alignment <= 2 ||
1983 
1984             // Disregard v2i64. Memcpy lowering produces those and splitting
1985             // them regresses performance on micro-benchmarks and olden/bh.
1986             VT == MVT::v2i64;
1987   }
1988   return true;
1989 }
1990 
1991 // Same as above but handling LLTs instead.
1992 bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
1993     LLT Ty, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
1994     bool *Fast) const {
1995   if (Subtarget->requiresStrictAlign())
1996     return false;
1997 
1998   if (Fast) {
1999     // Some CPUs are fine with unaligned stores except for 128-bit ones.
2000     *Fast = !Subtarget->isMisaligned128StoreSlow() ||
2001             Ty.getSizeInBytes() != 16 ||
2002             // See comments in performSTORECombine() for more details about
2003             // these conditions.
2004 
2005             // Code that uses clang vector extensions can mark that it
2006             // wants unaligned accesses to be treated as fast by
2007             // underspecifying alignment to be 1 or 2.
2008             Alignment <= 2 ||
2009 
2010             // Disregard v2i64. Memcpy lowering produces those and splitting
2011             // them regresses performance on micro-benchmarks and olden/bh.
2012             Ty == LLT::fixed_vector(2, 64);
2013   }
2014   return true;
2015 }
2016 
2017 FastISel *
2018 AArch64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
2019                                       const TargetLibraryInfo *libInfo) const {
2020   return AArch64::createFastISel(funcInfo, libInfo);
2021 }
2022 
2023 const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
2024 #define MAKE_CASE(V)                                                           \
2025   case V:                                                                      \
2026     return #V;
2027   switch ((AArch64ISD::NodeType)Opcode) {
2028   case AArch64ISD::FIRST_NUMBER:
2029     break;
2030     MAKE_CASE(AArch64ISD::CALL)
2031     MAKE_CASE(AArch64ISD::ADRP)
2032     MAKE_CASE(AArch64ISD::ADR)
2033     MAKE_CASE(AArch64ISD::ADDlow)
2034     MAKE_CASE(AArch64ISD::LOADgot)
2035     MAKE_CASE(AArch64ISD::RET_FLAG)
2036     MAKE_CASE(AArch64ISD::BRCOND)
2037     MAKE_CASE(AArch64ISD::CSEL)
2038     MAKE_CASE(AArch64ISD::CSINV)
2039     MAKE_CASE(AArch64ISD::CSNEG)
2040     MAKE_CASE(AArch64ISD::CSINC)
2041     MAKE_CASE(AArch64ISD::THREAD_POINTER)
2042     MAKE_CASE(AArch64ISD::TLSDESC_CALLSEQ)
2043     MAKE_CASE(AArch64ISD::ABDS_PRED)
2044     MAKE_CASE(AArch64ISD::ABDU_PRED)
2045     MAKE_CASE(AArch64ISD::MUL_PRED)
2046     MAKE_CASE(AArch64ISD::MULHS_PRED)
2047     MAKE_CASE(AArch64ISD::MULHU_PRED)
2048     MAKE_CASE(AArch64ISD::SDIV_PRED)
2049     MAKE_CASE(AArch64ISD::SHL_PRED)
2050     MAKE_CASE(AArch64ISD::SMAX_PRED)
2051     MAKE_CASE(AArch64ISD::SMIN_PRED)
2052     MAKE_CASE(AArch64ISD::SRA_PRED)
2053     MAKE_CASE(AArch64ISD::SRL_PRED)
2054     MAKE_CASE(AArch64ISD::UDIV_PRED)
2055     MAKE_CASE(AArch64ISD::UMAX_PRED)
2056     MAKE_CASE(AArch64ISD::UMIN_PRED)
2057     MAKE_CASE(AArch64ISD::SRAD_MERGE_OP1)
2058     MAKE_CASE(AArch64ISD::FNEG_MERGE_PASSTHRU)
2059     MAKE_CASE(AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU)
2060     MAKE_CASE(AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU)
2061     MAKE_CASE(AArch64ISD::FCEIL_MERGE_PASSTHRU)
2062     MAKE_CASE(AArch64ISD::FFLOOR_MERGE_PASSTHRU)
2063     MAKE_CASE(AArch64ISD::FNEARBYINT_MERGE_PASSTHRU)
2064     MAKE_CASE(AArch64ISD::FRINT_MERGE_PASSTHRU)
2065     MAKE_CASE(AArch64ISD::FROUND_MERGE_PASSTHRU)
2066     MAKE_CASE(AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU)
2067     MAKE_CASE(AArch64ISD::FTRUNC_MERGE_PASSTHRU)
2068     MAKE_CASE(AArch64ISD::FP_ROUND_MERGE_PASSTHRU)
2069     MAKE_CASE(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU)
2070     MAKE_CASE(AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU)
2071     MAKE_CASE(AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU)
2072     MAKE_CASE(AArch64ISD::FCVTZU_MERGE_PASSTHRU)
2073     MAKE_CASE(AArch64ISD::FCVTZS_MERGE_PASSTHRU)
2074     MAKE_CASE(AArch64ISD::FSQRT_MERGE_PASSTHRU)
2075     MAKE_CASE(AArch64ISD::FRECPX_MERGE_PASSTHRU)
2076     MAKE_CASE(AArch64ISD::FABS_MERGE_PASSTHRU)
2077     MAKE_CASE(AArch64ISD::ABS_MERGE_PASSTHRU)
2078     MAKE_CASE(AArch64ISD::NEG_MERGE_PASSTHRU)
2079     MAKE_CASE(AArch64ISD::SETCC_MERGE_ZERO)
2080     MAKE_CASE(AArch64ISD::ADC)
2081     MAKE_CASE(AArch64ISD::SBC)
2082     MAKE_CASE(AArch64ISD::ADDS)
2083     MAKE_CASE(AArch64ISD::SUBS)
2084     MAKE_CASE(AArch64ISD::ADCS)
2085     MAKE_CASE(AArch64ISD::SBCS)
2086     MAKE_CASE(AArch64ISD::ANDS)
2087     MAKE_CASE(AArch64ISD::CCMP)
2088     MAKE_CASE(AArch64ISD::CCMN)
2089     MAKE_CASE(AArch64ISD::FCCMP)
2090     MAKE_CASE(AArch64ISD::FCMP)
2091     MAKE_CASE(AArch64ISD::STRICT_FCMP)
2092     MAKE_CASE(AArch64ISD::STRICT_FCMPE)
2093     MAKE_CASE(AArch64ISD::DUP)
2094     MAKE_CASE(AArch64ISD::DUPLANE8)
2095     MAKE_CASE(AArch64ISD::DUPLANE16)
2096     MAKE_CASE(AArch64ISD::DUPLANE32)
2097     MAKE_CASE(AArch64ISD::DUPLANE64)
2098     MAKE_CASE(AArch64ISD::DUPLANE128)
2099     MAKE_CASE(AArch64ISD::MOVI)
2100     MAKE_CASE(AArch64ISD::MOVIshift)
2101     MAKE_CASE(AArch64ISD::MOVIedit)
2102     MAKE_CASE(AArch64ISD::MOVImsl)
2103     MAKE_CASE(AArch64ISD::FMOV)
2104     MAKE_CASE(AArch64ISD::MVNIshift)
2105     MAKE_CASE(AArch64ISD::MVNImsl)
2106     MAKE_CASE(AArch64ISD::BICi)
2107     MAKE_CASE(AArch64ISD::ORRi)
2108     MAKE_CASE(AArch64ISD::BSP)
2109     MAKE_CASE(AArch64ISD::EXTR)
2110     MAKE_CASE(AArch64ISD::ZIP1)
2111     MAKE_CASE(AArch64ISD::ZIP2)
2112     MAKE_CASE(AArch64ISD::UZP1)
2113     MAKE_CASE(AArch64ISD::UZP2)
2114     MAKE_CASE(AArch64ISD::TRN1)
2115     MAKE_CASE(AArch64ISD::TRN2)
2116     MAKE_CASE(AArch64ISD::REV16)
2117     MAKE_CASE(AArch64ISD::REV32)
2118     MAKE_CASE(AArch64ISD::REV64)
2119     MAKE_CASE(AArch64ISD::EXT)
2120     MAKE_CASE(AArch64ISD::SPLICE)
2121     MAKE_CASE(AArch64ISD::VSHL)
2122     MAKE_CASE(AArch64ISD::VLSHR)
2123     MAKE_CASE(AArch64ISD::VASHR)
2124     MAKE_CASE(AArch64ISD::VSLI)
2125     MAKE_CASE(AArch64ISD::VSRI)
2126     MAKE_CASE(AArch64ISD::CMEQ)
2127     MAKE_CASE(AArch64ISD::CMGE)
2128     MAKE_CASE(AArch64ISD::CMGT)
2129     MAKE_CASE(AArch64ISD::CMHI)
2130     MAKE_CASE(AArch64ISD::CMHS)
2131     MAKE_CASE(AArch64ISD::FCMEQ)
2132     MAKE_CASE(AArch64ISD::FCMGE)
2133     MAKE_CASE(AArch64ISD::FCMGT)
2134     MAKE_CASE(AArch64ISD::CMEQz)
2135     MAKE_CASE(AArch64ISD::CMGEz)
2136     MAKE_CASE(AArch64ISD::CMGTz)
2137     MAKE_CASE(AArch64ISD::CMLEz)
2138     MAKE_CASE(AArch64ISD::CMLTz)
2139     MAKE_CASE(AArch64ISD::FCMEQz)
2140     MAKE_CASE(AArch64ISD::FCMGEz)
2141     MAKE_CASE(AArch64ISD::FCMGTz)
2142     MAKE_CASE(AArch64ISD::FCMLEz)
2143     MAKE_CASE(AArch64ISD::FCMLTz)
2144     MAKE_CASE(AArch64ISD::SADDV)
2145     MAKE_CASE(AArch64ISD::UADDV)
2146     MAKE_CASE(AArch64ISD::SDOT)
2147     MAKE_CASE(AArch64ISD::UDOT)
2148     MAKE_CASE(AArch64ISD::SMINV)
2149     MAKE_CASE(AArch64ISD::UMINV)
2150     MAKE_CASE(AArch64ISD::SMAXV)
2151     MAKE_CASE(AArch64ISD::UMAXV)
2152     MAKE_CASE(AArch64ISD::SADDV_PRED)
2153     MAKE_CASE(AArch64ISD::UADDV_PRED)
2154     MAKE_CASE(AArch64ISD::SMAXV_PRED)
2155     MAKE_CASE(AArch64ISD::UMAXV_PRED)
2156     MAKE_CASE(AArch64ISD::SMINV_PRED)
2157     MAKE_CASE(AArch64ISD::UMINV_PRED)
2158     MAKE_CASE(AArch64ISD::ORV_PRED)
2159     MAKE_CASE(AArch64ISD::EORV_PRED)
2160     MAKE_CASE(AArch64ISD::ANDV_PRED)
2161     MAKE_CASE(AArch64ISD::CLASTA_N)
2162     MAKE_CASE(AArch64ISD::CLASTB_N)
2163     MAKE_CASE(AArch64ISD::LASTA)
2164     MAKE_CASE(AArch64ISD::LASTB)
2165     MAKE_CASE(AArch64ISD::REINTERPRET_CAST)
2166     MAKE_CASE(AArch64ISD::LS64_BUILD)
2167     MAKE_CASE(AArch64ISD::LS64_EXTRACT)
2168     MAKE_CASE(AArch64ISD::TBL)
2169     MAKE_CASE(AArch64ISD::FADD_PRED)
2170     MAKE_CASE(AArch64ISD::FADDA_PRED)
2171     MAKE_CASE(AArch64ISD::FADDV_PRED)
2172     MAKE_CASE(AArch64ISD::FDIV_PRED)
2173     MAKE_CASE(AArch64ISD::FMA_PRED)
2174     MAKE_CASE(AArch64ISD::FMAX_PRED)
2175     MAKE_CASE(AArch64ISD::FMAXV_PRED)
2176     MAKE_CASE(AArch64ISD::FMAXNM_PRED)
2177     MAKE_CASE(AArch64ISD::FMAXNMV_PRED)
2178     MAKE_CASE(AArch64ISD::FMIN_PRED)
2179     MAKE_CASE(AArch64ISD::FMINV_PRED)
2180     MAKE_CASE(AArch64ISD::FMINNM_PRED)
2181     MAKE_CASE(AArch64ISD::FMINNMV_PRED)
2182     MAKE_CASE(AArch64ISD::FMUL_PRED)
2183     MAKE_CASE(AArch64ISD::FSUB_PRED)
2184     MAKE_CASE(AArch64ISD::RDSVL)
2185     MAKE_CASE(AArch64ISD::BIC)
2186     MAKE_CASE(AArch64ISD::BIT)
2187     MAKE_CASE(AArch64ISD::CBZ)
2188     MAKE_CASE(AArch64ISD::CBNZ)
2189     MAKE_CASE(AArch64ISD::TBZ)
2190     MAKE_CASE(AArch64ISD::TBNZ)
2191     MAKE_CASE(AArch64ISD::TC_RETURN)
2192     MAKE_CASE(AArch64ISD::PREFETCH)
2193     MAKE_CASE(AArch64ISD::SITOF)
2194     MAKE_CASE(AArch64ISD::UITOF)
2195     MAKE_CASE(AArch64ISD::NVCAST)
2196     MAKE_CASE(AArch64ISD::MRS)
2197     MAKE_CASE(AArch64ISD::SQSHL_I)
2198     MAKE_CASE(AArch64ISD::UQSHL_I)
2199     MAKE_CASE(AArch64ISD::SRSHR_I)
2200     MAKE_CASE(AArch64ISD::URSHR_I)
2201     MAKE_CASE(AArch64ISD::SQSHLU_I)
2202     MAKE_CASE(AArch64ISD::WrapperLarge)
2203     MAKE_CASE(AArch64ISD::LD2post)
2204     MAKE_CASE(AArch64ISD::LD3post)
2205     MAKE_CASE(AArch64ISD::LD4post)
2206     MAKE_CASE(AArch64ISD::ST2post)
2207     MAKE_CASE(AArch64ISD::ST3post)
2208     MAKE_CASE(AArch64ISD::ST4post)
2209     MAKE_CASE(AArch64ISD::LD1x2post)
2210     MAKE_CASE(AArch64ISD::LD1x3post)
2211     MAKE_CASE(AArch64ISD::LD1x4post)
2212     MAKE_CASE(AArch64ISD::ST1x2post)
2213     MAKE_CASE(AArch64ISD::ST1x3post)
2214     MAKE_CASE(AArch64ISD::ST1x4post)
2215     MAKE_CASE(AArch64ISD::LD1DUPpost)
2216     MAKE_CASE(AArch64ISD::LD2DUPpost)
2217     MAKE_CASE(AArch64ISD::LD3DUPpost)
2218     MAKE_CASE(AArch64ISD::LD4DUPpost)
2219     MAKE_CASE(AArch64ISD::LD1LANEpost)
2220     MAKE_CASE(AArch64ISD::LD2LANEpost)
2221     MAKE_CASE(AArch64ISD::LD3LANEpost)
2222     MAKE_CASE(AArch64ISD::LD4LANEpost)
2223     MAKE_CASE(AArch64ISD::ST2LANEpost)
2224     MAKE_CASE(AArch64ISD::ST3LANEpost)
2225     MAKE_CASE(AArch64ISD::ST4LANEpost)
2226     MAKE_CASE(AArch64ISD::SMULL)
2227     MAKE_CASE(AArch64ISD::UMULL)
2228     MAKE_CASE(AArch64ISD::FRECPE)
2229     MAKE_CASE(AArch64ISD::FRECPS)
2230     MAKE_CASE(AArch64ISD::FRSQRTE)
2231     MAKE_CASE(AArch64ISD::FRSQRTS)
2232     MAKE_CASE(AArch64ISD::STG)
2233     MAKE_CASE(AArch64ISD::STZG)
2234     MAKE_CASE(AArch64ISD::ST2G)
2235     MAKE_CASE(AArch64ISD::STZ2G)
2236     MAKE_CASE(AArch64ISD::SUNPKHI)
2237     MAKE_CASE(AArch64ISD::SUNPKLO)
2238     MAKE_CASE(AArch64ISD::UUNPKHI)
2239     MAKE_CASE(AArch64ISD::UUNPKLO)
2240     MAKE_CASE(AArch64ISD::INSR)
2241     MAKE_CASE(AArch64ISD::PTEST)
2242     MAKE_CASE(AArch64ISD::PTRUE)
2243     MAKE_CASE(AArch64ISD::LD1_MERGE_ZERO)
2244     MAKE_CASE(AArch64ISD::LD1S_MERGE_ZERO)
2245     MAKE_CASE(AArch64ISD::LDNF1_MERGE_ZERO)
2246     MAKE_CASE(AArch64ISD::LDNF1S_MERGE_ZERO)
2247     MAKE_CASE(AArch64ISD::LDFF1_MERGE_ZERO)
2248     MAKE_CASE(AArch64ISD::LDFF1S_MERGE_ZERO)
2249     MAKE_CASE(AArch64ISD::LD1RQ_MERGE_ZERO)
2250     MAKE_CASE(AArch64ISD::LD1RO_MERGE_ZERO)
2251     MAKE_CASE(AArch64ISD::SVE_LD2_MERGE_ZERO)
2252     MAKE_CASE(AArch64ISD::SVE_LD3_MERGE_ZERO)
2253     MAKE_CASE(AArch64ISD::SVE_LD4_MERGE_ZERO)
2254     MAKE_CASE(AArch64ISD::GLD1_MERGE_ZERO)
2255     MAKE_CASE(AArch64ISD::GLD1_SCALED_MERGE_ZERO)
2256     MAKE_CASE(AArch64ISD::GLD1_SXTW_MERGE_ZERO)
2257     MAKE_CASE(AArch64ISD::GLD1_UXTW_MERGE_ZERO)
2258     MAKE_CASE(AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO)
2259     MAKE_CASE(AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO)
2260     MAKE_CASE(AArch64ISD::GLD1_IMM_MERGE_ZERO)
2261     MAKE_CASE(AArch64ISD::GLD1S_MERGE_ZERO)
2262     MAKE_CASE(AArch64ISD::GLD1S_SCALED_MERGE_ZERO)
2263     MAKE_CASE(AArch64ISD::GLD1S_SXTW_MERGE_ZERO)
2264     MAKE_CASE(AArch64ISD::GLD1S_UXTW_MERGE_ZERO)
2265     MAKE_CASE(AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO)
2266     MAKE_CASE(AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO)
2267     MAKE_CASE(AArch64ISD::GLD1S_IMM_MERGE_ZERO)
2268     MAKE_CASE(AArch64ISD::GLDFF1_MERGE_ZERO)
2269     MAKE_CASE(AArch64ISD::GLDFF1_SCALED_MERGE_ZERO)
2270     MAKE_CASE(AArch64ISD::GLDFF1_SXTW_MERGE_ZERO)
2271     MAKE_CASE(AArch64ISD::GLDFF1_UXTW_MERGE_ZERO)
2272     MAKE_CASE(AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO)
2273     MAKE_CASE(AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO)
2274     MAKE_CASE(AArch64ISD::GLDFF1_IMM_MERGE_ZERO)
2275     MAKE_CASE(AArch64ISD::GLDFF1S_MERGE_ZERO)
2276     MAKE_CASE(AArch64ISD::GLDFF1S_SCALED_MERGE_ZERO)
2277     MAKE_CASE(AArch64ISD::GLDFF1S_SXTW_MERGE_ZERO)
2278     MAKE_CASE(AArch64ISD::GLDFF1S_UXTW_MERGE_ZERO)
2279     MAKE_CASE(AArch64ISD::GLDFF1S_SXTW_SCALED_MERGE_ZERO)
2280     MAKE_CASE(AArch64ISD::GLDFF1S_UXTW_SCALED_MERGE_ZERO)
2281     MAKE_CASE(AArch64ISD::GLDFF1S_IMM_MERGE_ZERO)
2282     MAKE_CASE(AArch64ISD::GLDNT1_MERGE_ZERO)
2283     MAKE_CASE(AArch64ISD::GLDNT1_INDEX_MERGE_ZERO)
2284     MAKE_CASE(AArch64ISD::GLDNT1S_MERGE_ZERO)
2285     MAKE_CASE(AArch64ISD::ST1_PRED)
2286     MAKE_CASE(AArch64ISD::SST1_PRED)
2287     MAKE_CASE(AArch64ISD::SST1_SCALED_PRED)
2288     MAKE_CASE(AArch64ISD::SST1_SXTW_PRED)
2289     MAKE_CASE(AArch64ISD::SST1_UXTW_PRED)
2290     MAKE_CASE(AArch64ISD::SST1_SXTW_SCALED_PRED)
2291     MAKE_CASE(AArch64ISD::SST1_UXTW_SCALED_PRED)
2292     MAKE_CASE(AArch64ISD::SST1_IMM_PRED)
2293     MAKE_CASE(AArch64ISD::SSTNT1_PRED)
2294     MAKE_CASE(AArch64ISD::SSTNT1_INDEX_PRED)
2295     MAKE_CASE(AArch64ISD::LDP)
2296     MAKE_CASE(AArch64ISD::STP)
2297     MAKE_CASE(AArch64ISD::STNP)
2298     MAKE_CASE(AArch64ISD::BITREVERSE_MERGE_PASSTHRU)
2299     MAKE_CASE(AArch64ISD::BSWAP_MERGE_PASSTHRU)
2300     MAKE_CASE(AArch64ISD::REVH_MERGE_PASSTHRU)
2301     MAKE_CASE(AArch64ISD::REVW_MERGE_PASSTHRU)
2302     MAKE_CASE(AArch64ISD::REVD_MERGE_PASSTHRU)
2303     MAKE_CASE(AArch64ISD::CTLZ_MERGE_PASSTHRU)
2304     MAKE_CASE(AArch64ISD::CTPOP_MERGE_PASSTHRU)
2305     MAKE_CASE(AArch64ISD::DUP_MERGE_PASSTHRU)
2306     MAKE_CASE(AArch64ISD::INDEX_VECTOR)
2307     MAKE_CASE(AArch64ISD::ADDP)
2308     MAKE_CASE(AArch64ISD::SADDLP)
2309     MAKE_CASE(AArch64ISD::UADDLP)
2310     MAKE_CASE(AArch64ISD::CALL_RVMARKER)
2311     MAKE_CASE(AArch64ISD::ASSERT_ZEXT_BOOL)
2312     MAKE_CASE(AArch64ISD::MOPS_MEMSET)
2313     MAKE_CASE(AArch64ISD::MOPS_MEMSET_TAGGING)
2314     MAKE_CASE(AArch64ISD::MOPS_MEMCOPY)
2315     MAKE_CASE(AArch64ISD::MOPS_MEMMOVE)
2316     MAKE_CASE(AArch64ISD::CALL_BTI)
2317   }
2318 #undef MAKE_CASE
2319   return nullptr;
2320 }
2321 
2322 MachineBasicBlock *
2323 AArch64TargetLowering::EmitF128CSEL(MachineInstr &MI,
2324                                     MachineBasicBlock *MBB) const {
2325   // We materialise the F128CSEL pseudo-instruction as some control flow and a
2326   // phi node:
2327 
2328   // OrigBB:
2329   //     [... previous instrs leading to comparison ...]
2330   //     b.ne TrueBB
2331   //     b EndBB
2332   // TrueBB:
2333   //     ; Fallthrough
2334   // EndBB:
2335   //     Dest = PHI [IfTrue, TrueBB], [IfFalse, OrigBB]
2336 
2337   MachineFunction *MF = MBB->getParent();
2338   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2339   const BasicBlock *LLVM_BB = MBB->getBasicBlock();
2340   DebugLoc DL = MI.getDebugLoc();
2341   MachineFunction::iterator It = ++MBB->getIterator();
2342 
2343   Register DestReg = MI.getOperand(0).getReg();
2344   Register IfTrueReg = MI.getOperand(1).getReg();
2345   Register IfFalseReg = MI.getOperand(2).getReg();
2346   unsigned CondCode = MI.getOperand(3).getImm();
2347   bool NZCVKilled = MI.getOperand(4).isKill();
2348 
2349   MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
2350   MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
2351   MF->insert(It, TrueBB);
2352   MF->insert(It, EndBB);
2353 
  // Transfer the rest of the current basic block to EndBB.
2355   EndBB->splice(EndBB->begin(), MBB, std::next(MachineBasicBlock::iterator(MI)),
2356                 MBB->end());
2357   EndBB->transferSuccessorsAndUpdatePHIs(MBB);
2358 
2359   BuildMI(MBB, DL, TII->get(AArch64::Bcc)).addImm(CondCode).addMBB(TrueBB);
2360   BuildMI(MBB, DL, TII->get(AArch64::B)).addMBB(EndBB);
2361   MBB->addSuccessor(TrueBB);
2362   MBB->addSuccessor(EndBB);
2363 
2364   // TrueBB falls through to the end.
2365   TrueBB->addSuccessor(EndBB);
2366 
2367   if (!NZCVKilled) {
2368     TrueBB->addLiveIn(AArch64::NZCV);
2369     EndBB->addLiveIn(AArch64::NZCV);
2370   }
2371 
2372   BuildMI(*EndBB, EndBB->begin(), DL, TII->get(AArch64::PHI), DestReg)
2373       .addReg(IfTrueReg)
2374       .addMBB(TrueBB)
2375       .addReg(IfFalseReg)
2376       .addMBB(MBB);
2377 
2378   MI.eraseFromParent();
2379   return EndBB;
2380 }
2381 
2382 MachineBasicBlock *AArch64TargetLowering::EmitLoweredCatchRet(
2383        MachineInstr &MI, MachineBasicBlock *BB) const {
2384   assert(!isAsynchronousEHPersonality(classifyEHPersonality(
2385              BB->getParent()->getFunction().getPersonalityFn())) &&
2386          "SEH does not use catchret!");
2387   return BB;
2388 }
2389 
2390 MachineBasicBlock *
2391 AArch64TargetLowering::EmitTileLoad(unsigned Opc, unsigned BaseReg,
2392                                     MachineInstr &MI,
2393                                     MachineBasicBlock *BB) const {
2394   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2395   MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
2396 
2397   MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
2398   MIB.add(MI.getOperand(1)); // slice index register
2399   MIB.add(MI.getOperand(2)); // slice index offset
2400   MIB.add(MI.getOperand(3)); // pg
2401   MIB.add(MI.getOperand(4)); // base
2402   MIB.add(MI.getOperand(5)); // offset
2403 
2404   MI.eraseFromParent(); // The pseudo is gone now.
2405   return BB;
2406 }
2407 
2408 MachineBasicBlock *
2409 AArch64TargetLowering::EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const {
2410   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2411   MachineInstrBuilder MIB =
2412       BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(AArch64::LDR_ZA));
2413 
2414   MIB.addReg(AArch64::ZA, RegState::Define);
2415   MIB.add(MI.getOperand(0)); // Vector select register
2416   MIB.add(MI.getOperand(1)); // Vector select offset
2417   MIB.add(MI.getOperand(2)); // Base
2418   MIB.add(MI.getOperand(1)); // Offset, same as vector select offset
2419 
2420   MI.eraseFromParent(); // The pseudo is gone now.
2421   return BB;
2422 }
2423 
2424 MachineBasicBlock *
2425 AArch64TargetLowering::EmitMopa(unsigned Opc, unsigned BaseReg,
2426                                 MachineInstr &MI, MachineBasicBlock *BB) const {
2427   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2428   MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
2429 
2430   MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
2431   MIB.addReg(BaseReg + MI.getOperand(0).getImm());
2432   MIB.add(MI.getOperand(1)); // pn
2433   MIB.add(MI.getOperand(2)); // pm
2434   MIB.add(MI.getOperand(3)); // zn
2435   MIB.add(MI.getOperand(4)); // zm
2436 
2437   MI.eraseFromParent(); // The pseudo is gone now.
2438   return BB;
2439 }
2440 
2441 MachineBasicBlock *
2442 AArch64TargetLowering::EmitInsertVectorToTile(unsigned Opc, unsigned BaseReg,
2443                                               MachineInstr &MI,
2444                                               MachineBasicBlock *BB) const {
2445   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2446   MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
2447 
2448   MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
2449   MIB.addReg(BaseReg + MI.getOperand(0).getImm());
2450   MIB.add(MI.getOperand(1)); // Slice index register
2451   MIB.add(MI.getOperand(2)); // Slice index offset
2452   MIB.add(MI.getOperand(3)); // pg
2453   MIB.add(MI.getOperand(4)); // zn
2454 
2455   MI.eraseFromParent(); // The pseudo is gone now.
2456   return BB;
2457 }
2458 
2459 MachineBasicBlock *
2460 AArch64TargetLowering::EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const {
2461   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2462   MachineInstrBuilder MIB =
2463       BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(AArch64::ZERO_M));
2464   MIB.add(MI.getOperand(0)); // Mask
2465 
2466   unsigned Mask = MI.getOperand(0).getImm();
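  // Each mask bit selects one 64-bit tile; mark every selected tile as
  // implicitly defined below. For example (illustrative), a mask of
  // 0b00000101 implicitly defines ZAD0 and ZAD2.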
2467   for (unsigned I = 0; I < 8; I++) {
2468     if (Mask & (1 << I))
2469       MIB.addDef(AArch64::ZAD0 + I, RegState::ImplicitDefine);
2470   }
2471 
2472   MI.eraseFromParent(); // The pseudo is gone now.
2473   return BB;
2474 }
2475 
2476 MachineBasicBlock *
2477 AArch64TargetLowering::EmitAddVectorToTile(unsigned Opc, unsigned BaseReg,
2478                                            MachineInstr &MI,
2479                                            MachineBasicBlock *BB) const {
2480   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2481   MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
2482 
2483   MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
2484   MIB.addReg(BaseReg + MI.getOperand(0).getImm());
2485   MIB.add(MI.getOperand(1)); // pn
2486   MIB.add(MI.getOperand(2)); // pm
2487   MIB.add(MI.getOperand(3)); // zn
2488 
2489   MI.eraseFromParent(); // The pseudo is gone now.
2490   return BB;
2491 }
2492 
2493 MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
2494     MachineInstr &MI, MachineBasicBlock *BB) const {
2495   switch (MI.getOpcode()) {
2496   default:
2497 #ifndef NDEBUG
2498     MI.dump();
2499 #endif
2500     llvm_unreachable("Unexpected instruction for custom inserter!");
2501 
2502   case AArch64::F128CSEL:
2503     return EmitF128CSEL(MI, BB);
2504 
2505   case TargetOpcode::STATEPOINT:
    // STATEPOINT is a pseudo instruction which has no implicit defs/uses
    // while the BL call instruction (to which the statepoint is lowered at
    // the end) has an implicit def of LR. This def is early-clobber as it is
    // set at the moment of the call, before any use is read.
    // Add this implicit dead def here as a workaround.
2511     MI.addOperand(*MI.getMF(),
2512                   MachineOperand::CreateReg(
2513                       AArch64::LR, /*isDef*/ true,
2514                       /*isImp*/ true, /*isKill*/ false, /*isDead*/ true,
2515                       /*isUndef*/ false, /*isEarlyClobber*/ true));
2516     LLVM_FALLTHROUGH;
2517   case TargetOpcode::STACKMAP:
2518   case TargetOpcode::PATCHPOINT:
2519     return emitPatchPoint(MI, BB);
2520 
2521   case AArch64::CATCHRET:
2522     return EmitLoweredCatchRet(MI, BB);
2523   case AArch64::LD1_MXIPXX_H_PSEUDO_B:
2524     return EmitTileLoad(AArch64::LD1_MXIPXX_H_B, AArch64::ZAB0, MI, BB);
2525   case AArch64::LD1_MXIPXX_H_PSEUDO_H:
2526     return EmitTileLoad(AArch64::LD1_MXIPXX_H_H, AArch64::ZAH0, MI, BB);
2527   case AArch64::LD1_MXIPXX_H_PSEUDO_S:
2528     return EmitTileLoad(AArch64::LD1_MXIPXX_H_S, AArch64::ZAS0, MI, BB);
2529   case AArch64::LD1_MXIPXX_H_PSEUDO_D:
2530     return EmitTileLoad(AArch64::LD1_MXIPXX_H_D, AArch64::ZAD0, MI, BB);
2531   case AArch64::LD1_MXIPXX_H_PSEUDO_Q:
2532     return EmitTileLoad(AArch64::LD1_MXIPXX_H_Q, AArch64::ZAQ0, MI, BB);
2533   case AArch64::LD1_MXIPXX_V_PSEUDO_B:
2534     return EmitTileLoad(AArch64::LD1_MXIPXX_V_B, AArch64::ZAB0, MI, BB);
2535   case AArch64::LD1_MXIPXX_V_PSEUDO_H:
2536     return EmitTileLoad(AArch64::LD1_MXIPXX_V_H, AArch64::ZAH0, MI, BB);
2537   case AArch64::LD1_MXIPXX_V_PSEUDO_S:
2538     return EmitTileLoad(AArch64::LD1_MXIPXX_V_S, AArch64::ZAS0, MI, BB);
2539   case AArch64::LD1_MXIPXX_V_PSEUDO_D:
2540     return EmitTileLoad(AArch64::LD1_MXIPXX_V_D, AArch64::ZAD0, MI, BB);
2541   case AArch64::LD1_MXIPXX_V_PSEUDO_Q:
2542     return EmitTileLoad(AArch64::LD1_MXIPXX_V_Q, AArch64::ZAQ0, MI, BB);
2543   case AArch64::LDR_ZA_PSEUDO:
2544     return EmitFill(MI, BB);
2545   case AArch64::BFMOPA_MPPZZ_PSEUDO:
2546     return EmitMopa(AArch64::BFMOPA_MPPZZ, AArch64::ZAS0, MI, BB);
2547   case AArch64::BFMOPS_MPPZZ_PSEUDO:
2548     return EmitMopa(AArch64::BFMOPS_MPPZZ, AArch64::ZAS0, MI, BB);
2549   case AArch64::FMOPAL_MPPZZ_PSEUDO:
2550     return EmitMopa(AArch64::FMOPAL_MPPZZ, AArch64::ZAS0, MI, BB);
2551   case AArch64::FMOPSL_MPPZZ_PSEUDO:
2552     return EmitMopa(AArch64::FMOPSL_MPPZZ, AArch64::ZAS0, MI, BB);
2553   case AArch64::FMOPA_MPPZZ_S_PSEUDO:
2554     return EmitMopa(AArch64::FMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2555   case AArch64::FMOPS_MPPZZ_S_PSEUDO:
2556     return EmitMopa(AArch64::FMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2557   case AArch64::FMOPA_MPPZZ_D_PSEUDO:
2558     return EmitMopa(AArch64::FMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2559   case AArch64::FMOPS_MPPZZ_D_PSEUDO:
2560     return EmitMopa(AArch64::FMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2561   case AArch64::SMOPA_MPPZZ_S_PSEUDO:
2562     return EmitMopa(AArch64::SMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2563   case AArch64::SMOPS_MPPZZ_S_PSEUDO:
2564     return EmitMopa(AArch64::SMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2565   case AArch64::UMOPA_MPPZZ_S_PSEUDO:
2566     return EmitMopa(AArch64::UMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2567   case AArch64::UMOPS_MPPZZ_S_PSEUDO:
2568     return EmitMopa(AArch64::UMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2569   case AArch64::SUMOPA_MPPZZ_S_PSEUDO:
2570     return EmitMopa(AArch64::SUMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2571   case AArch64::SUMOPS_MPPZZ_S_PSEUDO:
2572     return EmitMopa(AArch64::SUMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2573   case AArch64::USMOPA_MPPZZ_S_PSEUDO:
2574     return EmitMopa(AArch64::USMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2575   case AArch64::USMOPS_MPPZZ_S_PSEUDO:
2576     return EmitMopa(AArch64::USMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2577   case AArch64::SMOPA_MPPZZ_D_PSEUDO:
2578     return EmitMopa(AArch64::SMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2579   case AArch64::SMOPS_MPPZZ_D_PSEUDO:
2580     return EmitMopa(AArch64::SMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2581   case AArch64::UMOPA_MPPZZ_D_PSEUDO:
2582     return EmitMopa(AArch64::UMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2583   case AArch64::UMOPS_MPPZZ_D_PSEUDO:
2584     return EmitMopa(AArch64::UMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2585   case AArch64::SUMOPA_MPPZZ_D_PSEUDO:
2586     return EmitMopa(AArch64::SUMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2587   case AArch64::SUMOPS_MPPZZ_D_PSEUDO:
2588     return EmitMopa(AArch64::SUMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2589   case AArch64::USMOPA_MPPZZ_D_PSEUDO:
2590     return EmitMopa(AArch64::USMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2591   case AArch64::USMOPS_MPPZZ_D_PSEUDO:
2592     return EmitMopa(AArch64::USMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2593   case AArch64::INSERT_MXIPZ_H_PSEUDO_B:
2594     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_B, AArch64::ZAB0, MI,
2595                                   BB);
2596   case AArch64::INSERT_MXIPZ_H_PSEUDO_H:
2597     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_H, AArch64::ZAH0, MI,
2598                                   BB);
2599   case AArch64::INSERT_MXIPZ_H_PSEUDO_S:
2600     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_S, AArch64::ZAS0, MI,
2601                                   BB);
2602   case AArch64::INSERT_MXIPZ_H_PSEUDO_D:
2603     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_D, AArch64::ZAD0, MI,
2604                                   BB);
2605   case AArch64::INSERT_MXIPZ_H_PSEUDO_Q:
2606     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_Q, AArch64::ZAQ0, MI,
2607                                   BB);
2608   case AArch64::INSERT_MXIPZ_V_PSEUDO_B:
2609     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_B, AArch64::ZAB0, MI,
2610                                   BB);
2611   case AArch64::INSERT_MXIPZ_V_PSEUDO_H:
2612     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_H, AArch64::ZAH0, MI,
2613                                   BB);
2614   case AArch64::INSERT_MXIPZ_V_PSEUDO_S:
2615     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_S, AArch64::ZAS0, MI,
2616                                   BB);
2617   case AArch64::INSERT_MXIPZ_V_PSEUDO_D:
2618     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_D, AArch64::ZAD0, MI,
2619                                   BB);
2620   case AArch64::INSERT_MXIPZ_V_PSEUDO_Q:
2621     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_Q, AArch64::ZAQ0, MI,
2622                                   BB);
2623   case AArch64::ZERO_M_PSEUDO:
2624     return EmitZero(MI, BB);
2625   case AArch64::ADDHA_MPPZ_PSEUDO_S:
2626     return EmitAddVectorToTile(AArch64::ADDHA_MPPZ_S, AArch64::ZAS0, MI, BB);
2627   case AArch64::ADDVA_MPPZ_PSEUDO_S:
2628     return EmitAddVectorToTile(AArch64::ADDVA_MPPZ_S, AArch64::ZAS0, MI, BB);
2629   case AArch64::ADDHA_MPPZ_PSEUDO_D:
2630     return EmitAddVectorToTile(AArch64::ADDHA_MPPZ_D, AArch64::ZAD0, MI, BB);
2631   case AArch64::ADDVA_MPPZ_PSEUDO_D:
2632     return EmitAddVectorToTile(AArch64::ADDVA_MPPZ_D, AArch64::ZAD0, MI, BB);
2633   }
2634 }
2635 
2636 //===----------------------------------------------------------------------===//
2637 // AArch64 Lowering private implementation.
2638 //===----------------------------------------------------------------------===//
2639 
2640 //===----------------------------------------------------------------------===//
2641 // Lowering Code
2642 //===----------------------------------------------------------------------===//
2643 
2644 // Forward declarations of SVE fixed length lowering helpers
2645 static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT);
2646 static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V);
2647 static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V);
2648 static SDValue convertFixedMaskToScalableVector(SDValue Mask,
2649                                                 SelectionDAG &DAG);
2650 static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
2651                                              EVT VT);
2652 
2653 /// isZerosVector - Check whether SDNode N is a zero-filled vector.
2654 static bool isZerosVector(const SDNode *N) {
2655   // Look through a bit convert.
2656   while (N->getOpcode() == ISD::BITCAST)
2657     N = N->getOperand(0).getNode();
2658 
2659   if (ISD::isConstantSplatVectorAllZeros(N))
2660     return true;
2661 
2662   if (N->getOpcode() != AArch64ISD::DUP)
2663     return false;
2664 
2665   auto Opnd0 = N->getOperand(0);
2666   auto *CINT = dyn_cast<ConstantSDNode>(Opnd0);
2667   auto *CFP = dyn_cast<ConstantFPSDNode>(Opnd0);
2668   return (CINT && CINT->isZero()) || (CFP && CFP->isZero());
2669 }
2670 
2671 /// changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64
2672 /// CC
2673 static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC) {
2674   switch (CC) {
2675   default:
2676     llvm_unreachable("Unknown condition code!");
2677   case ISD::SETNE:
2678     return AArch64CC::NE;
2679   case ISD::SETEQ:
2680     return AArch64CC::EQ;
2681   case ISD::SETGT:
2682     return AArch64CC::GT;
2683   case ISD::SETGE:
2684     return AArch64CC::GE;
2685   case ISD::SETLT:
2686     return AArch64CC::LT;
2687   case ISD::SETLE:
2688     return AArch64CC::LE;
2689   case ISD::SETUGT:
2690     return AArch64CC::HI;
2691   case ISD::SETUGE:
2692     return AArch64CC::HS;
2693   case ISD::SETULT:
2694     return AArch64CC::LO;
2695   case ISD::SETULE:
2696     return AArch64CC::LS;
2697   }
2698 }
2699 
2700 /// changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC.
2701 static void changeFPCCToAArch64CC(ISD::CondCode CC,
2702                                   AArch64CC::CondCode &CondCode,
2703                                   AArch64CC::CondCode &CondCode2) {
2704   CondCode2 = AArch64CC::AL;
2705   switch (CC) {
2706   default:
2707     llvm_unreachable("Unknown FP condition!");
2708   case ISD::SETEQ:
2709   case ISD::SETOEQ:
2710     CondCode = AArch64CC::EQ;
2711     break;
2712   case ISD::SETGT:
2713   case ISD::SETOGT:
2714     CondCode = AArch64CC::GT;
2715     break;
2716   case ISD::SETGE:
2717   case ISD::SETOGE:
2718     CondCode = AArch64CC::GE;
2719     break;
2720   case ISD::SETOLT:
2721     CondCode = AArch64CC::MI;
2722     break;
2723   case ISD::SETOLE:
2724     CondCode = AArch64CC::LS;
2725     break;
2726   case ISD::SETONE:
2727     CondCode = AArch64CC::MI;
2728     CondCode2 = AArch64CC::GT;
2729     break;
2730   case ISD::SETO:
2731     CondCode = AArch64CC::VC;
2732     break;
2733   case ISD::SETUO:
2734     CondCode = AArch64CC::VS;
2735     break;
2736   case ISD::SETUEQ:
2737     CondCode = AArch64CC::EQ;
2738     CondCode2 = AArch64CC::VS;
2739     break;
2740   case ISD::SETUGT:
2741     CondCode = AArch64CC::HI;
2742     break;
2743   case ISD::SETUGE:
2744     CondCode = AArch64CC::PL;
2745     break;
2746   case ISD::SETLT:
2747   case ISD::SETULT:
2748     CondCode = AArch64CC::LT;
2749     break;
2750   case ISD::SETLE:
2751   case ISD::SETULE:
2752     CondCode = AArch64CC::LE;
2753     break;
2754   case ISD::SETNE:
2755   case ISD::SETUNE:
2756     CondCode = AArch64CC::NE;
2757     break;
2758   }
2759 }
2760 
2761 /// Convert a DAG fp condition code to an AArch64 CC.
2762 /// This differs from changeFPCCToAArch64CC in that it returns cond codes that
2763 /// should be AND'ed instead of OR'ed.
2764 static void changeFPCCToANDAArch64CC(ISD::CondCode CC,
2765                                      AArch64CC::CondCode &CondCode,
2766                                      AArch64CC::CondCode &CondCode2) {
2767   CondCode2 = AArch64CC::AL;
2768   switch (CC) {
2769   default:
2770     changeFPCCToAArch64CC(CC, CondCode, CondCode2);
2771     assert(CondCode2 == AArch64CC::AL);
2772     break;
2773   case ISD::SETONE:
2774     // (a one b)
2775     // == ((a olt b) || (a ogt b))
2776     // == ((a ord b) && (a une b))
2777     CondCode = AArch64CC::VC;
2778     CondCode2 = AArch64CC::NE;
2779     break;
2780   case ISD::SETUEQ:
2781     // (a ueq b)
2782     // == ((a uno b) || (a oeq b))
2783     // == ((a ule b) && (a uge b))
2784     CondCode = AArch64CC::PL;
2785     CondCode2 = AArch64CC::LE;
2786     break;
2787   }
2788 }
2789 
2790 /// changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64
2791 /// CC usable with the vector instructions. Fewer operations are available
2792 /// without a real NZCV register, so we have to use less efficient combinations
2793 /// to get the same effect.
2794 static void changeVectorFPCCToAArch64CC(ISD::CondCode CC,
2795                                         AArch64CC::CondCode &CondCode,
2796                                         AArch64CC::CondCode &CondCode2,
2797                                         bool &Invert) {
2798   Invert = false;
2799   switch (CC) {
2800   default:
2801     // Mostly the scalar mappings work fine.
2802     changeFPCCToAArch64CC(CC, CondCode, CondCode2);
2803     break;
2804   case ISD::SETUO:
2805     Invert = true;
2806     LLVM_FALLTHROUGH;
2807   case ISD::SETO:
2808     CondCode = AArch64CC::MI;
2809     CondCode2 = AArch64CC::GE;
2810     break;
2811   case ISD::SETUEQ:
2812   case ISD::SETULT:
2813   case ISD::SETULE:
2814   case ISD::SETUGT:
2815   case ISD::SETUGE:
2816     // All of the compare-mask comparisons are ordered, but we can switch
2817     // between the two by a double inversion. E.g. ULE == !OGT.
2818     Invert = true;
2819     changeFPCCToAArch64CC(getSetCCInverse(CC, /* FP inverse */ MVT::f32),
2820                           CondCode, CondCode2);
2821     break;
2822   }
2823 }
2824 
2825 static bool isLegalArithImmed(uint64_t C) {
2826   // Matches AArch64DAGToDAGISel::SelectArithImmed().
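  // That is, a 12-bit unsigned immediate, optionally shifted left by 12 bits.
  // For example (illustrative), 4095 and 0xFFF000 are legal, but 0x1001 is
  // not.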
2827   bool IsLegal = (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
2828   LLVM_DEBUG(dbgs() << "Is imm " << C
2829                     << " legal: " << (IsLegal ? "yes\n" : "no\n"));
2830   return IsLegal;
2831 }
2832 
// Can a (CMP op1, (sub 0, op2)) be turned into a CMN instruction on
// the grounds that "op1 - (-op2) == op1 + op2"? Not always: the C and V flags
// can be set differently by this operation. It comes down to whether
// "SInt(~op2)+1 == SInt(~op2+1)" (and the same for UInt). If so, then
// everything is fine. If not, the optimization is wrong. Thus general
// comparisons are only valid if op2 != 0.
2839 //
2840 // So, finally, the only LLVM-native comparisons that don't mention C and V
2841 // are SETEQ and SETNE. They're the only ones we can safely use CMN for in
2842 // the absence of information about op2.
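//
// For example (illustrative), for an EQ/NE comparison:
//     neg w8, w1
//     cmp w0, w8
// can instead be selected as:
//     cmn w0, w1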
2843 static bool isCMN(SDValue Op, ISD::CondCode CC) {
2844   return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0)) &&
2845          (CC == ISD::SETEQ || CC == ISD::SETNE);
2846 }
2847 
2848 static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl,
2849                                       SelectionDAG &DAG, SDValue Chain,
2850                                       bool IsSignaling) {
2851   EVT VT = LHS.getValueType();
2852   assert(VT != MVT::f128);
2853 
2854   const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
2855 
2856   if (VT == MVT::f16 && !FullFP16) {
2857     LHS = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
2858                       {Chain, LHS});
2859     RHS = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
2860                       {LHS.getValue(1), RHS});
2861     Chain = RHS.getValue(1);
2862     VT = MVT::f32;
2863   }
2864   unsigned Opcode =
2865       IsSignaling ? AArch64ISD::STRICT_FCMPE : AArch64ISD::STRICT_FCMP;
2866   return DAG.getNode(Opcode, dl, {VT, MVT::Other}, {Chain, LHS, RHS});
2867 }
2868 
2869 static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
2870                               const SDLoc &dl, SelectionDAG &DAG) {
2871   EVT VT = LHS.getValueType();
2872   const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
2873 
2874   if (VT.isFloatingPoint()) {
2875     assert(VT != MVT::f128);
2876     if (VT == MVT::f16 && !FullFP16) {
2877       LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS);
2878       RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS);
2879       VT = MVT::f32;
2880     }
2881     return DAG.getNode(AArch64ISD::FCMP, dl, VT, LHS, RHS);
2882   }
2883 
2884   // The CMP instruction is just an alias for SUBS, and representing it as
2885   // SUBS means that it's possible to get CSE with subtract operations.
2886   // A later phase can perform the optimization of setting the destination
2887   // register to WZR/XZR if it ends up being unused.
2888   unsigned Opcode = AArch64ISD::SUBS;
2889 
2890   if (isCMN(RHS, CC)) {
    // Can we combine a (CMP op1, (sub 0, op2)) into a CMN instruction?
2892     Opcode = AArch64ISD::ADDS;
2893     RHS = RHS.getOperand(1);
2894   } else if (isCMN(LHS, CC)) {
    // As we are looking for EQ/NE compares, the operands can be commuted; can
    // we combine a (CMP (sub 0, op1), op2) into a CMN instruction?
2897     Opcode = AArch64ISD::ADDS;
2898     LHS = LHS.getOperand(1);
2899   } else if (isNullConstant(RHS) && !isUnsignedIntSetCC(CC)) {
2900     if (LHS.getOpcode() == ISD::AND) {
2901       // Similarly, (CMP (and X, Y), 0) can be implemented with a TST
2902       // (a.k.a. ANDS) except that the flags are only guaranteed to work for one
2903       // of the signed comparisons.
2904       const SDValue ANDSNode = DAG.getNode(AArch64ISD::ANDS, dl,
2905                                            DAG.getVTList(VT, MVT_CC),
2906                                            LHS.getOperand(0),
2907                                            LHS.getOperand(1));
2908       // Replace all users of (and X, Y) with newly generated (ands X, Y)
2909       DAG.ReplaceAllUsesWith(LHS, ANDSNode);
2910       return ANDSNode.getValue(1);
2911     } else if (LHS.getOpcode() == AArch64ISD::ANDS) {
2912       // Use result of ANDS
2913       return LHS.getValue(1);
2914     }
2915   }
2916 
2917   return DAG.getNode(Opcode, dl, DAG.getVTList(VT, MVT_CC), LHS, RHS)
2918       .getValue(1);
2919 }
2920 
2921 /// \defgroup AArch64CCMP CMP;CCMP matching
2922 ///
2923 /// These functions deal with the formation of CMP;CCMP;... sequences.
2924 /// The CCMP/CCMN/FCCMP/FCCMPE instructions allow the conditional execution of
2925 /// a comparison. They set the NZCV flags to a predefined value if their
/// predicate is false. This allows us to express arbitrary conjunctions, for
2927 /// example "cmp 0 (and (setCA (cmp A)) (setCB (cmp B)))"
2928 /// expressed as:
2929 ///   cmp A
2930 ///   ccmp B, inv(CB), CA
2931 ///   check for CB flags
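///
/// For instance (illustrative), "a == 0 && b == 5" can be emitted as:
///   cmp  w0, #0
///   ccmp w1, #5, #0, eq
///   cset w0, eq
/// where the #0 NZCV immediate makes the final "eq" test fail whenever the
/// first comparison fails.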
2932 ///
2933 /// This naturally lets us implement chains of AND operations with SETCC
2934 /// operands. And we can even implement some other situations by transforming
2935 /// them:
///   - We can implement (NEG SETCC), i.e. negating a single comparison, by
///     negating the flags used in the CCMP/FCCMP operation.
2938 ///   - We can negate the result of a whole chain of CMP/CCMP/FCCMP operations
2939 ///     by negating the flags we test for afterwards. i.e.
2940 ///     NEG (CMP CCMP CCCMP ...) can be implemented.
2941 ///   - Note that we can only ever negate all previously processed results.
///     What we cannot implement by flipping the flags to test is a negation
2943 ///     of two sub-trees (because the negation affects all sub-trees emitted so
2944 ///     far, so the 2nd sub-tree we emit would also affect the first).
2945 /// With those tools we can implement some OR operations:
2946 ///   - (OR (SETCC A) (SETCC B)) can be implemented via:
2947 ///     NEG (AND (NEG (SETCC A)) (NEG (SETCC B)))
2948 ///   - After transforming OR to NEG/AND combinations we may be able to use NEG
2949 ///     elimination rules from earlier to implement the whole thing as a
2950 ///     CCMP/FCCMP chain.
2951 ///
/// As a complete example:
///     or (or (setCA (cmp A)) (setCB (cmp B)))
///        (and (setCC (cmp C)) (setCD (cmp D)))
/// can be reassociated to:
///     or (and (setCC (cmp C)) (setCD (cmp D)))
///        (or (setCA (cmp A)) (setCB (cmp B)))
/// can be transformed to:
///     not (and (not (and (setCC (cmp C)) (setCD (cmp D))))
///              (and (not (setCA (cmp A))) (not (setCB (cmp B)))))
2961 /// which can be implemented as:
2962 ///   cmp C
2963 ///   ccmp D, inv(CD), CC
2964 ///   ccmp A, CA, inv(CD)
2965 ///   ccmp B, CB, inv(CA)
2966 ///   check for CB flags
2967 ///
/// A counterexample is "or (and A B) (and C D)" which translates to
/// not (and (not (and A B)) (not (and C D))); we can only implement one of
/// the inner (not) operations, not both.
2971 /// @{
2972 
2973 /// Create a conditional comparison; Use CCMP, CCMN or FCCMP as appropriate.
2974 static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
2975                                          ISD::CondCode CC, SDValue CCOp,
2976                                          AArch64CC::CondCode Predicate,
2977                                          AArch64CC::CondCode OutCC,
2978                                          const SDLoc &DL, SelectionDAG &DAG) {
2979   unsigned Opcode = 0;
2980   const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
2981 
2982   if (LHS.getValueType().isFloatingPoint()) {
2983     assert(LHS.getValueType() != MVT::f128);
2984     if (LHS.getValueType() == MVT::f16 && !FullFP16) {
2985       LHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, LHS);
2986       RHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, RHS);
2987     }
2988     Opcode = AArch64ISD::FCCMP;
2989   } else if (RHS.getOpcode() == ISD::SUB) {
2990     SDValue SubOp0 = RHS.getOperand(0);
2991     if (isNullConstant(SubOp0) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
2992       // See emitComparison() on why we can only do this for SETEQ and SETNE.
2993       Opcode = AArch64ISD::CCMN;
2994       RHS = RHS.getOperand(1);
2995     }
2996   }
2997   if (Opcode == 0)
2998     Opcode = AArch64ISD::CCMP;
2999 
3000   SDValue Condition = DAG.getConstant(Predicate, DL, MVT_CC);
3001   AArch64CC::CondCode InvOutCC = AArch64CC::getInvertedCondCode(OutCC);
3002   unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvOutCC);
3003   SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
3004   return DAG.getNode(Opcode, DL, MVT_CC, LHS, RHS, NZCVOp, Condition, CCOp);
3005 }
3006 
3007 /// Returns true if @p Val is a tree of AND/OR/SETCC operations that can be
3008 /// expressed as a conjunction. See \ref AArch64CCMP.
3009 /// \param CanNegate    Set to true if we can negate the whole sub-tree just by
3010 ///                     changing the conditions on the SETCC tests.
3011 ///                     (this means we can call emitConjunctionRec() with
3012 ///                      Negate==true on this sub-tree)
3013 /// \param MustBeFirst  Set to true if this subtree needs to be negated and we
3014 ///                     cannot do the negation naturally. We are required to
3015 ///                     emit the subtree first in this case.
/// \param WillNegate   Is true if we are called when the result of this
3017 ///                     subexpression must be negated. This happens when the
3018 ///                     outer expression is an OR. We can use this fact to know
3019 ///                     that we have a double negation (or (or ...) ...) that
3020 ///                     can be implemented for free.
3021 static bool canEmitConjunction(const SDValue Val, bool &CanNegate,
3022                                bool &MustBeFirst, bool WillNegate,
3023                                unsigned Depth = 0) {
3024   if (!Val.hasOneUse())
3025     return false;
3026   unsigned Opcode = Val->getOpcode();
3027   if (Opcode == ISD::SETCC) {
3028     if (Val->getOperand(0).getValueType() == MVT::f128)
3029       return false;
3030     CanNegate = true;
3031     MustBeFirst = false;
3032     return true;
3033   }
3034   // Protect against exponential runtime and stack overflow.
3035   if (Depth > 6)
3036     return false;
3037   if (Opcode == ISD::AND || Opcode == ISD::OR) {
3038     bool IsOR = Opcode == ISD::OR;
3039     SDValue O0 = Val->getOperand(0);
3040     SDValue O1 = Val->getOperand(1);
3041     bool CanNegateL;
3042     bool MustBeFirstL;
3043     if (!canEmitConjunction(O0, CanNegateL, MustBeFirstL, IsOR, Depth+1))
3044       return false;
3045     bool CanNegateR;
3046     bool MustBeFirstR;
3047     if (!canEmitConjunction(O1, CanNegateR, MustBeFirstR, IsOR, Depth+1))
3048       return false;
3049 
3050     if (MustBeFirstL && MustBeFirstR)
3051       return false;
3052 
3053     if (IsOR) {
3054       // For an OR expression we need to be able to naturally negate at least
3055       // one side or we cannot do the transformation at all.
3056       if (!CanNegateL && !CanNegateR)
3057         return false;
      // If the result of the OR will be negated and we can naturally negate
      // the leaves, then this sub-tree as a whole negates naturally.
3060       CanNegate = WillNegate && CanNegateL && CanNegateR;
3061       // If we cannot naturally negate the whole sub-tree, then this must be
3062       // emitted first.
3063       MustBeFirst = !CanNegate;
3064     } else {
3065       assert(Opcode == ISD::AND && "Must be OR or AND");
3066       // We cannot naturally negate an AND operation.
3067       CanNegate = false;
3068       MustBeFirst = MustBeFirstL || MustBeFirstR;
3069     }
3070     return true;
3071   }
3072   return false;
3073 }
3074 
/// Emit a conjunction or disjunction tree with the CMP/FCMP followed by a
/// chain of CCMP/FCCMP ops. See @ref AArch64CCMP.
/// Tries to transform the given i1 producing node @p Val into a series of
/// compare and conditional compare operations. @returns an NZCV flags
/// producing node and sets @p OutCC to the flags that should be tested, or
/// returns SDValue() if the transformation was not possible.
/// \p Negate is true if we want this sub-tree to be negated just by changing
/// SETCC conditions.
3083 static SDValue emitConjunctionRec(SelectionDAG &DAG, SDValue Val,
3084     AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp,
3085     AArch64CC::CondCode Predicate) {
3086   // We're at a tree leaf, produce a conditional comparison operation.
3087   unsigned Opcode = Val->getOpcode();
3088   if (Opcode == ISD::SETCC) {
3089     SDValue LHS = Val->getOperand(0);
3090     SDValue RHS = Val->getOperand(1);
3091     ISD::CondCode CC = cast<CondCodeSDNode>(Val->getOperand(2))->get();
3092     bool isInteger = LHS.getValueType().isInteger();
3093     if (Negate)
3094       CC = getSetCCInverse(CC, LHS.getValueType());
3095     SDLoc DL(Val);
3096     // Determine OutCC and handle FP special case.
3097     if (isInteger) {
3098       OutCC = changeIntCCToAArch64CC(CC);
3099     } else {
3100       assert(LHS.getValueType().isFloatingPoint());
3101       AArch64CC::CondCode ExtraCC;
3102       changeFPCCToANDAArch64CC(CC, OutCC, ExtraCC);
3103       // Some floating point conditions can't be tested with a single condition
3104       // code. Construct an additional comparison in this case.
3105       if (ExtraCC != AArch64CC::AL) {
3106         SDValue ExtraCmp;
3107         if (!CCOp.getNode())
3108           ExtraCmp = emitComparison(LHS, RHS, CC, DL, DAG);
3109         else
3110           ExtraCmp = emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate,
3111                                                ExtraCC, DL, DAG);
3112         CCOp = ExtraCmp;
3113         Predicate = ExtraCC;
3114       }
3115     }
3116 
3117     // Produce a normal comparison if we are first in the chain
3118     if (!CCOp)
3119       return emitComparison(LHS, RHS, CC, DL, DAG);
3120     // Otherwise produce a ccmp.
3121     return emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate, OutCC, DL,
3122                                      DAG);
3123   }
3124   assert(Val->hasOneUse() && "Valid conjunction/disjunction tree");
3125 
3126   bool IsOR = Opcode == ISD::OR;
3127 
3128   SDValue LHS = Val->getOperand(0);
3129   bool CanNegateL;
3130   bool MustBeFirstL;
3131   bool ValidL = canEmitConjunction(LHS, CanNegateL, MustBeFirstL, IsOR);
3132   assert(ValidL && "Valid conjunction/disjunction tree");
3133   (void)ValidL;
3134 
3135   SDValue RHS = Val->getOperand(1);
3136   bool CanNegateR;
3137   bool MustBeFirstR;
3138   bool ValidR = canEmitConjunction(RHS, CanNegateR, MustBeFirstR, IsOR);
3139   assert(ValidR && "Valid conjunction/disjunction tree");
3140   (void)ValidR;
3141 
3142   // Swap sub-tree that must come first to the right side.
3143   if (MustBeFirstL) {
3144     assert(!MustBeFirstR && "Valid conjunction/disjunction tree");
3145     std::swap(LHS, RHS);
3146     std::swap(CanNegateL, CanNegateR);
3147     std::swap(MustBeFirstL, MustBeFirstR);
3148   }
3149 
3150   bool NegateR;
3151   bool NegateAfterR;
3152   bool NegateL;
3153   bool NegateAfterAll;
3154   if (Opcode == ISD::OR) {
3155     // Swap the sub-tree that we can negate naturally to the left.
3156     if (!CanNegateL) {
3157       assert(CanNegateR && "at least one side must be negatable");
3158       assert(!MustBeFirstR && "invalid conjunction/disjunction tree");
3159       assert(!Negate);
3160       std::swap(LHS, RHS);
3161       NegateR = false;
3162       NegateAfterR = true;
3163     } else {
3164       // Negate the left sub-tree if possible, otherwise negate the result.
3165       NegateR = CanNegateR;
3166       NegateAfterR = !CanNegateR;
3167     }
3168     NegateL = true;
3169     NegateAfterAll = !Negate;
3170   } else {
3171     assert(Opcode == ISD::AND && "Valid conjunction/disjunction tree");
3172     assert(!Negate && "Valid conjunction/disjunction tree");
3173 
3174     NegateL = false;
3175     NegateR = false;
3176     NegateAfterR = false;
3177     NegateAfterAll = false;
3178   }
3179 
3180   // Emit sub-trees.
3181   AArch64CC::CondCode RHSCC;
3182   SDValue CmpR = emitConjunctionRec(DAG, RHS, RHSCC, NegateR, CCOp, Predicate);
3183   if (NegateAfterR)
3184     RHSCC = AArch64CC::getInvertedCondCode(RHSCC);
3185   SDValue CmpL = emitConjunctionRec(DAG, LHS, OutCC, NegateL, CmpR, RHSCC);
3186   if (NegateAfterAll)
3187     OutCC = AArch64CC::getInvertedCondCode(OutCC);
3188   return CmpL;
3189 }
3190 
/// Emit an expression as a conjunction (a series of CCMP/FCCMP ops).
3192 /// In some cases this is even possible with OR operations in the expression.
3193 /// See \ref AArch64CCMP.
3194 /// \see emitConjunctionRec().
3195 static SDValue emitConjunction(SelectionDAG &DAG, SDValue Val,
3196                                AArch64CC::CondCode &OutCC) {
3197   bool DummyCanNegate;
3198   bool DummyMustBeFirst;
3199   if (!canEmitConjunction(Val, DummyCanNegate, DummyMustBeFirst, false))
3200     return SDValue();
3201 
3202   return emitConjunctionRec(DAG, Val, OutCC, false, SDValue(), AArch64CC::AL);
3203 }
3204 
3205 /// @}
3206 
3207 /// Returns how profitable it is to fold a comparison's operand's shift and/or
3208 /// extension operations.
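// For example (illustrative), instead of materializing the extension:
//     and w8, w1, #0xff
//     cmp w0, w8
// the extension can be folded into the compare:
//     cmp w0, w1, uxtb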
3209 static unsigned getCmpOperandFoldingProfit(SDValue Op) {
3210   auto isSupportedExtend = [&](SDValue V) {
3211     if (V.getOpcode() == ISD::SIGN_EXTEND_INREG)
3212       return true;
3213 
3214     if (V.getOpcode() == ISD::AND)
3215       if (ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
3216         uint64_t Mask = MaskCst->getZExtValue();
3217         return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF);
3218       }
3219 
3220     return false;
3221   };
3222 
3223   if (!Op.hasOneUse())
3224     return 0;
3225 
3226   if (isSupportedExtend(Op))
3227     return 1;
3228 
3229   unsigned Opc = Op.getOpcode();
3230   if (Opc == ISD::SHL || Opc == ISD::SRL || Opc == ISD::SRA)
3231     if (ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3232       uint64_t Shift = ShiftCst->getZExtValue();
3233       if (isSupportedExtend(Op.getOperand(0)))
3234         return (Shift <= 4) ? 2 : 1;
3235       EVT VT = Op.getValueType();
3236       if ((VT == MVT::i32 && Shift <= 31) || (VT == MVT::i64 && Shift <= 63))
3237         return 1;
3238     }
3239 
3240   return 0;
3241 }
3242 
3243 static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
3244                              SDValue &AArch64cc, SelectionDAG &DAG,
3245                              const SDLoc &dl) {
3246   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
3247     EVT VT = RHS.getValueType();
3248     uint64_t C = RHSC->getZExtValue();
3249     if (!isLegalArithImmed(C)) {
      // The constant does not fit; try adjusting it by one.
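      // For example (illustrative), "x < 4097" uses an immediate that cannot
      // be encoded, but the equivalent "x <= 4096" can use the legal shifted
      // immediate 4096.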
3251       switch (CC) {
3252       default:
3253         break;
3254       case ISD::SETLT:
3255       case ISD::SETGE:
3256         if ((VT == MVT::i32 && C != 0x80000000 &&
3257              isLegalArithImmed((uint32_t)(C - 1))) ||
3258             (VT == MVT::i64 && C != 0x80000000ULL &&
3259              isLegalArithImmed(C - 1ULL))) {
3260           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
3261           C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1;
3262           RHS = DAG.getConstant(C, dl, VT);
3263         }
3264         break;
3265       case ISD::SETULT:
3266       case ISD::SETUGE:
3267         if ((VT == MVT::i32 && C != 0 &&
3268              isLegalArithImmed((uint32_t)(C - 1))) ||
3269             (VT == MVT::i64 && C != 0ULL && isLegalArithImmed(C - 1ULL))) {
3270           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
3271           C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1;
3272           RHS = DAG.getConstant(C, dl, VT);
3273         }
3274         break;
3275       case ISD::SETLE:
3276       case ISD::SETGT:
3277         if ((VT == MVT::i32 && C != INT32_MAX &&
3278              isLegalArithImmed((uint32_t)(C + 1))) ||
3279             (VT == MVT::i64 && C != INT64_MAX &&
3280              isLegalArithImmed(C + 1ULL))) {
3281           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
3282           C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
3283           RHS = DAG.getConstant(C, dl, VT);
3284         }
3285         break;
3286       case ISD::SETULE:
3287       case ISD::SETUGT:
3288         if ((VT == MVT::i32 && C != UINT32_MAX &&
3289              isLegalArithImmed((uint32_t)(C + 1))) ||
3290             (VT == MVT::i64 && C != UINT64_MAX &&
3291              isLegalArithImmed(C + 1ULL))) {
3292           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
3293           C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
3294           RHS = DAG.getConstant(C, dl, VT);
3295         }
3296         break;
3297       }
3298     }
3299   }
3300 
3301   // Comparisons are canonicalized so that the RHS operand is simpler than the
3302   // LHS one, the extreme case being when RHS is an immediate. However, AArch64
3303   // can fold some shift+extend operations on the RHS operand, so swap the
3304   // operands if that can be done.
3305   //
3306   // For example:
3307   //    lsl     w13, w11, #1
3308   //    cmp     w13, w12
3309   // can be turned into:
3310   //    cmp     w12, w11, lsl #1
3311   if (!isa<ConstantSDNode>(RHS) ||
3312       !isLegalArithImmed(cast<ConstantSDNode>(RHS)->getZExtValue())) {
3313     SDValue TheLHS = isCMN(LHS, CC) ? LHS.getOperand(1) : LHS;
3314 
3315     if (getCmpOperandFoldingProfit(TheLHS) > getCmpOperandFoldingProfit(RHS)) {
3316       std::swap(LHS, RHS);
3317       CC = ISD::getSetCCSwappedOperands(CC);
3318     }
3319   }
3320 
3321   SDValue Cmp;
3322   AArch64CC::CondCode AArch64CC;
3323   if ((CC == ISD::SETEQ || CC == ISD::SETNE) && isa<ConstantSDNode>(RHS)) {
3324     const ConstantSDNode *RHSC = cast<ConstantSDNode>(RHS);
3325 
3326     // The imm operand of ADDS is an unsigned immediate, in the range 0 to 4095.
3327     // For the i8 operand, the largest immediate is 255, so this can be easily
3328     // encoded in the compare instruction. For the i16 operand, however, the
3329     // largest immediate cannot be encoded in the compare.
3330     // Therefore, use a sign extending load and cmn to avoid materializing the
3331     // -1 constant. For example,
3332     // movz w1, #65535
3333     // ldrh w0, [x0, #0]
3334     // cmp w0, w1
3335     // >
3336     // ldrsh w0, [x0, #0]
3337     // cmn w0, #1
    // Fundamentally, we're relying on the property that (zext LHS) ==
    // (zext RHS) if and only if (sext LHS) == (sext RHS). The checks are in
    // place to ensure both the LHS and RHS are truly zero extended and to
    // make sure the transformation is profitable.
3342     if ((RHSC->getZExtValue() >> 16 == 0) && isa<LoadSDNode>(LHS) &&
3343         cast<LoadSDNode>(LHS)->getExtensionType() == ISD::ZEXTLOAD &&
3344         cast<LoadSDNode>(LHS)->getMemoryVT() == MVT::i16 &&
3345         LHS.getNode()->hasNUsesOfValue(1, 0)) {
3346       int16_t ValueofRHS = cast<ConstantSDNode>(RHS)->getZExtValue();
3347       if (ValueofRHS < 0 && isLegalArithImmed(-ValueofRHS)) {
3348         SDValue SExt =
3349             DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, LHS.getValueType(), LHS,
3350                         DAG.getValueType(MVT::i16));
3351         Cmp = emitComparison(SExt, DAG.getConstant(ValueofRHS, dl,
3352                                                    RHS.getValueType()),
3353                              CC, dl, DAG);
3354         AArch64CC = changeIntCCToAArch64CC(CC);
3355       }
3356     }
3357 
3358     if (!Cmp && (RHSC->isZero() || RHSC->isOne())) {
3359       if ((Cmp = emitConjunction(DAG, LHS, AArch64CC))) {
3360         if ((CC == ISD::SETNE) ^ RHSC->isZero())
3361           AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
3362       }
3363     }
3364   }
3365 
3366   if (!Cmp) {
3367     Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
3368     AArch64CC = changeIntCCToAArch64CC(CC);
3369   }
3370   AArch64cc = DAG.getConstant(AArch64CC, dl, MVT_CC);
3371   return Cmp;
3372 }
3373 
3374 static std::pair<SDValue, SDValue>
3375 getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) {
3376   assert((Op.getValueType() == MVT::i32 || Op.getValueType() == MVT::i64) &&
3377          "Unsupported value type");
3378   SDValue Value, Overflow;
3379   SDLoc DL(Op);
3380   SDValue LHS = Op.getOperand(0);
3381   SDValue RHS = Op.getOperand(1);
3382   unsigned Opc = 0;
3383   switch (Op.getOpcode()) {
3384   default:
3385     llvm_unreachable("Unknown overflow instruction!");
3386   case ISD::SADDO:
3387     Opc = AArch64ISD::ADDS;
3388     CC = AArch64CC::VS;
3389     break;
3390   case ISD::UADDO:
3391     Opc = AArch64ISD::ADDS;
3392     CC = AArch64CC::HS;
3393     break;
3394   case ISD::SSUBO:
3395     Opc = AArch64ISD::SUBS;
3396     CC = AArch64CC::VS;
3397     break;
3398   case ISD::USUBO:
3399     Opc = AArch64ISD::SUBS;
3400     CC = AArch64CC::LO;
3401     break;
  // Multiply needs a little bit of extra work.
3403   case ISD::SMULO:
3404   case ISD::UMULO: {
3405     CC = AArch64CC::NE;
3406     bool IsSigned = Op.getOpcode() == ISD::SMULO;
3407     if (Op.getValueType() == MVT::i32) {
3408       // Extend to 64-bits, then perform a 64-bit multiply.
3409       unsigned ExtendOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3410       LHS = DAG.getNode(ExtendOpc, DL, MVT::i64, LHS);
3411       RHS = DAG.getNode(ExtendOpc, DL, MVT::i64, RHS);
3412       SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS);
3413       Value = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
3414 
3415       // Check that the result fits into a 32-bit integer.
3416       SDVTList VTs = DAG.getVTList(MVT::i64, MVT_CC);
3417       if (IsSigned) {
3418         // cmp xreg, wreg, sxtw
3419         SDValue SExtMul = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Value);
3420         Overflow =
3421             DAG.getNode(AArch64ISD::SUBS, DL, VTs, Mul, SExtMul).getValue(1);
3422       } else {
3423         // tst xreg, #0xffffffff00000000
3424         SDValue UpperBits = DAG.getConstant(0xFFFFFFFF00000000, DL, MVT::i64);
3425         Overflow =
3426             DAG.getNode(AArch64ISD::ANDS, DL, VTs, Mul, UpperBits).getValue(1);
3427       }
3428       break;
3429     }
3430     assert(Op.getValueType() == MVT::i64 && "Expected an i64 value type");
    // For the 64-bit multiply:
3432     Value = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS);
3433     if (IsSigned) {
3434       SDValue UpperBits = DAG.getNode(ISD::MULHS, DL, MVT::i64, LHS, RHS);
3435       SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i64, Value,
3436                                       DAG.getConstant(63, DL, MVT::i64));
3437       // It is important that LowerBits is last, otherwise the arithmetic
3438       // shift will not be folded into the compare (SUBS).
3439       SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32);
3440       Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, UpperBits, LowerBits)
3441                      .getValue(1);
3442     } else {
3443       SDValue UpperBits = DAG.getNode(ISD::MULHU, DL, MVT::i64, LHS, RHS);
3444       SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32);
3445       Overflow =
3446           DAG.getNode(AArch64ISD::SUBS, DL, VTs,
3447                       DAG.getConstant(0, DL, MVT::i64),
3448                       UpperBits).getValue(1);
3449     }
3450     break;
3451   }
3452   } // switch (...)
3453 
3454   if (Opc) {
3455     SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
3456 
3457     // Emit the AArch64 operation with overflow check.
3458     Value = DAG.getNode(Opc, DL, VTs, LHS, RHS);
3459     Overflow = Value.getValue(1);
3460   }
3461   return std::make_pair(Value, Overflow);
3462 }
3463 
3464 SDValue AArch64TargetLowering::LowerXOR(SDValue Op, SelectionDAG &DAG) const {
3465   if (useSVEForFixedLengthVectorVT(Op.getValueType()))
3466     return LowerToScalableOp(Op, DAG);
3467 
3468   SDValue Sel = Op.getOperand(0);
3469   SDValue Other = Op.getOperand(1);
3470   SDLoc dl(Sel);
3471 
3472   // If the operand is an overflow checking operation, invert the condition
3473   // code and kill the Not operation. I.e., transform:
  // (xor overflow_op_bool, 1)
3475   //   -->
3476   // (csel 1, 0, invert(cc), overflow_op_bool)
3477   // ... which later gets transformed to just a cset instruction with an
3478   // inverted condition code, rather than a cset + eor sequence.
3479   if (isOneConstant(Other) && ISD::isOverflowIntrOpRes(Sel)) {
3480     // Only lower legal XALUO ops.
3481     if (!DAG.getTargetLoweringInfo().isTypeLegal(Sel->getValueType(0)))
3482       return SDValue();
3483 
3484     SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
3485     SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
3486     AArch64CC::CondCode CC;
3487     SDValue Value, Overflow;
3488     std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Sel.getValue(0), DAG);
3489     SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32);
3490     return DAG.getNode(AArch64ISD::CSEL, dl, Op.getValueType(), TVal, FVal,
3491                        CCVal, Overflow);
3492   }
3493   // If neither operand is a SELECT_CC, give up.
3494   if (Sel.getOpcode() != ISD::SELECT_CC)
3495     std::swap(Sel, Other);
3496   if (Sel.getOpcode() != ISD::SELECT_CC)
3497     return Op;
3498 
3499   // The folding we want to perform is:
3500   // (xor x, (select_cc a, b, cc, 0, -1) )
3501   //   -->
3502   // (csel x, (xor x, -1), cc ...)
3503   //
3504   // The latter will get matched to a CSINV instruction.
3505 
3506   ISD::CondCode CC = cast<CondCodeSDNode>(Sel.getOperand(4))->get();
3507   SDValue LHS = Sel.getOperand(0);
3508   SDValue RHS = Sel.getOperand(1);
3509   SDValue TVal = Sel.getOperand(2);
3510   SDValue FVal = Sel.getOperand(3);
3511 
3512   // FIXME: This could be generalized to non-integer comparisons.
3513   if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64)
3514     return Op;
3515 
3516   ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
3517   ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
3518 
3519   // The values aren't constants, this isn't the pattern we're looking for.
3520   if (!CFVal || !CTVal)
3521     return Op;
3522 
3523   // We can commute the SELECT_CC by inverting the condition.  This
3524   // might be needed to make this fit into a CSINV pattern.
3525   if (CTVal->isAllOnes() && CFVal->isZero()) {
3526     std::swap(TVal, FVal);
3527     std::swap(CTVal, CFVal);
3528     CC = ISD::getSetCCInverse(CC, LHS.getValueType());
3529   }
3530 
3531   // If the constants line up, perform the transform!
3532   if (CTVal->isZero() && CFVal->isAllOnes()) {
3533     SDValue CCVal;
3534     SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
3535 
3536     FVal = Other;
3537     TVal = DAG.getNode(ISD::XOR, dl, Other.getValueType(), Other,
3538                        DAG.getConstant(-1ULL, dl, Other.getValueType()));
3539 
3540     return DAG.getNode(AArch64ISD::CSEL, dl, Sel.getValueType(), FVal, TVal,
3541                        CCVal, Cmp);
3542   }
3543 
3544   return Op;
3545 }
3546 
3547 // If Invert is false, sets 'C' bit of NZCV to 0 if value is 0, else sets 'C'
3548 // bit to 1. If Invert is true, sets 'C' bit of NZCV to 1 if value is 0, else
3549 // sets 'C' bit to 0.
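// For example (illustrative), with Invert == false this emits SUBS(Value, 1),
// whose carry flag (HS) is set exactly when Value != 0; with Invert == true
// it emits SUBS(0, Value), whose carry is set exactly when Value == 0.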
3550 static SDValue valueToCarryFlag(SDValue Value, SelectionDAG &DAG, bool Invert) {
3551   SDLoc DL(Value);
3552   EVT VT = Value.getValueType();
3553   SDValue Op0 = Invert ? DAG.getConstant(0, DL, VT) : Value;
3554   SDValue Op1 = Invert ? Value : DAG.getConstant(1, DL, VT);
3555   SDValue Cmp =
3556       DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::Glue), Op0, Op1);
3557   return Cmp.getValue(1);
3558 }
3559 
3560 // If Invert is false, value is 1 if 'C' bit of NZCV is 1, else 0.
3561 // If Invert is true, value is 0 if 'C' bit of NZCV is 1, else 1.
3562 static SDValue carryFlagToValue(SDValue Flag, EVT VT, SelectionDAG &DAG,
3563                                 bool Invert) {
3564   assert(Flag.getResNo() == 1);
3565   SDLoc DL(Flag);
3566   SDValue Zero = DAG.getConstant(0, DL, VT);
3567   SDValue One = DAG.getConstant(1, DL, VT);
3568   unsigned Cond = Invert ? AArch64CC::LO : AArch64CC::HS;
3569   SDValue CC = DAG.getConstant(Cond, DL, MVT::i32);
3570   return DAG.getNode(AArch64ISD::CSEL, DL, VT, One, Zero, CC, Flag);
3571 }
3572 
3573 // Value is 1 if 'V' bit of NZCV is 1, else 0
3574 static SDValue overflowFlagToValue(SDValue Flag, EVT VT, SelectionDAG &DAG) {
3575   assert(Flag.getResNo() == 1);
3576   SDLoc DL(Flag);
3577   SDValue Zero = DAG.getConstant(0, DL, VT);
3578   SDValue One = DAG.getConstant(1, DL, VT);
3579   SDValue CC = DAG.getConstant(AArch64CC::VS, DL, MVT::i32);
3580   return DAG.getNode(AArch64ISD::CSEL, DL, VT, One, Zero, CC, Flag);
3581 }
3582 
3583 // This lowering is inefficient, but it will get cleaned up by
3584 // `foldOverflowCheck`
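// For an unsigned add-with-carry this produces, roughly (illustrative):
//   cmp  w2, #1        // move the incoming carry into the C flag
//   adcs w0, w0, w1    // add with carry-in, updating NZCV
//   cset w8, hs        // materialize the carry-out as a value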
3585 static SDValue lowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG, unsigned Opcode,
3586                                 bool IsSigned) {
3587   EVT VT0 = Op.getValue(0).getValueType();
3588   EVT VT1 = Op.getValue(1).getValueType();
3589 
3590   if (VT0 != MVT::i32 && VT0 != MVT::i64)
3591     return SDValue();
3592 
3593   bool InvertCarry = Opcode == AArch64ISD::SBCS;
3594   SDValue OpLHS = Op.getOperand(0);
3595   SDValue OpRHS = Op.getOperand(1);
3596   SDValue OpCarryIn = valueToCarryFlag(Op.getOperand(2), DAG, InvertCarry);
3597 
3598   SDLoc DL(Op);
3599   SDVTList VTs = DAG.getVTList(VT0, VT1);
3600 
3601   SDValue Sum = DAG.getNode(Opcode, DL, DAG.getVTList(VT0, MVT::Glue), OpLHS,
3602                             OpRHS, OpCarryIn);
3603 
3604   SDValue OutFlag =
3605       IsSigned ? overflowFlagToValue(Sum.getValue(1), VT1, DAG)
3606                : carryFlagToValue(Sum.getValue(1), VT1, DAG, InvertCarry);
3607 
3608   return DAG.getNode(ISD::MERGE_VALUES, DL, VTs, Sum, OutFlag);
3609 }
3610 
3611 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
3612   // Let legalize expand this if it isn't a legal type yet.
3613   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
3614     return SDValue();
3615 
3616   SDLoc dl(Op);
3617   AArch64CC::CondCode CC;
3618   // The actual operation that sets the overflow or carry flag.
3619   SDValue Value, Overflow;
3620   std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Op, DAG);
3621 
3622   // We use 0 and 1 as false and true values.
3623   SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
3624   SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
3625 
3626   // We use an inverted condition, because the conditional select is inverted
3627   // too. This will allow it to be selected to a single instruction:
3628   // CSINC Wd, WZR, WZR, invert(cond).
3629   SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32);
3630   Overflow = DAG.getNode(AArch64ISD::CSEL, dl, MVT::i32, FVal, TVal,
3631                          CCVal, Overflow);
3632 
3633   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
3634   return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
3635 }
3636 
3637 // Prefetch operands are:
3638 // 1: Address to prefetch
3639 // 2: bool isWrite
3640 // 3: int locality (0 = no locality ... 3 = extreme locality)
3641 // 4: bool isDataCache
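// For example (illustrative), a read prefetch with maximum locality into the
// data cache (isWrite = 0, locality = 3, isDataCache = 1) encodes to PrfOp
// 0b00000, i.e. PLDL1KEEP, whereas locality = 0 yields 0b00001, PLDL1STRM.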
3642 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) {
3643   SDLoc DL(Op);
3644   unsigned IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
3645   unsigned Locality = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
3646   unsigned IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
3647 
3648   bool IsStream = !Locality;
3649   // When the locality number is set
3650   if (Locality) {
3651     // The front-end should have filtered out the out-of-range values
3652     assert(Locality <= 3 && "Prefetch locality out-of-range");
    // The locality degree is the opposite of the cache level: the encoding
    // starts at 0 for L1, so flip the number around.
3656     Locality = 3 - Locality;
3657   }
3658 
  // Build the mask value encoding the expected behavior.
3660   unsigned PrfOp = (IsWrite << 4) |     // Load/Store bit
3661                    (!IsData << 3) |     // IsDataCache bit
3662                    (Locality << 1) |    // Cache level bits
3663                    (unsigned)IsStream;  // Stream bit
3664   return DAG.getNode(AArch64ISD::PREFETCH, DL, MVT::Other, Op.getOperand(0),
3665                      DAG.getConstant(PrfOp, DL, MVT::i32), Op.getOperand(1));
3666 }
3667 
3668 SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op,
3669                                               SelectionDAG &DAG) const {
3670   EVT VT = Op.getValueType();
3671   if (VT.isScalableVector())
3672     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_EXTEND_MERGE_PASSTHRU);
3673 
3674   if (useSVEForFixedLengthVectorVT(VT))
3675     return LowerFixedLengthFPExtendToSVE(Op, DAG);
3676 
3677   assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");
3678   return SDValue();
3679 }
3680 
3681 SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op,
3682                                              SelectionDAG &DAG) const {
3683   if (Op.getValueType().isScalableVector())
3684     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_ROUND_MERGE_PASSTHRU);
3685 
3686   bool IsStrict = Op->isStrictFPOpcode();
3687   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
3688   EVT SrcVT = SrcVal.getValueType();
3689 
3690   if (useSVEForFixedLengthVectorVT(SrcVT))
3691     return LowerFixedLengthFPRoundToSVE(Op, DAG);
3692 
3693   if (SrcVT != MVT::f128) {
3694     // Expand cases where the input is a vector bigger than NEON.
3695     if (useSVEForFixedLengthVectorVT(SrcVT))
3696       return SDValue();
3697 
3698     // It's legal except when f128 is involved
3699     return Op;
3700   }
3701 
3702   return SDValue();
3703 }
3704 
3705 SDValue AArch64TargetLowering::LowerVectorFP_TO_INT(SDValue Op,
3706                                                     SelectionDAG &DAG) const {
3707   // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp.
3708   // Any additional optimization in this function should be recorded
3709   // in the cost tables.
3710   bool IsStrict = Op->isStrictFPOpcode();
3711   EVT InVT = Op.getOperand(IsStrict ? 1 : 0).getValueType();
3712   EVT VT = Op.getValueType();
3713 
3714   if (VT.isScalableVector()) {
3715     unsigned Opcode = Op.getOpcode() == ISD::FP_TO_UINT
3716                           ? AArch64ISD::FCVTZU_MERGE_PASSTHRU
3717                           : AArch64ISD::FCVTZS_MERGE_PASSTHRU;
3718     return LowerToPredicatedOp(Op, DAG, Opcode);
3719   }
3720 
3721   if (useSVEForFixedLengthVectorVT(VT) || useSVEForFixedLengthVectorVT(InVT))
3722     return LowerFixedLengthFPToIntToSVE(Op, DAG);
3723 
3724   unsigned NumElts = InVT.getVectorNumElements();
3725 
3726   // f16 conversions are promoted to f32 when full fp16 is not supported.
3727   if (InVT.getVectorElementType() == MVT::f16 &&
3728       !Subtarget->hasFullFP16()) {
3729     MVT NewVT = MVT::getVectorVT(MVT::f32, NumElts);
3730     SDLoc dl(Op);
3731     if (IsStrict) {
3732       SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {NewVT, MVT::Other},
3733                                 {Op.getOperand(0), Op.getOperand(1)});
3734       return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
3735                          {Ext.getValue(1), Ext.getValue(0)});
3736     }
3737     return DAG.getNode(
3738         Op.getOpcode(), dl, Op.getValueType(),
3739         DAG.getNode(ISD::FP_EXTEND, dl, NewVT, Op.getOperand(0)));
3740   }
3741 
3742   uint64_t VTSize = VT.getFixedSizeInBits();
3743   uint64_t InVTSize = InVT.getFixedSizeInBits();
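  // If the result is narrower than the input (e.g. v2f64 -> v2i32), convert at
  // the input's width first and then truncate the integer result.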
3744   if (VTSize < InVTSize) {
3745     SDLoc dl(Op);
3746     if (IsStrict) {
3747       InVT = InVT.changeVectorElementTypeToInteger();
3748       SDValue Cv = DAG.getNode(Op.getOpcode(), dl, {InVT, MVT::Other},
3749                                {Op.getOperand(0), Op.getOperand(1)});
3750       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, VT, Cv);
3751       return DAG.getMergeValues({Trunc, Cv.getValue(1)}, dl);
3752     }
3753     SDValue Cv =
3754         DAG.getNode(Op.getOpcode(), dl, InVT.changeVectorElementTypeToInteger(),
3755                     Op.getOperand(0));
3756     return DAG.getNode(ISD::TRUNCATE, dl, VT, Cv);
3757   }
3758 
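  // If the result is wider than the input (e.g. v4f16 -> v4i32), extend the
  // floating-point input to the result's element width before converting.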
3759   if (VTSize > InVTSize) {
3760     SDLoc dl(Op);
3761     MVT ExtVT =
3762         MVT::getVectorVT(MVT::getFloatingPointVT(VT.getScalarSizeInBits()),
3763                          VT.getVectorNumElements());
3764     if (IsStrict) {
3765       SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {ExtVT, MVT::Other},
3766                                 {Op.getOperand(0), Op.getOperand(1)});
3767       return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
3768                          {Ext.getValue(1), Ext.getValue(0)});
3769     }
3770     SDValue Ext = DAG.getNode(ISD::FP_EXTEND, dl, ExtVT, Op.getOperand(0));
3771     return DAG.getNode(Op.getOpcode(), dl, VT, Ext);
3772   }
3773 
3774   // Use a scalar operation for conversions between single-element vectors of
3775   // the same size.
3776   if (NumElts == 1) {
3777     SDLoc dl(Op);
3778     SDValue Extract = DAG.getNode(
3779         ISD::EXTRACT_VECTOR_ELT, dl, InVT.getScalarType(),
3780         Op.getOperand(IsStrict ? 1 : 0), DAG.getConstant(0, dl, MVT::i64));
3781     EVT ScalarVT = VT.getScalarType();
3782     if (IsStrict)
3783       return DAG.getNode(Op.getOpcode(), dl, {ScalarVT, MVT::Other},
3784                          {Op.getOperand(0), Extract});
3785     return DAG.getNode(Op.getOpcode(), dl, ScalarVT, Extract);
3786   }
3787 
  // Type-changing conversions are illegal; anything that reaches this point is
  // already legal as-is.
3789   return Op;
3790 }
3791 
3792 SDValue AArch64TargetLowering::LowerFP_TO_INT(SDValue Op,
3793                                               SelectionDAG &DAG) const {
3794   bool IsStrict = Op->isStrictFPOpcode();
3795   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
3796 
3797   if (SrcVal.getValueType().isVector())
3798     return LowerVectorFP_TO_INT(Op, DAG);
3799 
3800   // f16 conversions are promoted to f32 when full fp16 is not supported.
3801   if (SrcVal.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
3802     SDLoc dl(Op);
3803     if (IsStrict) {
3804       SDValue Ext =
3805           DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
3806                       {Op.getOperand(0), SrcVal});
3807       return DAG.getNode(Op.getOpcode(), dl, {Op.getValueType(), MVT::Other},
3808                          {Ext.getValue(1), Ext.getValue(0)});
3809     }
3810     return DAG.getNode(
3811         Op.getOpcode(), dl, Op.getValueType(),
3812         DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, SrcVal));
3813   }
3814 
3815   if (SrcVal.getValueType() != MVT::f128) {
3816     // It's legal except when f128 is involved
3817     return Op;
3818   }
3819 
3820   return SDValue();
3821 }
3822 
3823 SDValue
3824 AArch64TargetLowering::LowerVectorFP_TO_INT_SAT(SDValue Op,
3825                                                 SelectionDAG &DAG) const {
3826   // AArch64 FP-to-int conversions saturate to the destination element size, so
3827   // we can lower common saturating conversions to simple instructions.
3828   SDValue SrcVal = Op.getOperand(0);
3829   EVT SrcVT = SrcVal.getValueType();
3830   EVT DstVT = Op.getValueType();
3831   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3832 
3833   uint64_t SrcElementWidth = SrcVT.getScalarSizeInBits();
3834   uint64_t DstElementWidth = DstVT.getScalarSizeInBits();
3835   uint64_t SatWidth = SatVT.getScalarSizeInBits();
3836   assert(SatWidth <= DstElementWidth &&
3837          "Saturation width cannot exceed result width");
3838 
3839   // TODO: Consider lowering to SVE operations, as in LowerVectorFP_TO_INT.
3840   // Currently, the `llvm.fpto[su]i.sat.*` intrinsics don't accept scalable
3841   // types, so this is hard to reach.
3842   if (DstVT.isScalableVector())
3843     return SDValue();
3844 
3845   EVT SrcElementVT = SrcVT.getVectorElementType();
3846 
  // Promote f16 to f32 when full FP16 support is unavailable, or when the
  // result element is wider than 16 bits, and saturate the promoted result.
3848   if (SrcElementVT == MVT::f16 &&
3849       (!Subtarget->hasFullFP16() || DstElementWidth > 16)) {
3850     MVT F32VT = MVT::getVectorVT(MVT::f32, SrcVT.getVectorNumElements());
3851     SrcVal = DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), F32VT, SrcVal);
3852     SrcVT = F32VT;
3853     SrcElementVT = MVT::f32;
3854     SrcElementWidth = 32;
3855   } else if (SrcElementVT != MVT::f64 && SrcElementVT != MVT::f32 &&
3856              SrcElementVT != MVT::f16)
3857     return SDValue();
3858 
3859   SDLoc DL(Op);
3860   // Cases that we can emit directly.
3861   if (SrcElementWidth == DstElementWidth && SrcElementWidth == SatWidth)
3862     return DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal,
3863                        DAG.getValueType(DstVT.getScalarType()));
3864 
  // Otherwise we emit a cvt that saturates to a wider bit width, and then
  // saturate the result. This is only valid if the legal cvt is wider than the
  // saturate width. For double, as we don't have vector MIN/MAX for i64, it
  // can be simpler to scalarize (at least until sqxtn is selected).
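  // For example, a v4f32 -> v4i16 fptosi.sat is emitted as an FP_TO_SINT_SAT
  // to v4i32, clamped with SMIN/SMAX to the signed 16-bit range, and then
  // truncated to v4i16.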
3869   if (SrcElementWidth < SatWidth || SrcElementVT == MVT::f64)
3870     return SDValue();
3871 
3872   EVT IntVT = SrcVT.changeVectorElementTypeToInteger();
3873   SDValue NativeCvt = DAG.getNode(Op.getOpcode(), DL, IntVT, SrcVal,
3874                                   DAG.getValueType(IntVT.getScalarType()));
3875   SDValue Sat;
3876   if (Op.getOpcode() == ISD::FP_TO_SINT_SAT) {
3877     SDValue MinC = DAG.getConstant(
3878         APInt::getSignedMaxValue(SatWidth).sext(SrcElementWidth), DL, IntVT);
3879     SDValue Min = DAG.getNode(ISD::SMIN, DL, IntVT, NativeCvt, MinC);
3880     SDValue MaxC = DAG.getConstant(
3881         APInt::getSignedMinValue(SatWidth).sext(SrcElementWidth), DL, IntVT);
3882     Sat = DAG.getNode(ISD::SMAX, DL, IntVT, Min, MaxC);
3883   } else {
3884     SDValue MinC = DAG.getConstant(
3885         APInt::getAllOnesValue(SatWidth).zext(SrcElementWidth), DL, IntVT);
3886     Sat = DAG.getNode(ISD::UMIN, DL, IntVT, NativeCvt, MinC);
3887   }
3888 
3889   return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Sat);
3890 }
3891 
3892 SDValue AArch64TargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
3893                                                   SelectionDAG &DAG) const {
3894   // AArch64 FP-to-int conversions saturate to the destination register size, so
3895   // we can lower common saturating conversions to simple instructions.
3896   SDValue SrcVal = Op.getOperand(0);
3897   EVT SrcVT = SrcVal.getValueType();
3898 
3899   if (SrcVT.isVector())
3900     return LowerVectorFP_TO_INT_SAT(Op, DAG);
3901 
3902   EVT DstVT = Op.getValueType();
3903   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3904   uint64_t SatWidth = SatVT.getScalarSizeInBits();
3905   uint64_t DstWidth = DstVT.getScalarSizeInBits();
3906   assert(SatWidth <= DstWidth && "Saturation width cannot exceed result width");
3907 
3908   // In the absence of FP16 support, promote f16 to f32 and saturate the result.
3909   if (SrcVT == MVT::f16 && !Subtarget->hasFullFP16()) {
3910     SrcVal = DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), MVT::f32, SrcVal);
3911     SrcVT = MVT::f32;
3912   } else if (SrcVT != MVT::f64 && SrcVT != MVT::f32 && SrcVT != MVT::f16)
3913     return SDValue();
3914 
3915   SDLoc DL(Op);
3916   // Cases that we can emit directly.
3917   if ((SrcVT == MVT::f64 || SrcVT == MVT::f32 ||
3918        (SrcVT == MVT::f16 && Subtarget->hasFullFP16())) &&
3919       DstVT == SatVT && (DstVT == MVT::i64 || DstVT == MVT::i32))
3920     return DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal,
3921                        DAG.getValueType(DstVT));
3922 
  // Otherwise we emit a cvt that saturates to a wider bit width, and then
  // saturate the result. This is only valid if the legal cvt is wider than the
  // saturate width.
3926   if (DstWidth < SatWidth)
3927     return SDValue();
3928 
3929   SDValue NativeCvt =
3930       DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal, DAG.getValueType(DstVT));
3931   SDValue Sat;
3932   if (Op.getOpcode() == ISD::FP_TO_SINT_SAT) {
3933     SDValue MinC = DAG.getConstant(
3934         APInt::getSignedMaxValue(SatWidth).sext(DstWidth), DL, DstVT);
3935     SDValue Min = DAG.getNode(ISD::SMIN, DL, DstVT, NativeCvt, MinC);
3936     SDValue MaxC = DAG.getConstant(
3937         APInt::getSignedMinValue(SatWidth).sext(DstWidth), DL, DstVT);
3938     Sat = DAG.getNode(ISD::SMAX, DL, DstVT, Min, MaxC);
3939   } else {
3940     SDValue MinC = DAG.getConstant(
3941         APInt::getAllOnesValue(SatWidth).zext(DstWidth), DL, DstVT);
3942     Sat = DAG.getNode(ISD::UMIN, DL, DstVT, NativeCvt, MinC);
3943   }
3944 
3945   return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Sat);
3946 }
3947 
3948 SDValue AArch64TargetLowering::LowerVectorINT_TO_FP(SDValue Op,
3949                                                     SelectionDAG &DAG) const {
3950   // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp.
3951   // Any additional optimization in this function should be recorded
3952   // in the cost tables.
3953   bool IsStrict = Op->isStrictFPOpcode();
3954   EVT VT = Op.getValueType();
3955   SDLoc dl(Op);
3956   SDValue In = Op.getOperand(IsStrict ? 1 : 0);
3957   EVT InVT = In.getValueType();
3958   unsigned Opc = Op.getOpcode();
3959   bool IsSigned = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP;
3960 
3961   if (VT.isScalableVector()) {
3962     if (InVT.getVectorElementType() == MVT::i1) {
      // SVE predicates can't be converted to FP directly; extend them to a
      // suitable integer vector first.
3964       unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3965       EVT CastVT = getPromotedVTForPredicate(InVT);
3966       In = DAG.getNode(CastOpc, dl, CastVT, In);
3967       return DAG.getNode(Opc, dl, VT, In);
3968     }
3969 
3970     unsigned Opcode = IsSigned ? AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU
3971                                : AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU;
3972     return LowerToPredicatedOp(Op, DAG, Opcode);
3973   }
3974 
3975   if (useSVEForFixedLengthVectorVT(VT) || useSVEForFixedLengthVectorVT(InVT))
3976     return LowerFixedLengthIntToFPToSVE(Op, DAG);
3977 
3978   uint64_t VTSize = VT.getFixedSizeInBits();
3979   uint64_t InVTSize = InVT.getFixedSizeInBits();
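  // If the floating-point result is narrower than the integer input
  // (e.g. v2i64 -> v2f32), convert at the input's width and round down with
  // FP_ROUND.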
3980   if (VTSize < InVTSize) {
3981     MVT CastVT =
3982         MVT::getVectorVT(MVT::getFloatingPointVT(InVT.getScalarSizeInBits()),
3983                          InVT.getVectorNumElements());
3984     if (IsStrict) {
3985       In = DAG.getNode(Opc, dl, {CastVT, MVT::Other},
3986                        {Op.getOperand(0), In});
3987       return DAG.getNode(
3988           ISD::STRICT_FP_ROUND, dl, {VT, MVT::Other},
3989           {In.getValue(1), In.getValue(0), DAG.getIntPtrConstant(0, dl)});
3990     }
3991     In = DAG.getNode(Opc, dl, CastVT, In);
3992     return DAG.getNode(ISD::FP_ROUND, dl, VT, In, DAG.getIntPtrConstant(0, dl));
3993   }
3994 
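  // If the floating-point result is wider than the integer input
  // (e.g. v2i32 -> v2f64), sign- or zero-extend the integer input first.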
3995   if (VTSize > InVTSize) {
3996     unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3997     EVT CastVT = VT.changeVectorElementTypeToInteger();
3998     In = DAG.getNode(CastOpc, dl, CastVT, In);
3999     if (IsStrict)
4000       return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op.getOperand(0), In});
4001     return DAG.getNode(Opc, dl, VT, In);
4002   }
4003 
4004   // Use a scalar operation for conversions between single-element vectors of
4005   // the same size.
4006   if (VT.getVectorNumElements() == 1) {
4007     SDValue Extract = DAG.getNode(
4008         ISD::EXTRACT_VECTOR_ELT, dl, InVT.getScalarType(),
4009         In, DAG.getConstant(0, dl, MVT::i64));
4010     EVT ScalarVT = VT.getScalarType();
4011     if (IsStrict)
4012       return DAG.getNode(Op.getOpcode(), dl, {ScalarVT, MVT::Other},
4013                          {Op.getOperand(0), Extract});
4014     return DAG.getNode(Op.getOpcode(), dl, ScalarVT, Extract);
4015   }
4016 
4017   return Op;
4018 }
4019 
4020 SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op,
4021                                             SelectionDAG &DAG) const {
4022   if (Op.getValueType().isVector())
4023     return LowerVectorINT_TO_FP(Op, DAG);
4024 
4025   bool IsStrict = Op->isStrictFPOpcode();
4026   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
4027 
4028   // f16 conversions are promoted to f32 when full fp16 is not supported.
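  // For example, sitofp i32 -> half without +fullfp16 becomes an i32 -> f32
  // conversion followed by an FP_ROUND back down to f16.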
4029   if (Op.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
4030     SDLoc dl(Op);
4031     if (IsStrict) {
4032       SDValue Val = DAG.getNode(Op.getOpcode(), dl, {MVT::f32, MVT::Other},
4033                                 {Op.getOperand(0), SrcVal});
4034       return DAG.getNode(
4035           ISD::STRICT_FP_ROUND, dl, {MVT::f16, MVT::Other},
4036           {Val.getValue(1), Val.getValue(0), DAG.getIntPtrConstant(0, dl)});
4037     }
4038     return DAG.getNode(
4039         ISD::FP_ROUND, dl, MVT::f16,
4040         DAG.getNode(Op.getOpcode(), dl, MVT::f32, SrcVal),
4041         DAG.getIntPtrConstant(0, dl));
4042   }
4043 
4044   // i128 conversions are libcalls.
4045   if (SrcVal.getValueType() == MVT::i128)
4046     return SDValue();
4047 
  // Other conversions are legal, unless the destination is the completely
  // software-based fp128.
4050   if (Op.getValueType() != MVT::f128)
4051     return Op;
4052   return SDValue();
4053 }
4054 
4055 SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
4056                                             SelectionDAG &DAG) const {
4057   // For iOS, we want to call an alternative entry point: __sincos_stret,
4058   // which returns the values in two S / D registers.
4059   SDLoc dl(Op);
4060   SDValue Arg = Op.getOperand(0);
4061   EVT ArgVT = Arg.getValueType();
4062   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
4063 
4064   ArgListTy Args;
4065   ArgListEntry Entry;
4066 
4067   Entry.Node = Arg;
4068   Entry.Ty = ArgTy;
4069   Entry.IsSExt = false;
4070   Entry.IsZExt = false;
4071   Args.push_back(Entry);
4072 
4073   RTLIB::Libcall LC = ArgVT == MVT::f64 ? RTLIB::SINCOS_STRET_F64
4074                                         : RTLIB::SINCOS_STRET_F32;
4075   const char *LibcallName = getLibcallName(LC);
4076   SDValue Callee =
4077       DAG.getExternalSymbol(LibcallName, getPointerTy(DAG.getDataLayout()));
4078 
4079   StructType *RetTy = StructType::get(ArgTy, ArgTy);
4080   TargetLowering::CallLoweringInfo CLI(DAG);
4081   CLI.setDebugLoc(dl)
4082       .setChain(DAG.getEntryNode())
4083       .setLibCallee(CallingConv::Fast, RetTy, Callee, std::move(Args));
4084 
4085   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
4086   return CallResult.first;
4087 }
4088 
4089 static MVT getSVEContainerType(EVT ContentTy);
4090 
4091 SDValue AArch64TargetLowering::LowerBITCAST(SDValue Op,
4092                                             SelectionDAG &DAG) const {
4093   EVT OpVT = Op.getValueType();
4094   EVT ArgVT = Op.getOperand(0).getValueType();
4095 
4096   if (useSVEForFixedLengthVectorVT(OpVT))
4097     return LowerFixedLengthBitcastToSVE(Op, DAG);
4098 
4099   if (OpVT.isScalableVector()) {
4100     // Bitcasting between unpacked vector types of different element counts is
4101     // not a NOP because the live elements are laid out differently.
4102     //                01234567
4103     // e.g. nxv2i32 = XX??XX??
4104     //      nxv4f16 = X?X?X?X?
4105     if (OpVT.getVectorElementCount() != ArgVT.getVectorElementCount())
4106       return SDValue();
4107 
4108     if (isTypeLegal(OpVT) && !isTypeLegal(ArgVT)) {
4109       assert(OpVT.isFloatingPoint() && !ArgVT.isFloatingPoint() &&
4110              "Expected int->fp bitcast!");
4111       SDValue ExtResult =
4112           DAG.getNode(ISD::ANY_EXTEND, SDLoc(Op), getSVEContainerType(ArgVT),
4113                       Op.getOperand(0));
4114       return getSVESafeBitCast(OpVT, ExtResult, DAG);
4115     }
4116     return getSVESafeBitCast(OpVT, Op.getOperand(0), DAG);
4117   }
4118 
4119   if (OpVT != MVT::f16 && OpVT != MVT::bf16)
4120     return SDValue();
4121 
4122   // Bitcasts between f16 and bf16 are legal.
4123   if (ArgVT == MVT::f16 || ArgVT == MVT::bf16)
4124     return Op;
4125 
4126   assert(ArgVT == MVT::i16);
4127   SDLoc DL(Op);
4128 
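  // Lower the i16 -> f16/bf16 bitcast by widening to i32, bitcasting to f32,
  // and then extracting the 16-bit H subregister of the FP register.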
4129   Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op.getOperand(0));
4130   Op = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Op);
4131   return SDValue(
4132       DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, OpVT, Op,
4133                          DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
4134       0);
4135 }
4136 
4137 static EVT getExtensionTo64Bits(const EVT &OrigVT) {
4138   if (OrigVT.getSizeInBits() >= 64)
4139     return OrigVT;
4140 
4141   assert(OrigVT.isSimple() && "Expecting a simple value type");
4142 
4143   MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
4144   switch (OrigSimpleTy) {
4145   default: llvm_unreachable("Unexpected Vector Type");
4146   case MVT::v2i8:
4147   case MVT::v2i16:
    return MVT::v2i32;
  case MVT::v4i8:
    return MVT::v4i16;
4151   }
4152 }
4153 
4154 static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG,
4155                                                  const EVT &OrigTy,
4156                                                  const EVT &ExtTy,
4157                                                  unsigned ExtOpcode) {
4158   // The vector originally had a size of OrigTy. It was then extended to ExtTy.
4159   // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
4160   // 64-bits we need to insert a new extension so that it will be 64-bits.
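  // For example, an operand that was originally v4i8 (32 bits) and had been
  // extended to a 128-bit v4i32 is re-extended here to v4i16 so it can feed
  // S/UMULL directly.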
4161   assert(ExtTy.is128BitVector() && "Unexpected extension size");
4162   if (OrigTy.getSizeInBits() >= 64)
4163     return N;
4164 
4165   // Must extend size to at least 64 bits to be used as an operand for VMULL.
4166   EVT NewVT = getExtensionTo64Bits(OrigTy);
4167 
4168   return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
4169 }
4170 
4171 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
4172                                    bool isSigned) {
4173   EVT VT = N->getValueType(0);
4174 
4175   if (N->getOpcode() != ISD::BUILD_VECTOR)
4176     return false;
4177 
4178   for (const SDValue &Elt : N->op_values()) {
4179     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
4180       unsigned EltSize = VT.getScalarSizeInBits();
4181       unsigned HalfSize = EltSize / 2;
4182       if (isSigned) {
4183         if (!isIntN(HalfSize, C->getSExtValue()))
4184           return false;
4185       } else {
4186         if (!isUIntN(HalfSize, C->getZExtValue()))
4187           return false;
4188       }
4189       continue;
4190     }
4191     return false;
4192   }
4193 
4194   return true;
4195 }
4196 
4197 static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
4198   if (N->getOpcode() == ISD::SIGN_EXTEND ||
4199       N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND)
4200     return addRequiredExtensionForVectorMULL(N->getOperand(0), DAG,
4201                                              N->getOperand(0)->getValueType(0),
4202                                              N->getValueType(0),
4203                                              N->getOpcode());
4204 
4205   assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
4206   EVT VT = N->getValueType(0);
4207   SDLoc dl(N);
4208   unsigned EltSize = VT.getScalarSizeInBits() / 2;
4209   unsigned NumElts = VT.getVectorNumElements();
4210   MVT TruncVT = MVT::getIntegerVT(EltSize);
4211   SmallVector<SDValue, 8> Ops;
4212   for (unsigned i = 0; i != NumElts; ++i) {
4213     ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
4214     const APInt &CInt = C->getAPIntValue();
4215     // Element types smaller than 32 bits are not legal, so use i32 elements.
4216     // The values are implicitly truncated so sext vs. zext doesn't matter.
4217     Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
4218   }
4219   return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
4220 }
4221 
4222 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
4223   return N->getOpcode() == ISD::SIGN_EXTEND ||
4224          N->getOpcode() == ISD::ANY_EXTEND ||
4225          isExtendedBUILD_VECTOR(N, DAG, true);
4226 }
4227 
4228 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
4229   return N->getOpcode() == ISD::ZERO_EXTEND ||
4230          N->getOpcode() == ISD::ANY_EXTEND ||
4231          isExtendedBUILD_VECTOR(N, DAG, false);
4232 }
4233 
4234 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
4235   unsigned Opcode = N->getOpcode();
4236   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
4237     SDNode *N0 = N->getOperand(0).getNode();
4238     SDNode *N1 = N->getOperand(1).getNode();
4239     return N0->hasOneUse() && N1->hasOneUse() &&
4240       isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
4241   }
4242   return false;
4243 }
4244 
4245 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
4246   unsigned Opcode = N->getOpcode();
4247   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
4248     SDNode *N0 = N->getOperand(0).getNode();
4249     SDNode *N1 = N->getOperand(1).getNode();
4250     return N0->hasOneUse() && N1->hasOneUse() &&
4251       isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
4252   }
4253   return false;
4254 }
4255 
4256 SDValue AArch64TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
4257                                                 SelectionDAG &DAG) const {
  // The rounding mode is in bits 23:22 of the FPCR.
  // The mapping from the AArch64 rounding mode to FLT_ROUNDS values is
  // 0->1, 1->2, 2->3, 3->0. The formula we use to implement this is
  // ((FPCR + (1 << 22)) >> 22) & 3, so that the shift and mask can be folded
  // into a single bitfield extract.
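  // For example, with FPCR.RMode == 0b11 (round toward zero) the computation
  // (((3 << 22) + (1 << 22)) >> 22) & 3 yields 0, which is the FLT_ROUNDS
  // value for round-toward-zero.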
4262   SDLoc dl(Op);
4263 
4264   SDValue Chain = Op.getOperand(0);
4265   SDValue FPCR_64 = DAG.getNode(
4266       ISD::INTRINSIC_W_CHAIN, dl, {MVT::i64, MVT::Other},
4267       {Chain, DAG.getConstant(Intrinsic::aarch64_get_fpcr, dl, MVT::i64)});
4268   Chain = FPCR_64.getValue(1);
4269   SDValue FPCR_32 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, FPCR_64);
4270   SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPCR_32,
4271                                   DAG.getConstant(1U << 22, dl, MVT::i32));
4272   SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
4273                               DAG.getConstant(22, dl, MVT::i32));
4274   SDValue AND = DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
4275                             DAG.getConstant(3, dl, MVT::i32));
4276   return DAG.getMergeValues({AND, Chain}, dl);
4277 }
4278 
4279 SDValue AArch64TargetLowering::LowerSET_ROUNDING(SDValue Op,
4280                                                  SelectionDAG &DAG) const {
4281   SDLoc DL(Op);
4282   SDValue Chain = Op->getOperand(0);
4283   SDValue RMValue = Op->getOperand(1);
4284 
4285   // The rounding mode is in bits 23:22 of the FPCR.
  // The mapping from the llvm.set.rounding argument to the rounding mode in
  // FPCR is 0->3, 1->0, 2->1, 3->2. The formula we use to implement this is
  // (((arg - 1) & 3) << 22).
  //
  // The argument of llvm.set.rounding must be within the range [0, 3], so
  // NearestTiesToAway (4) is not handled here. It is the responsibility of the
  // code that generates llvm.set.rounding to ensure this condition.
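  // For example, llvm.set.rounding(0) (round toward zero) computes
  // ((0 - 1) & 3) == 3, the FPCR encoding for round-toward-zero (RZ).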
4293 
4294   // Calculate new value of FPCR[23:22].
4295   RMValue = DAG.getNode(ISD::SUB, DL, MVT::i32, RMValue,
4296                         DAG.getConstant(1, DL, MVT::i32));
4297   RMValue = DAG.getNode(ISD::AND, DL, MVT::i32, RMValue,
4298                         DAG.getConstant(0x3, DL, MVT::i32));
4299   RMValue =
4300       DAG.getNode(ISD::SHL, DL, MVT::i32, RMValue,
4301                   DAG.getConstant(AArch64::RoundingBitsPos, DL, MVT::i32));
4302   RMValue = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, RMValue);
4303 
4304   // Get current value of FPCR.
4305   SDValue Ops[] = {
4306       Chain, DAG.getTargetConstant(Intrinsic::aarch64_get_fpcr, DL, MVT::i64)};
4307   SDValue FPCR =
4308       DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, {MVT::i64, MVT::Other}, Ops);
4309   Chain = FPCR.getValue(1);
4310   FPCR = FPCR.getValue(0);
4311 
  // Put the new rounding mode into FPCR[23:22].
4313   const int RMMask = ~(AArch64::Rounding::rmMask << AArch64::RoundingBitsPos);
4314   FPCR = DAG.getNode(ISD::AND, DL, MVT::i64, FPCR,
4315                      DAG.getConstant(RMMask, DL, MVT::i64));
4316   FPCR = DAG.getNode(ISD::OR, DL, MVT::i64, FPCR, RMValue);
4317   SDValue Ops2[] = {
4318       Chain, DAG.getTargetConstant(Intrinsic::aarch64_set_fpcr, DL, MVT::i64),
4319       FPCR};
4320   return DAG.getNode(ISD::INTRINSIC_VOID, DL, MVT::Other, Ops2);
4321 }
4322 
4323 SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
4324   EVT VT = Op.getValueType();
4325 
4326   // If SVE is available then i64 vector multiplications can also be made legal.
4327   bool OverrideNEON = VT == MVT::v2i64 || VT == MVT::v1i64;
4328 
4329   if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT, OverrideNEON))
4330     return LowerToPredicatedOp(Op, DAG, AArch64ISD::MUL_PRED);
4331 
4332   // Multiplications are only custom-lowered for 128-bit vectors so that
4333   // VMULL can be detected.  Otherwise v2i64 multiplications are not legal.
4334   assert(VT.is128BitVector() && VT.isInteger() &&
4335          "unexpected type for custom-lowering ISD::MUL");
4336   SDNode *N0 = Op.getOperand(0).getNode();
4337   SDNode *N1 = Op.getOperand(1).getNode();
4338   unsigned NewOpc = 0;
4339   bool isMLA = false;
4340   bool isN0SExt = isSignExtended(N0, DAG);
4341   bool isN1SExt = isSignExtended(N1, DAG);
4342   if (isN0SExt && isN1SExt)
4343     NewOpc = AArch64ISD::SMULL;
4344   else {
4345     bool isN0ZExt = isZeroExtended(N0, DAG);
4346     bool isN1ZExt = isZeroExtended(N1, DAG);
4347     if (isN0ZExt && isN1ZExt)
4348       NewOpc = AArch64ISD::UMULL;
4349     else if (isN1SExt || isN1ZExt) {
      // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
      // into (s/zext A * s/zext C) + (s/zext B * s/zext C).
4352       if (isN1SExt && isAddSubSExt(N0, DAG)) {
4353         NewOpc = AArch64ISD::SMULL;
4354         isMLA = true;
4355       } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
        NewOpc = AArch64ISD::UMULL;
4357         isMLA = true;
4358       } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
4359         std::swap(N0, N1);
        NewOpc = AArch64ISD::UMULL;
4361         isMLA = true;
4362       }
4363     }
4364 
4365     if (!NewOpc) {
4366       if (VT == MVT::v2i64)
4367         // Fall through to expand this.  It is not legal.
4368         return SDValue();
4369       else
4370         // Other vector multiplications are legal.
4371         return Op;
4372     }
4373   }
4374 
  // Legalize to an S/UMULL instruction.
4376   SDLoc DL(Op);
4377   SDValue Op0;
4378   SDValue Op1 = skipExtensionForVectorMULL(N1, DAG);
4379   if (!isMLA) {
4380     Op0 = skipExtensionForVectorMULL(N0, DAG);
4381     assert(Op0.getValueType().is64BitVector() &&
4382            Op1.getValueType().is64BitVector() &&
4383            "unexpected types for extended operands to VMULL");
4384     return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
4385   }
  // Optimize (zext A + zext B) * C to (S/UMULL A, C) + (S/UMULL B, C) during
  // isel lowering to take advantage of no-stall back-to-back s/umul + s/umla
  // on CPUs with accumulate forwarding such as Cortex-A53/A57.
4389   SDValue N00 = skipExtensionForVectorMULL(N0->getOperand(0).getNode(), DAG);
4390   SDValue N01 = skipExtensionForVectorMULL(N0->getOperand(1).getNode(), DAG);
4391   EVT Op1VT = Op1.getValueType();
4392   return DAG.getNode(N0->getOpcode(), DL, VT,
4393                      DAG.getNode(NewOpc, DL, VT,
4394                                DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
4395                      DAG.getNode(NewOpc, DL, VT,
4396                                DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
4397 }
4398 
4399 static inline SDValue getPTrue(SelectionDAG &DAG, SDLoc DL, EVT VT,
4400                                int Pattern) {
4401   if (VT == MVT::nxv1i1 && Pattern == AArch64SVEPredPattern::all)
4402     return DAG.getConstant(1, DL, MVT::nxv1i1);
4403   return DAG.getNode(AArch64ISD::PTRUE, DL, VT,
4404                      DAG.getTargetConstant(Pattern, DL, MVT::i32));
4405 }
4406 
4407 // Returns a safe bitcast between two scalable vector predicates, where
4408 // any newly created lanes from a widening bitcast are defined as zero.
4409 static SDValue getSVEPredicateBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) {
4410   SDLoc DL(Op);
4411   EVT InVT = Op.getValueType();
4412 
4413   assert(InVT.getVectorElementType() == MVT::i1 &&
4414          VT.getVectorElementType() == MVT::i1 &&
4415          "Expected a predicate-to-predicate bitcast");
4416   assert(VT.isScalableVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
4417          InVT.isScalableVector() &&
4418          DAG.getTargetLoweringInfo().isTypeLegal(InVT) &&
4419          "Only expect to cast between legal scalable predicate types!");
4420 
  // Return the operand if the cast isn't changing type,
  // e.g. <vscale x 16 x i1> -> <vscale x 16 x i1>.
4423   if (InVT == VT)
4424     return Op;
4425 
4426   SDValue Reinterpret = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Op);
4427 
4428   // We only have to zero the lanes if new lanes are being defined, e.g. when
4429   // casting from <vscale x 2 x i1> to <vscale x 16 x i1>. If this is not the
4430   // case (e.g. when casting from <vscale x 16 x i1> -> <vscale x 2 x i1>) then
4431   // we can return here.
4432   if (InVT.bitsGT(VT))
4433     return Reinterpret;
4434 
4435   // Check if the other lanes are already known to be zeroed by
4436   // construction.
4437   if (isZeroingInactiveLanes(Op))
4438     return Reinterpret;
4439 
4440   // Zero the newly introduced lanes.
4441   SDValue Mask = DAG.getConstant(1, DL, InVT);
4442   Mask = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Mask);
4443   return DAG.getNode(ISD::AND, DL, VT, Reinterpret, Mask);
4444 }
4445 
4446 SDValue AArch64TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4447                                                       SelectionDAG &DAG) const {
4448   unsigned IntNo = Op.getConstantOperandVal(1);
4449   SDLoc DL(Op);
4450   switch (IntNo) {
4451   default:
4452     return SDValue(); // Don't custom lower most intrinsics.
4453   case Intrinsic::aarch64_mops_memset_tag: {
4454     auto Node = cast<MemIntrinsicSDNode>(Op.getNode());
4455     SDValue Chain = Node->getChain();
4456     SDValue Dst = Op.getOperand(2);
4457     SDValue Val = Op.getOperand(3);
4458     Val = DAG.getAnyExtOrTrunc(Val, DL, MVT::i64);
4459     SDValue Size = Op.getOperand(4);
4460     auto Alignment = Node->getMemOperand()->getAlign();
4461     bool IsVol = Node->isVolatile();
4462     auto DstPtrInfo = Node->getPointerInfo();
4463 
4464     const auto &SDI =
4465         static_cast<const AArch64SelectionDAGInfo &>(DAG.getSelectionDAGInfo());
4466     SDValue MS =
4467         SDI.EmitMOPS(AArch64ISD::MOPS_MEMSET_TAGGING, DAG, DL, Chain, Dst, Val,
4468                      Size, Alignment, IsVol, DstPtrInfo, MachinePointerInfo{});
4469 
4470     // MOPS_MEMSET_TAGGING has 3 results (DstWb, SizeWb, Chain) whereas the
4471     // intrinsic has 2. So hide SizeWb using MERGE_VALUES. Otherwise
4472     // LowerOperationWrapper will complain that the number of results has
4473     // changed.
4474     return DAG.getMergeValues({MS.getValue(0), MS.getValue(2)}, DL);
4475   }
4476   case Intrinsic::aarch64_sme_get_pstatesm: {
4477     SDValue Chain = Op.getOperand(0);
4478     SDValue MRS = DAG.getNode(
4479         AArch64ISD::MRS, DL, DAG.getVTList(MVT::i64, MVT::Glue, MVT::Other),
4480         Chain, DAG.getConstant(AArch64SysReg::SVCR, DL, MVT::i64));
4481     SDValue Mask = DAG.getConstant(/* PSTATE.SM */ 1, DL, MVT::i64);
4482     SDValue And = DAG.getNode(ISD::AND, DL, MVT::i64, MRS, Mask);
4483     return DAG.getMergeValues({And, Chain}, DL);
4484   }
4485   }
4486 }
4487 
4488 SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4489                                                      SelectionDAG &DAG) const {
4490   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4491   SDLoc dl(Op);
4492   switch (IntNo) {
4493   default: return SDValue();    // Don't custom lower most intrinsics.
4494   case Intrinsic::thread_pointer: {
4495     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4496     return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
4497   }
4498   case Intrinsic::aarch64_neon_abs: {
4499     EVT Ty = Op.getValueType();
4500     if (Ty == MVT::i64) {
4501       SDValue Result = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64,
4502                                    Op.getOperand(1));
4503       Result = DAG.getNode(ISD::ABS, dl, MVT::v1i64, Result);
4504       return DAG.getNode(ISD::BITCAST, dl, MVT::i64, Result);
4505     } else if (Ty.isVector() && Ty.isInteger() && isTypeLegal(Ty)) {
4506       return DAG.getNode(ISD::ABS, dl, Ty, Op.getOperand(1));
4507     } else {
      report_fatal_error("Unexpected type for AArch64 NEON intrinsic");
4509     }
4510   }
4511   case Intrinsic::aarch64_neon_smax:
4512     return DAG.getNode(ISD::SMAX, dl, Op.getValueType(),
4513                        Op.getOperand(1), Op.getOperand(2));
4514   case Intrinsic::aarch64_neon_umax:
4515     return DAG.getNode(ISD::UMAX, dl, Op.getValueType(),
4516                        Op.getOperand(1), Op.getOperand(2));
4517   case Intrinsic::aarch64_neon_smin:
4518     return DAG.getNode(ISD::SMIN, dl, Op.getValueType(),
4519                        Op.getOperand(1), Op.getOperand(2));
4520   case Intrinsic::aarch64_neon_umin:
4521     return DAG.getNode(ISD::UMIN, dl, Op.getValueType(),
4522                        Op.getOperand(1), Op.getOperand(2));
4523 
4524   case Intrinsic::aarch64_sve_sunpkhi:
4525     return DAG.getNode(AArch64ISD::SUNPKHI, dl, Op.getValueType(),
4526                        Op.getOperand(1));
4527   case Intrinsic::aarch64_sve_sunpklo:
4528     return DAG.getNode(AArch64ISD::SUNPKLO, dl, Op.getValueType(),
4529                        Op.getOperand(1));
4530   case Intrinsic::aarch64_sve_uunpkhi:
4531     return DAG.getNode(AArch64ISD::UUNPKHI, dl, Op.getValueType(),
4532                        Op.getOperand(1));
4533   case Intrinsic::aarch64_sve_uunpklo:
4534     return DAG.getNode(AArch64ISD::UUNPKLO, dl, Op.getValueType(),
4535                        Op.getOperand(1));
4536   case Intrinsic::aarch64_sve_clasta_n:
4537     return DAG.getNode(AArch64ISD::CLASTA_N, dl, Op.getValueType(),
4538                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4539   case Intrinsic::aarch64_sve_clastb_n:
4540     return DAG.getNode(AArch64ISD::CLASTB_N, dl, Op.getValueType(),
4541                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4542   case Intrinsic::aarch64_sve_lasta:
4543     return DAG.getNode(AArch64ISD::LASTA, dl, Op.getValueType(),
4544                        Op.getOperand(1), Op.getOperand(2));
4545   case Intrinsic::aarch64_sve_lastb:
4546     return DAG.getNode(AArch64ISD::LASTB, dl, Op.getValueType(),
4547                        Op.getOperand(1), Op.getOperand(2));
4548   case Intrinsic::aarch64_sve_rev:
4549     return DAG.getNode(ISD::VECTOR_REVERSE, dl, Op.getValueType(),
4550                        Op.getOperand(1));
4551   case Intrinsic::aarch64_sve_tbl:
4552     return DAG.getNode(AArch64ISD::TBL, dl, Op.getValueType(),
4553                        Op.getOperand(1), Op.getOperand(2));
4554   case Intrinsic::aarch64_sve_trn1:
4555     return DAG.getNode(AArch64ISD::TRN1, dl, Op.getValueType(),
4556                        Op.getOperand(1), Op.getOperand(2));
4557   case Intrinsic::aarch64_sve_trn2:
4558     return DAG.getNode(AArch64ISD::TRN2, dl, Op.getValueType(),
4559                        Op.getOperand(1), Op.getOperand(2));
4560   case Intrinsic::aarch64_sve_uzp1:
4561     return DAG.getNode(AArch64ISD::UZP1, dl, Op.getValueType(),
4562                        Op.getOperand(1), Op.getOperand(2));
4563   case Intrinsic::aarch64_sve_uzp2:
4564     return DAG.getNode(AArch64ISD::UZP2, dl, Op.getValueType(),
4565                        Op.getOperand(1), Op.getOperand(2));
4566   case Intrinsic::aarch64_sve_zip1:
4567     return DAG.getNode(AArch64ISD::ZIP1, dl, Op.getValueType(),
4568                        Op.getOperand(1), Op.getOperand(2));
4569   case Intrinsic::aarch64_sve_zip2:
4570     return DAG.getNode(AArch64ISD::ZIP2, dl, Op.getValueType(),
4571                        Op.getOperand(1), Op.getOperand(2));
4572   case Intrinsic::aarch64_sve_splice:
4573     return DAG.getNode(AArch64ISD::SPLICE, dl, Op.getValueType(),
4574                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4575   case Intrinsic::aarch64_sve_ptrue:
4576     return getPTrue(DAG, dl, Op.getValueType(),
4577                     cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
4578   case Intrinsic::aarch64_sve_clz:
4579     return DAG.getNode(AArch64ISD::CTLZ_MERGE_PASSTHRU, dl, Op.getValueType(),
4580                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4581   case Intrinsic::aarch64_sme_cntsb:
4582     return DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
4583                        DAG.getConstant(1, dl, MVT::i32));
4584   case Intrinsic::aarch64_sme_cntsh: {
4585     SDValue One = DAG.getConstant(1, dl, MVT::i32);
4586     SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(), One);
4587     return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes, One);
4588   }
4589   case Intrinsic::aarch64_sme_cntsw: {
4590     SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
4591                                 DAG.getConstant(1, dl, MVT::i32));
4592     return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes,
4593                        DAG.getConstant(2, dl, MVT::i32));
4594   }
4595   case Intrinsic::aarch64_sme_cntsd: {
4596     SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
4597                                 DAG.getConstant(1, dl, MVT::i32));
4598     return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes,
4599                        DAG.getConstant(3, dl, MVT::i32));
4600   }
4601   case Intrinsic::aarch64_sve_cnt: {
4602     SDValue Data = Op.getOperand(3);
4603     // CTPOP only supports integer operands.
4604     if (Data.getValueType().isFloatingPoint())
4605       Data = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Data);
4606     return DAG.getNode(AArch64ISD::CTPOP_MERGE_PASSTHRU, dl, Op.getValueType(),
4607                        Op.getOperand(2), Data, Op.getOperand(1));
4608   }
4609   case Intrinsic::aarch64_sve_dupq_lane:
4610     return LowerDUPQLane(Op, DAG);
4611   case Intrinsic::aarch64_sve_convert_from_svbool:
4612     return getSVEPredicateBitCast(Op.getValueType(), Op.getOperand(1), DAG);
4613   case Intrinsic::aarch64_sve_convert_to_svbool:
4614     return getSVEPredicateBitCast(MVT::nxv16i1, Op.getOperand(1), DAG);
4615   case Intrinsic::aarch64_sve_fneg:
4616     return DAG.getNode(AArch64ISD::FNEG_MERGE_PASSTHRU, dl, Op.getValueType(),
4617                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4618   case Intrinsic::aarch64_sve_frintp:
4619     return DAG.getNode(AArch64ISD::FCEIL_MERGE_PASSTHRU, dl, Op.getValueType(),
4620                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4621   case Intrinsic::aarch64_sve_frintm:
4622     return DAG.getNode(AArch64ISD::FFLOOR_MERGE_PASSTHRU, dl, Op.getValueType(),
4623                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4624   case Intrinsic::aarch64_sve_frinti:
4625     return DAG.getNode(AArch64ISD::FNEARBYINT_MERGE_PASSTHRU, dl, Op.getValueType(),
4626                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4627   case Intrinsic::aarch64_sve_frintx:
4628     return DAG.getNode(AArch64ISD::FRINT_MERGE_PASSTHRU, dl, Op.getValueType(),
4629                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4630   case Intrinsic::aarch64_sve_frinta:
4631     return DAG.getNode(AArch64ISD::FROUND_MERGE_PASSTHRU, dl, Op.getValueType(),
4632                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4633   case Intrinsic::aarch64_sve_frintn:
4634     return DAG.getNode(AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU, dl, Op.getValueType(),
4635                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4636   case Intrinsic::aarch64_sve_frintz:
4637     return DAG.getNode(AArch64ISD::FTRUNC_MERGE_PASSTHRU, dl, Op.getValueType(),
4638                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4639   case Intrinsic::aarch64_sve_ucvtf:
4640     return DAG.getNode(AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU, dl,
4641                        Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4642                        Op.getOperand(1));
4643   case Intrinsic::aarch64_sve_scvtf:
4644     return DAG.getNode(AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU, dl,
4645                        Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4646                        Op.getOperand(1));
4647   case Intrinsic::aarch64_sve_fcvtzu:
4648     return DAG.getNode(AArch64ISD::FCVTZU_MERGE_PASSTHRU, dl,
4649                        Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4650                        Op.getOperand(1));
4651   case Intrinsic::aarch64_sve_fcvtzs:
4652     return DAG.getNode(AArch64ISD::FCVTZS_MERGE_PASSTHRU, dl,
4653                        Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4654                        Op.getOperand(1));
4655   case Intrinsic::aarch64_sve_fsqrt:
4656     return DAG.getNode(AArch64ISD::FSQRT_MERGE_PASSTHRU, dl, Op.getValueType(),
4657                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4658   case Intrinsic::aarch64_sve_frecpx:
4659     return DAG.getNode(AArch64ISD::FRECPX_MERGE_PASSTHRU, dl, Op.getValueType(),
4660                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4661   case Intrinsic::aarch64_sve_frecpe_x:
4662     return DAG.getNode(AArch64ISD::FRECPE, dl, Op.getValueType(),
4663                        Op.getOperand(1));
4664   case Intrinsic::aarch64_sve_frecps_x:
4665     return DAG.getNode(AArch64ISD::FRECPS, dl, Op.getValueType(),
4666                        Op.getOperand(1), Op.getOperand(2));
4667   case Intrinsic::aarch64_sve_frsqrte_x:
4668     return DAG.getNode(AArch64ISD::FRSQRTE, dl, Op.getValueType(),
4669                        Op.getOperand(1));
4670   case Intrinsic::aarch64_sve_frsqrts_x:
4671     return DAG.getNode(AArch64ISD::FRSQRTS, dl, Op.getValueType(),
4672                        Op.getOperand(1), Op.getOperand(2));
4673   case Intrinsic::aarch64_sve_fabs:
4674     return DAG.getNode(AArch64ISD::FABS_MERGE_PASSTHRU, dl, Op.getValueType(),
4675                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4676   case Intrinsic::aarch64_sve_abs:
4677     return DAG.getNode(AArch64ISD::ABS_MERGE_PASSTHRU, dl, Op.getValueType(),
4678                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4679   case Intrinsic::aarch64_sve_neg:
4680     return DAG.getNode(AArch64ISD::NEG_MERGE_PASSTHRU, dl, Op.getValueType(),
4681                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4682   case Intrinsic::aarch64_sve_insr: {
4683     SDValue Scalar = Op.getOperand(2);
4684     EVT ScalarTy = Scalar.getValueType();
4685     if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
4686       Scalar = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Scalar);
4687 
4688     return DAG.getNode(AArch64ISD::INSR, dl, Op.getValueType(),
4689                        Op.getOperand(1), Scalar);
4690   }
4691   case Intrinsic::aarch64_sve_rbit:
4692     return DAG.getNode(AArch64ISD::BITREVERSE_MERGE_PASSTHRU, dl,
4693                        Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4694                        Op.getOperand(1));
4695   case Intrinsic::aarch64_sve_revb:
4696     return DAG.getNode(AArch64ISD::BSWAP_MERGE_PASSTHRU, dl, Op.getValueType(),
4697                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4698   case Intrinsic::aarch64_sve_revh:
4699     return DAG.getNode(AArch64ISD::REVH_MERGE_PASSTHRU, dl, Op.getValueType(),
4700                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4701   case Intrinsic::aarch64_sve_revw:
4702     return DAG.getNode(AArch64ISD::REVW_MERGE_PASSTHRU, dl, Op.getValueType(),
4703                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4704   case Intrinsic::aarch64_sve_revd:
4705     return DAG.getNode(AArch64ISD::REVD_MERGE_PASSTHRU, dl, Op.getValueType(),
4706                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4707   case Intrinsic::aarch64_sve_sxtb:
4708     return DAG.getNode(
4709         AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4710         Op.getOperand(2), Op.getOperand(3),
4711         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i8)),
4712         Op.getOperand(1));
4713   case Intrinsic::aarch64_sve_sxth:
4714     return DAG.getNode(
4715         AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4716         Op.getOperand(2), Op.getOperand(3),
4717         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i16)),
4718         Op.getOperand(1));
4719   case Intrinsic::aarch64_sve_sxtw:
4720     return DAG.getNode(
4721         AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4722         Op.getOperand(2), Op.getOperand(3),
4723         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i32)),
4724         Op.getOperand(1));
4725   case Intrinsic::aarch64_sve_uxtb:
4726     return DAG.getNode(
4727         AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4728         Op.getOperand(2), Op.getOperand(3),
4729         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i8)),
4730         Op.getOperand(1));
4731   case Intrinsic::aarch64_sve_uxth:
4732     return DAG.getNode(
4733         AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4734         Op.getOperand(2), Op.getOperand(3),
4735         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i16)),
4736         Op.getOperand(1));
4737   case Intrinsic::aarch64_sve_uxtw:
4738     return DAG.getNode(
4739         AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4740         Op.getOperand(2), Op.getOperand(3),
4741         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i32)),
4742         Op.getOperand(1));
4743   case Intrinsic::localaddress: {
4744     const auto &MF = DAG.getMachineFunction();
4745     const auto *RegInfo = Subtarget->getRegisterInfo();
4746     unsigned Reg = RegInfo->getLocalAddressRegister(MF);
4747     return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg,
4748                               Op.getSimpleValueType());
4749   }
4750 
4751   case Intrinsic::eh_recoverfp: {
    // FIXME: This needs to be implemented to correctly handle highly aligned
    // stack objects. For now we simply return the incoming FP. Refer to D53541
    // for more details.
4755     SDValue FnOp = Op.getOperand(1);
4756     SDValue IncomingFPOp = Op.getOperand(2);
4757     GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
4758     auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
4759     if (!Fn)
4760       report_fatal_error(
4761           "llvm.eh.recoverfp must take a function as the first argument");
4762     return IncomingFPOp;
4763   }
4764 
4765   case Intrinsic::aarch64_neon_vsri:
4766   case Intrinsic::aarch64_neon_vsli: {
4767     EVT Ty = Op.getValueType();
4768 
4769     if (!Ty.isVector())
4770       report_fatal_error("Unexpected type for aarch64_neon_vsli");
4771 
4772     assert(Op.getConstantOperandVal(3) <= Ty.getScalarSizeInBits());
4773 
4774     bool IsShiftRight = IntNo == Intrinsic::aarch64_neon_vsri;
4775     unsigned Opcode = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI;
4776     return DAG.getNode(Opcode, dl, Ty, Op.getOperand(1), Op.getOperand(2),
4777                        Op.getOperand(3));
4778   }
4779 
4780   case Intrinsic::aarch64_neon_srhadd:
4781   case Intrinsic::aarch64_neon_urhadd:
4782   case Intrinsic::aarch64_neon_shadd:
4783   case Intrinsic::aarch64_neon_uhadd: {
4784     bool IsSignedAdd = (IntNo == Intrinsic::aarch64_neon_srhadd ||
4785                         IntNo == Intrinsic::aarch64_neon_shadd);
4786     bool IsRoundingAdd = (IntNo == Intrinsic::aarch64_neon_srhadd ||
4787                           IntNo == Intrinsic::aarch64_neon_urhadd);
4788     unsigned Opcode = IsSignedAdd
4789                           ? (IsRoundingAdd ? ISD::AVGCEILS : ISD::AVGFLOORS)
4790                           : (IsRoundingAdd ? ISD::AVGCEILU : ISD::AVGFLOORU);
4791     return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
4792                        Op.getOperand(2));
4793   }
4794   case Intrinsic::aarch64_neon_sabd:
4795   case Intrinsic::aarch64_neon_uabd: {
4796     unsigned Opcode = IntNo == Intrinsic::aarch64_neon_uabd ? ISD::ABDU
4797                                                             : ISD::ABDS;
4798     return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
4799                        Op.getOperand(2));
4800   }
4801   case Intrinsic::aarch64_neon_saddlp:
4802   case Intrinsic::aarch64_neon_uaddlp: {
4803     unsigned Opcode = IntNo == Intrinsic::aarch64_neon_uaddlp
4804                           ? AArch64ISD::UADDLP
4805                           : AArch64ISD::SADDLP;
4806     return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1));
4807   }
4808   case Intrinsic::aarch64_neon_sdot:
4809   case Intrinsic::aarch64_neon_udot:
4810   case Intrinsic::aarch64_sve_sdot:
4811   case Intrinsic::aarch64_sve_udot: {
4812     unsigned Opcode = (IntNo == Intrinsic::aarch64_neon_udot ||
4813                        IntNo == Intrinsic::aarch64_sve_udot)
4814                           ? AArch64ISD::UDOT
4815                           : AArch64ISD::SDOT;
4816     return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
4817                        Op.getOperand(2), Op.getOperand(3));
4818   }
4819   case Intrinsic::get_active_lane_mask: {
4820     SDValue ID =
4821         DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, dl, MVT::i64);
4822     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(), ID,
4823                        Op.getOperand(1), Op.getOperand(2));
4824   }
4825   }
4826 }
4827 
4828 bool AArch64TargetLowering::shouldExtendGSIndex(EVT VT, EVT &EltTy) const {
4829   if (VT.getVectorElementType() == MVT::i8 ||
4830       VT.getVectorElementType() == MVT::i16) {
4831     EltTy = MVT::i32;
4832     return true;
4833   }
4834   return false;
4835 }
4836 
4837 bool AArch64TargetLowering::shouldRemoveExtendFromGSIndex(EVT IndexVT,
4838                                                           EVT DataVT) const {
4839   // SVE only supports implicit extension of 32-bit indices.
4840   if (!Subtarget->hasSVE() || IndexVT.getVectorElementType() != MVT::i32)
4841     return false;
4842 
4843   // Indices cannot be smaller than the main data type.
4844   if (IndexVT.getScalarSizeInBits() < DataVT.getScalarSizeInBits())
4845     return false;
4846 
4847   // Scalable vectors with "vscale * 2" or fewer elements sit within a 64-bit
4848   // element container type, which would violate the previous clause.
4849   return DataVT.isFixedLengthVector() || DataVT.getVectorMinNumElements() > 2;
4850 }
4851 
4852 bool AArch64TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
4853   return ExtVal.getValueType().isScalableVector() ||
4854          useSVEForFixedLengthVectorVT(
4855              ExtVal.getValueType(),
4856              /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors());
4857 }
4858 
4859 unsigned getGatherVecOpcode(bool IsScaled, bool IsSigned, bool NeedsExtend) {
4860   std::map<std::tuple<bool, bool, bool>, unsigned> AddrModes = {
4861       {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ false),
4862        AArch64ISD::GLD1_MERGE_ZERO},
4863       {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ true),
4864        AArch64ISD::GLD1_UXTW_MERGE_ZERO},
4865       {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ false),
4866        AArch64ISD::GLD1_MERGE_ZERO},
4867       {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ true),
4868        AArch64ISD::GLD1_SXTW_MERGE_ZERO},
4869       {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ false),
4870        AArch64ISD::GLD1_SCALED_MERGE_ZERO},
4871       {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ true),
4872        AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO},
4873       {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ false),
4874        AArch64ISD::GLD1_SCALED_MERGE_ZERO},
4875       {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ true),
4876        AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO},
4877   };
4878   auto Key = std::make_tuple(IsScaled, IsSigned, NeedsExtend);
4879   return AddrModes.find(Key)->second;
4880 }
4881 
4882 unsigned getSignExtendedGatherOpcode(unsigned Opcode) {
4883   switch (Opcode) {
4884   default:
4885     llvm_unreachable("unimplemented opcode");
4886     return Opcode;
4887   case AArch64ISD::GLD1_MERGE_ZERO:
4888     return AArch64ISD::GLD1S_MERGE_ZERO;
4889   case AArch64ISD::GLD1_IMM_MERGE_ZERO:
4890     return AArch64ISD::GLD1S_IMM_MERGE_ZERO;
4891   case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
4892     return AArch64ISD::GLD1S_UXTW_MERGE_ZERO;
4893   case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
4894     return AArch64ISD::GLD1S_SXTW_MERGE_ZERO;
4895   case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
4896     return AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
4897   case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
4898     return AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO;
4899   case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
4900     return AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO;
4901   }
4902 }
4903 
4904 SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
4905                                             SelectionDAG &DAG) const {
4906   MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(Op);
4907 
4908   SDLoc DL(Op);
4909   SDValue Chain = MGT->getChain();
4910   SDValue PassThru = MGT->getPassThru();
4911   SDValue Mask = MGT->getMask();
4912   SDValue BasePtr = MGT->getBasePtr();
4913   SDValue Index = MGT->getIndex();
4914   SDValue Scale = MGT->getScale();
4915   EVT VT = Op.getValueType();
4916   EVT MemVT = MGT->getMemoryVT();
4917   ISD::LoadExtType ExtType = MGT->getExtensionType();
4918   ISD::MemIndexType IndexType = MGT->getIndexType();
4919 
  // SVE supports zero (and hence undef) passthrough values only; everything
  // else must be handled manually by an explicit select on the load's output.
4922   if (!PassThru->isUndef() && !isZerosVector(PassThru.getNode())) {
4923     SDValue Ops[] = {Chain, DAG.getUNDEF(VT), Mask, BasePtr, Index, Scale};
4924     SDValue Load =
4925         DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,
4926                             MGT->getMemOperand(), IndexType, ExtType);
4927     SDValue Select = DAG.getSelect(DL, VT, Mask, Load, PassThru);
4928     return DAG.getMergeValues({Select, Load.getValue(1)}, DL);
4929   }
4930 
4931   bool IsScaled = MGT->isIndexScaled();
4932   bool IsSigned = MGT->isIndexSigned();
4933 
4934   // SVE supports only an index scaled by sizeof(MemVT.elt); everything else
4935   // must be calculated beforehand.
4936   uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue();
4937   if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
4938     assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
4939     EVT IndexVT = Index.getValueType();
4940     Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index,
4941                         DAG.getConstant(Log2_32(ScaleVal), DL, IndexVT));
4942     Scale = DAG.getTargetConstant(1, DL, Scale.getValueType());
4943 
4944     SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
4945     return DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,
4946                                MGT->getMemOperand(), IndexType, ExtType);
4947   }
4948 
4949   // Lower fixed length gather to a scalable equivalent.
4950   if (VT.isFixedLengthVector()) {
4951     assert(Subtarget->useSVEForFixedLengthVectors() &&
4952            "Cannot lower when not using SVE for fixed vectors!");
4953 
4954     // NOTE: Handle floating-point as if integer, then bitcast the result.
4955     EVT DataVT = VT.changeVectorElementTypeToInteger();
4956     MemVT = MemVT.changeVectorElementTypeToInteger();
4957 
4958     // Find the smallest integer fixed length vector we can use for the gather.
4959     EVT PromotedVT = VT.changeVectorElementType(MVT::i32);
4960     if (DataVT.getVectorElementType() == MVT::i64 ||
4961         Index.getValueType().getVectorElementType() == MVT::i64 ||
4962         Mask.getValueType().getVectorElementType() == MVT::i64)
4963       PromotedVT = VT.changeVectorElementType(MVT::i64);
4964 
4965     // Promote vector operands except for passthrough, which we know is either
4966     // undef or zero, and thus best constructed directly.
4967     unsigned ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
4968     Index = DAG.getNode(ExtOpcode, DL, PromotedVT, Index);
4969     Mask = DAG.getNode(ISD::SIGN_EXTEND, DL, PromotedVT, Mask);
4970 
4971     // A promoted result type forces the need for an extending load.
4972     if (PromotedVT != DataVT && ExtType == ISD::NON_EXTLOAD)
4973       ExtType = ISD::EXTLOAD;
4974 
4975     EVT ContainerVT = getContainerForFixedLengthVector(DAG, PromotedVT);
4976 
4977     // Convert fixed length vector operands to scalable.
4978     MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType());
4979     Index = convertToScalableVector(DAG, ContainerVT, Index);
4980     Mask = convertFixedMaskToScalableVector(Mask, DAG);
4981     PassThru = PassThru->isUndef() ? DAG.getUNDEF(ContainerVT)
4982                                    : DAG.getConstant(0, DL, ContainerVT);
4983 
4984     // Emit equivalent scalable vector gather.
4985     SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
4986     SDValue Load =
4987         DAG.getMaskedGather(DAG.getVTList(ContainerVT, MVT::Other), MemVT, DL,
4988                             Ops, MGT->getMemOperand(), IndexType, ExtType);
4989 
4990     // Extract fixed length data then convert to the required result type.
4991     SDValue Result = convertFromScalableVector(DAG, PromotedVT, Load);
4992     Result = DAG.getNode(ISD::TRUNCATE, DL, DataVT, Result);
4993     if (VT.isFloatingPoint())
4994       Result = DAG.getNode(ISD::BITCAST, DL, VT, Result);
4995 
4996     return DAG.getMergeValues({Result, Load.getValue(1)}, DL);
4997   }
4998 
4999   // Everything else is legal.
5000   return Op;
5001 }
5002 
5003 SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
5004                                              SelectionDAG &DAG) const {
5005   MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(Op);
5006 
5007   SDLoc DL(Op);
5008   SDValue Chain = MSC->getChain();
5009   SDValue StoreVal = MSC->getValue();
5010   SDValue Mask = MSC->getMask();
5011   SDValue BasePtr = MSC->getBasePtr();
5012   SDValue Index = MSC->getIndex();
5013   SDValue Scale = MSC->getScale();
5014   EVT VT = StoreVal.getValueType();
5015   EVT MemVT = MSC->getMemoryVT();
5016   ISD::MemIndexType IndexType = MSC->getIndexType();
5017   bool Truncating = MSC->isTruncatingStore();
5018 
5019   bool IsScaled = MSC->isIndexScaled();
5020   bool IsSigned = MSC->isIndexSigned();
5021 
5022   // SVE supports only an index scaled by sizeof(MemVT.elt); everything else
5023   // must be calculated beforehand.
5024   uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue();
5025   if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
5026     assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
5027     EVT IndexVT = Index.getValueType();
5028     Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index,
5029                         DAG.getConstant(Log2_32(ScaleVal), DL, IndexVT));
5030     Scale = DAG.getTargetConstant(1, DL, Scale.getValueType());
5031 
5032     SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
5033     return DAG.getMaskedScatter(MSC->getVTList(), MemVT, DL, Ops,
5034                                 MSC->getMemOperand(), IndexType, Truncating);
5035   }
5036 
5037   // Lower fixed length scatter to a scalable equivalent.
5038   if (VT.isFixedLengthVector()) {
5039     assert(Subtarget->useSVEForFixedLengthVectors() &&
5040            "Cannot lower when not using SVE for fixed vectors!");
5041 
5042     // Once bitcast, we treat floating-point scatters as if they were integer.
5043     if (VT.isFloatingPoint()) {
5044       VT = VT.changeVectorElementTypeToInteger();
5045       MemVT = MemVT.changeVectorElementTypeToInteger();
5046       StoreVal = DAG.getNode(ISD::BITCAST, DL, VT, StoreVal);
5047     }
5048 
5049     // Find the smallest integer fixed length vector we can use for the scatter.
5050     EVT PromotedVT = VT.changeVectorElementType(MVT::i32);
5051     if (VT.getVectorElementType() == MVT::i64 ||
5052         Index.getValueType().getVectorElementType() == MVT::i64 ||
5053         Mask.getValueType().getVectorElementType() == MVT::i64)
5054       PromotedVT = VT.changeVectorElementType(MVT::i64);
5055 
5056     // Promote vector operands.
5057     unsigned ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
5058     Index = DAG.getNode(ExtOpcode, DL, PromotedVT, Index);
5059     Mask = DAG.getNode(ISD::SIGN_EXTEND, DL, PromotedVT, Mask);
5060     StoreVal = DAG.getNode(ISD::ANY_EXTEND, DL, PromotedVT, StoreVal);
5061 
5062     // A promoted value type forces the need for a truncating store.
5063     if (PromotedVT != VT)
5064       Truncating = true;
5065 
5066     EVT ContainerVT = getContainerForFixedLengthVector(DAG, PromotedVT);
5067 
5068     // Convert fixed length vector operands to scalable.
5069     MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType());
5070     Index = convertToScalableVector(DAG, ContainerVT, Index);
5071     Mask = convertFixedMaskToScalableVector(Mask, DAG);
5072     StoreVal = convertToScalableVector(DAG, ContainerVT, StoreVal);
5073 
5074     // Emit equivalent scalable vector scatter.
5075     SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
5076     return DAG.getMaskedScatter(MSC->getVTList(), MemVT, DL, Ops,
5077                                 MSC->getMemOperand(), IndexType, Truncating);
5078   }
5079 
5080   // Everything else is legal.
5081   return Op;
5082 }
5083 
5084 SDValue AArch64TargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
5085   SDLoc DL(Op);
5086   MaskedLoadSDNode *LoadNode = cast<MaskedLoadSDNode>(Op);
5087   assert(LoadNode && "Expected custom lowering of a masked load node");
5088   EVT VT = Op->getValueType(0);
5089 
5090   if (useSVEForFixedLengthVectorVT(
5091           VT,
5092           /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()))
5093     return LowerFixedLengthVectorMLoadToSVE(Op, DAG);
5094 
5095   SDValue PassThru = LoadNode->getPassThru();
5096   SDValue Mask = LoadNode->getMask();
5097 
5098   if (PassThru->isUndef() || isZerosVector(PassThru.getNode()))
5099     return Op;
5100 
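  // The passthrough is neither undef nor zero, which SVE cannot handle
  // directly. Re-issue the masked load with an undef passthrough, then select
  // the original passthrough back in under the mask.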
5101   SDValue Load = DAG.getMaskedLoad(
5102       VT, DL, LoadNode->getChain(), LoadNode->getBasePtr(),
5103       LoadNode->getOffset(), Mask, DAG.getUNDEF(VT), LoadNode->getMemoryVT(),
5104       LoadNode->getMemOperand(), LoadNode->getAddressingMode(),
5105       LoadNode->getExtensionType());
5106 
5107   SDValue Result = DAG.getSelect(DL, VT, Mask, Load, PassThru);
5108 
5109   return DAG.getMergeValues({Result, Load.getValue(1)}, DL);
5110 }
5111 
5112 // Custom lower trunc store for v4i8 vectors, since it is promoted to v4i16.
5113 static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST,
5114                                         EVT VT, EVT MemVT,
5115                                         SelectionDAG &DAG) {
5116   assert(VT.isVector() && "VT should be a vector type");
5117   assert(MemVT == MVT::v4i8 && VT == MVT::v4i16);
5118 
5119   SDValue Value = ST->getValue();
5120 
5121   // First extend the promoted v4i16 to v8i16, truncate to v8i8, and extract
5122   // the word lane which represents the v4i8 subvector.  This optimizes the
5123   // store to:
5124   //
5125   //   xtn  v0.8b, v0.8h
5126   //   str  s0, [x0]
5127 
5128   SDValue Undef = DAG.getUNDEF(MVT::i16);
5129   SDValue UndefVec = DAG.getBuildVector(MVT::v4i16, DL,
5130                                         {Undef, Undef, Undef, Undef});
5131 
5132   SDValue TruncExt = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16,
5133                                  Value, UndefVec);
5134   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, TruncExt);
5135 
5136   Trunc = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Trunc);
5137   SDValue ExtractTrunc = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
5138                                      Trunc, DAG.getConstant(0, DL, MVT::i64));
5139 
5140   return DAG.getStore(ST->getChain(), DL, ExtractTrunc,
5141                       ST->getBasePtr(), ST->getMemOperand());
5142 }
5143 
5144 // Custom lowering for stores, vector or scalar, truncating or not.  This
5145 // currently covers truncating v4i16-to-v4i8 stores, volatile i128 stores,
5146 // i64x8 stores, 256-bit non-temporal stores, and fixed-length SVE stores.
5147 SDValue AArch64TargetLowering::LowerSTORE(SDValue Op,
5148                                           SelectionDAG &DAG) const {
5149   SDLoc Dl(Op);
5150   StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
5151   assert(StoreNode && "Can only custom lower store nodes");
5152 
5153   SDValue Value = StoreNode->getValue();
5154 
5155   EVT VT = Value.getValueType();
5156   EVT MemVT = StoreNode->getMemoryVT();
5157 
5158   if (VT.isVector()) {
5159     if (useSVEForFixedLengthVectorVT(
5160             VT,
5161             /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()))
5162       return LowerFixedLengthVectorStoreToSVE(Op, DAG);
5163 
5164     unsigned AS = StoreNode->getAddressSpace();
5165     Align Alignment = StoreNode->getAlign();
5166     if (Alignment < MemVT.getStoreSize() &&
5167         !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment,
5168                                         StoreNode->getMemOperand()->getFlags(),
5169                                         nullptr)) {
5170       return scalarizeVectorStore(StoreNode, DAG);
5171     }
5172 
5173     if (StoreNode->isTruncatingStore() && VT == MVT::v4i16 &&
5174         MemVT == MVT::v4i8) {
5175       return LowerTruncateVectorStore(Dl, StoreNode, VT, MemVT, DAG);
5176     }
5177     // 256-bit non-temporal stores can be lowered to STNP. Do this as part of
5178     // the custom lowering, as there are no unpaired non-temporal stores and
5179     // legalization will break up 256-bit inputs.
5180     ElementCount EC = MemVT.getVectorElementCount();
5181     if (StoreNode->isNonTemporal() && MemVT.getSizeInBits() == 256u &&
5182         EC.isKnownEven() &&
5183         ((MemVT.getScalarSizeInBits() == 8u ||
5184           MemVT.getScalarSizeInBits() == 16u ||
5185           MemVT.getScalarSizeInBits() == 32u ||
5186           MemVT.getScalarSizeInBits() == 64u))) {
5187       SDValue Lo =
5188           DAG.getNode(ISD::EXTRACT_SUBVECTOR, Dl,
5189                       MemVT.getHalfNumVectorElementsVT(*DAG.getContext()),
5190                       StoreNode->getValue(), DAG.getConstant(0, Dl, MVT::i64));
5191       SDValue Hi =
5192           DAG.getNode(ISD::EXTRACT_SUBVECTOR, Dl,
5193                       MemVT.getHalfNumVectorElementsVT(*DAG.getContext()),
5194                       StoreNode->getValue(),
5195                       DAG.getConstant(EC.getKnownMinValue() / 2, Dl, MVT::i64));
5196       SDValue Result = DAG.getMemIntrinsicNode(
5197           AArch64ISD::STNP, Dl, DAG.getVTList(MVT::Other),
5198           {StoreNode->getChain(), Lo, Hi, StoreNode->getBasePtr()},
5199           StoreNode->getMemoryVT(), StoreNode->getMemOperand());
5200       return Result;
5201     }
5202   } else if (MemVT == MVT::i128 && StoreNode->isVolatile()) {
5203     return LowerStore128(Op, DAG);
5204   } else if (MemVT == MVT::i64x8) {
5205     SDValue Value = StoreNode->getValue();
5206     assert(Value->getValueType(0) == MVT::i64x8);
5207     SDValue Chain = StoreNode->getChain();
5208     SDValue Base = StoreNode->getBasePtr();
5209     EVT PtrVT = Base.getValueType();
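    // Split the i64x8 value into eight i64 pieces and store each one at an
    // 8-byte offset from the base pointer, threading the chain through.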
5210     for (unsigned i = 0; i < 8; i++) {
5211       SDValue Part = DAG.getNode(AArch64ISD::LS64_EXTRACT, Dl, MVT::i64,
5212                                  Value, DAG.getConstant(i, Dl, MVT::i32));
5213       SDValue Ptr = DAG.getNode(ISD::ADD, Dl, PtrVT, Base,
5214                                 DAG.getConstant(i * 8, Dl, PtrVT));
5215       Chain = DAG.getStore(Chain, Dl, Part, Ptr, StoreNode->getPointerInfo(),
5216                            StoreNode->getOriginalAlign());
5217     }
5218     return Chain;
5219   }
5220 
5221   return SDValue();
5222 }
5223 
5224 /// Lower atomic or volatile 128-bit stores to a single STP instruction.
5225 SDValue AArch64TargetLowering::LowerStore128(SDValue Op,
5226                                              SelectionDAG &DAG) const {
5227   MemSDNode *StoreNode = cast<MemSDNode>(Op);
5228   assert(StoreNode->getMemoryVT() == MVT::i128);
5229   assert(StoreNode->isVolatile() || StoreNode->isAtomic());
5230   assert(!StoreNode->isAtomic() ||
5231          StoreNode->getMergedOrdering() == AtomicOrdering::Unordered ||
5232          StoreNode->getMergedOrdering() == AtomicOrdering::Monotonic);
5233 
5234   SDValue Value = StoreNode->getOpcode() == ISD::STORE
5235                       ? StoreNode->getOperand(1)
5236                       : StoreNode->getOperand(2);
5237   SDLoc DL(Op);
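  // Split the value into its low and high 64-bit halves; these become the two
  // register operands of the STP.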
5238   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, Value,
5239                            DAG.getConstant(0, DL, MVT::i64));
5240   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, Value,
5241                            DAG.getConstant(1, DL, MVT::i64));
5242   SDValue Result = DAG.getMemIntrinsicNode(
5243       AArch64ISD::STP, DL, DAG.getVTList(MVT::Other),
5244       {StoreNode->getChain(), Lo, Hi, StoreNode->getBasePtr()},
5245       StoreNode->getMemoryVT(), StoreNode->getMemOperand());
5246   return Result;
5247 }
5248 
5249 SDValue AArch64TargetLowering::LowerLOAD(SDValue Op,
5250                                          SelectionDAG &DAG) const {
5251   SDLoc DL(Op);
5252   LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
5253   assert(LoadNode && "Expected custom lowering of a load node");
5254 
5255   if (LoadNode->getMemoryVT() == MVT::i64x8) {
5256     SmallVector<SDValue, 8> Ops;
5257     SDValue Base = LoadNode->getBasePtr();
5258     SDValue Chain = LoadNode->getChain();
5259     EVT PtrVT = Base.getValueType();
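    // Load eight i64 pieces at consecutive 8-byte offsets and recombine them
    // into a single i64x8 value with LS64_BUILD.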
5260     for (unsigned i = 0; i < 8; i++) {
5261       SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base,
5262                                 DAG.getConstant(i * 8, DL, PtrVT));
5263       SDValue Part = DAG.getLoad(MVT::i64, DL, Chain, Ptr,
5264                                  LoadNode->getPointerInfo(),
5265                                  LoadNode->getOriginalAlign());
5266       Ops.push_back(Part);
5267       Chain = SDValue(Part.getNode(), 1);
5268     }
5269     SDValue Loaded = DAG.getNode(AArch64ISD::LS64_BUILD, DL, MVT::i64x8, Ops);
5270     return DAG.getMergeValues({Loaded, Chain}, DL);
5271   }
5272 
5273   // Custom lowering for extending v4i8 vector loads.
5274   EVT VT = Op->getValueType(0);
5275   assert((VT == MVT::v4i16 || VT == MVT::v4i32) && "Expected v4i16 or v4i32");
5276 
5277   if (LoadNode->getMemoryVT() != MVT::v4i8)
5278     return SDValue();
5279 
5280   unsigned ExtType;
5281   if (LoadNode->getExtensionType() == ISD::SEXTLOAD)
5282     ExtType = ISD::SIGN_EXTEND;
5283   else if (LoadNode->getExtensionType() == ISD::ZEXTLOAD ||
5284            LoadNode->getExtensionType() == ISD::EXTLOAD)
5285     ExtType = ISD::ZERO_EXTEND;
5286   else
5287     return SDValue();
5288 
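  // Load the four bytes as a single f32, move them into a vector register,
  // bitcast to v8i8 and extend to v8i16, then keep the low v4i16 half
  // (extending once more when a v4i32 result is required).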
5289   SDValue Load = DAG.getLoad(MVT::f32, DL, LoadNode->getChain(),
5290                              LoadNode->getBasePtr(), MachinePointerInfo());
5291   SDValue Chain = Load.getValue(1);
5292   SDValue Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f32, Load);
5293   SDValue BC = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Vec);
5294   SDValue Ext = DAG.getNode(ExtType, DL, MVT::v8i16, BC);
5295   Ext = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Ext,
5296                     DAG.getConstant(0, DL, MVT::i64));
5297   if (VT == MVT::v4i32)
5298     Ext = DAG.getNode(ExtType, DL, MVT::v4i32, Ext);
5299   return DAG.getMergeValues({Ext, Chain}, DL);
5300 }
5301 
5302 // Generate SUBS and CSEL for integer abs.
5303 SDValue AArch64TargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
5304   MVT VT = Op.getSimpleValueType();
5305 
5306   if (VT.isVector())
5307     return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABS_MERGE_PASSTHRU);
5308 
5309   SDLoc DL(Op);
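  // abs(x) is computed by subtracting x from zero for the negated value, then
  // comparing x against zero with flags and selecting x when the result is
  // non-negative (PL) and the negation otherwise.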
5310   SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
5311                             Op.getOperand(0));
5312   // Generate SUBS & CSEL.
5313   SDValue Cmp =
5314       DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::i32),
5315                   Op.getOperand(0), DAG.getConstant(0, DL, VT));
5316   return DAG.getNode(AArch64ISD::CSEL, DL, VT, Op.getOperand(0), Neg,
5317                      DAG.getConstant(AArch64CC::PL, DL, MVT::i32),
5318                      Cmp.getValue(1));
5319 }
5320 
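// Lower BRCOND by turning the condition into a flag-setting conjunction and
// branching on the resulting condition code; bail out to generic handling if
// no conjunction can be emitted.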
5321 static SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) {
5322   SDValue Chain = Op.getOperand(0);
5323   SDValue Cond = Op.getOperand(1);
5324   SDValue Dest = Op.getOperand(2);
5325 
5326   AArch64CC::CondCode CC;
5327   if (SDValue Cmp = emitConjunction(DAG, Cond, CC)) {
5328     SDLoc dl(Op);
5329     SDValue CCVal = DAG.getConstant(CC, dl, MVT::i32);
5330     return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
5331                        Cmp);
5332   }
5333 
5334   return SDValue();
5335 }
5336 
5337 SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
5338                                               SelectionDAG &DAG) const {
5339   LLVM_DEBUG(dbgs() << "Custom lowering: ");
5340   LLVM_DEBUG(Op.dump());
5341 
5342   switch (Op.getOpcode()) {
5343   default:
5344     llvm_unreachable("unimplemented operand");
5345     return SDValue();
5346   case ISD::BITCAST:
5347     return LowerBITCAST(Op, DAG);
5348   case ISD::GlobalAddress:
5349     return LowerGlobalAddress(Op, DAG);
5350   case ISD::GlobalTLSAddress:
5351     return LowerGlobalTLSAddress(Op, DAG);
5352   case ISD::SETCC:
5353   case ISD::STRICT_FSETCC:
5354   case ISD::STRICT_FSETCCS:
5355     return LowerSETCC(Op, DAG);
5356   case ISD::BRCOND:
5357     return LowerBRCOND(Op, DAG);
5358   case ISD::BR_CC:
5359     return LowerBR_CC(Op, DAG);
5360   case ISD::SELECT:
5361     return LowerSELECT(Op, DAG);
5362   case ISD::SELECT_CC:
5363     return LowerSELECT_CC(Op, DAG);
5364   case ISD::JumpTable:
5365     return LowerJumpTable(Op, DAG);
5366   case ISD::BR_JT:
5367     return LowerBR_JT(Op, DAG);
5368   case ISD::ConstantPool:
5369     return LowerConstantPool(Op, DAG);
5370   case ISD::BlockAddress:
5371     return LowerBlockAddress(Op, DAG);
5372   case ISD::VASTART:
5373     return LowerVASTART(Op, DAG);
5374   case ISD::VACOPY:
5375     return LowerVACOPY(Op, DAG);
5376   case ISD::VAARG:
5377     return LowerVAARG(Op, DAG);
5378   case ISD::ADDCARRY:
5379     return lowerADDSUBCARRY(Op, DAG, AArch64ISD::ADCS, false /*unsigned*/);
5380   case ISD::SUBCARRY:
5381     return lowerADDSUBCARRY(Op, DAG, AArch64ISD::SBCS, false /*unsigned*/);
5382   case ISD::SADDO_CARRY:
5383     return lowerADDSUBCARRY(Op, DAG, AArch64ISD::ADCS, true /*signed*/);
5384   case ISD::SSUBO_CARRY:
5385     return lowerADDSUBCARRY(Op, DAG, AArch64ISD::SBCS, true /*signed*/);
5386   case ISD::SADDO:
5387   case ISD::UADDO:
5388   case ISD::SSUBO:
5389   case ISD::USUBO:
5390   case ISD::SMULO:
5391   case ISD::UMULO:
5392     return LowerXALUO(Op, DAG);
5393   case ISD::FADD:
5394     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FADD_PRED);
5395   case ISD::FSUB:
5396     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FSUB_PRED);
5397   case ISD::FMUL:
5398     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMUL_PRED);
5399   case ISD::FMA:
5400     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMA_PRED);
5401   case ISD::FDIV:
5402     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FDIV_PRED);
5403   case ISD::FNEG:
5404     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FNEG_MERGE_PASSTHRU);
5405   case ISD::FCEIL:
5406     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FCEIL_MERGE_PASSTHRU);
5407   case ISD::FFLOOR:
5408     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FFLOOR_MERGE_PASSTHRU);
5409   case ISD::FNEARBYINT:
5410     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FNEARBYINT_MERGE_PASSTHRU);
5411   case ISD::FRINT:
5412     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FRINT_MERGE_PASSTHRU);
5413   case ISD::FROUND:
5414     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FROUND_MERGE_PASSTHRU);
5415   case ISD::FROUNDEVEN:
5416     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU);
5417   case ISD::FTRUNC:
5418     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FTRUNC_MERGE_PASSTHRU);
5419   case ISD::FSQRT:
5420     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FSQRT_MERGE_PASSTHRU);
5421   case ISD::FABS:
5422     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FABS_MERGE_PASSTHRU);
5423   case ISD::FP_ROUND:
5424   case ISD::STRICT_FP_ROUND:
5425     return LowerFP_ROUND(Op, DAG);
5426   case ISD::FP_EXTEND:
5427     return LowerFP_EXTEND(Op, DAG);
5428   case ISD::FRAMEADDR:
5429     return LowerFRAMEADDR(Op, DAG);
5430   case ISD::SPONENTRY:
5431     return LowerSPONENTRY(Op, DAG);
5432   case ISD::RETURNADDR:
5433     return LowerRETURNADDR(Op, DAG);
5434   case ISD::ADDROFRETURNADDR:
5435     return LowerADDROFRETURNADDR(Op, DAG);
5436   case ISD::CONCAT_VECTORS:
5437     return LowerCONCAT_VECTORS(Op, DAG);
5438   case ISD::INSERT_VECTOR_ELT:
5439     return LowerINSERT_VECTOR_ELT(Op, DAG);
5440   case ISD::EXTRACT_VECTOR_ELT:
5441     return LowerEXTRACT_VECTOR_ELT(Op, DAG);
5442   case ISD::BUILD_VECTOR:
5443     return LowerBUILD_VECTOR(Op, DAG);
5444   case ISD::VECTOR_SHUFFLE:
5445     return LowerVECTOR_SHUFFLE(Op, DAG);
5446   case ISD::SPLAT_VECTOR:
5447     return LowerSPLAT_VECTOR(Op, DAG);
5448   case ISD::EXTRACT_SUBVECTOR:
5449     return LowerEXTRACT_SUBVECTOR(Op, DAG);
5450   case ISD::INSERT_SUBVECTOR:
5451     return LowerINSERT_SUBVECTOR(Op, DAG);
5452   case ISD::SDIV:
5453   case ISD::UDIV:
5454     return LowerDIV(Op, DAG);
5455   case ISD::SMIN:
5456   case ISD::UMIN:
5457   case ISD::SMAX:
5458   case ISD::UMAX:
5459     return LowerMinMax(Op, DAG);
5460   case ISD::SRA:
5461   case ISD::SRL:
5462   case ISD::SHL:
5463     return LowerVectorSRA_SRL_SHL(Op, DAG);
5464   case ISD::SHL_PARTS:
5465   case ISD::SRL_PARTS:
5466   case ISD::SRA_PARTS:
5467     return LowerShiftParts(Op, DAG);
5468   case ISD::CTPOP:
5469   case ISD::PARITY:
5470     return LowerCTPOP_PARITY(Op, DAG);
5471   case ISD::FCOPYSIGN:
5472     return LowerFCOPYSIGN(Op, DAG);
5473   case ISD::OR:
5474     return LowerVectorOR(Op, DAG);
5475   case ISD::XOR:
5476     return LowerXOR(Op, DAG);
5477   case ISD::PREFETCH:
5478     return LowerPREFETCH(Op, DAG);
5479   case ISD::SINT_TO_FP:
5480   case ISD::UINT_TO_FP:
5481   case ISD::STRICT_SINT_TO_FP:
5482   case ISD::STRICT_UINT_TO_FP:
5483     return LowerINT_TO_FP(Op, DAG);
5484   case ISD::FP_TO_SINT:
5485   case ISD::FP_TO_UINT:
5486   case ISD::STRICT_FP_TO_SINT:
5487   case ISD::STRICT_FP_TO_UINT:
5488     return LowerFP_TO_INT(Op, DAG);
5489   case ISD::FP_TO_SINT_SAT:
5490   case ISD::FP_TO_UINT_SAT:
5491     return LowerFP_TO_INT_SAT(Op, DAG);
5492   case ISD::FSINCOS:
5493     return LowerFSINCOS(Op, DAG);
5494   case ISD::FLT_ROUNDS_:
5495     return LowerFLT_ROUNDS_(Op, DAG);
5496   case ISD::SET_ROUNDING:
5497     return LowerSET_ROUNDING(Op, DAG);
5498   case ISD::MUL:
5499     return LowerMUL(Op, DAG);
5500   case ISD::MULHS:
5501     return LowerToPredicatedOp(Op, DAG, AArch64ISD::MULHS_PRED);
5502   case ISD::MULHU:
5503     return LowerToPredicatedOp(Op, DAG, AArch64ISD::MULHU_PRED);
5504   case ISD::INTRINSIC_W_CHAIN:
5505     return LowerINTRINSIC_W_CHAIN(Op, DAG);
5506   case ISD::INTRINSIC_WO_CHAIN:
5507     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
5508   case ISD::ATOMIC_STORE:
5509     if (cast<MemSDNode>(Op)->getMemoryVT() == MVT::i128) {
5510       assert(Subtarget->hasLSE2());
5511       return LowerStore128(Op, DAG);
5512     }
5513     return SDValue();
5514   case ISD::STORE:
5515     return LowerSTORE(Op, DAG);
5516   case ISD::MSTORE:
5517     return LowerFixedLengthVectorMStoreToSVE(Op, DAG);
5518   case ISD::MGATHER:
5519     return LowerMGATHER(Op, DAG);
5520   case ISD::MSCATTER:
5521     return LowerMSCATTER(Op, DAG);
5522   case ISD::VECREDUCE_SEQ_FADD:
5523     return LowerVECREDUCE_SEQ_FADD(Op, DAG);
5524   case ISD::VECREDUCE_ADD:
5525   case ISD::VECREDUCE_AND:
5526   case ISD::VECREDUCE_OR:
5527   case ISD::VECREDUCE_XOR:
5528   case ISD::VECREDUCE_SMAX:
5529   case ISD::VECREDUCE_SMIN:
5530   case ISD::VECREDUCE_UMAX:
5531   case ISD::VECREDUCE_UMIN:
5532   case ISD::VECREDUCE_FADD:
5533   case ISD::VECREDUCE_FMAX:
5534   case ISD::VECREDUCE_FMIN:
5535     return LowerVECREDUCE(Op, DAG);
5536   case ISD::ATOMIC_LOAD_SUB:
5537     return LowerATOMIC_LOAD_SUB(Op, DAG);
5538   case ISD::ATOMIC_LOAD_AND:
5539     return LowerATOMIC_LOAD_AND(Op, DAG);
5540   case ISD::DYNAMIC_STACKALLOC:
5541     return LowerDYNAMIC_STACKALLOC(Op, DAG);
5542   case ISD::VSCALE:
5543     return LowerVSCALE(Op, DAG);
5544   case ISD::ANY_EXTEND:
5545   case ISD::SIGN_EXTEND:
5546   case ISD::ZERO_EXTEND:
5547     return LowerFixedLengthVectorIntExtendToSVE(Op, DAG);
5548   case ISD::SIGN_EXTEND_INREG: {
5549     // Only custom lower when ExtraVT has a legal byte-based element type.
5550     EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
5551     EVT ExtraEltVT = ExtraVT.getVectorElementType();
5552     if ((ExtraEltVT != MVT::i8) && (ExtraEltVT != MVT::i16) &&
5553         (ExtraEltVT != MVT::i32) && (ExtraEltVT != MVT::i64))
5554       return SDValue();
5555 
5556     return LowerToPredicatedOp(Op, DAG,
5557                                AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU);
5558   }
5559   case ISD::TRUNCATE:
5560     return LowerTRUNCATE(Op, DAG);
5561   case ISD::MLOAD:
5562     return LowerMLOAD(Op, DAG);
5563   case ISD::LOAD:
5564     if (useSVEForFixedLengthVectorVT(Op.getValueType()))
5565       return LowerFixedLengthVectorLoadToSVE(Op, DAG);
5566     return LowerLOAD(Op, DAG);
5567   case ISD::ADD:
5568   case ISD::AND:
5569   case ISD::SUB:
5570     return LowerToScalableOp(Op, DAG);
5571   case ISD::FMAXIMUM:
5572     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMAX_PRED);
5573   case ISD::FMAXNUM:
5574     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMAXNM_PRED);
5575   case ISD::FMINIMUM:
5576     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMIN_PRED);
5577   case ISD::FMINNUM:
5578     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMINNM_PRED);
5579   case ISD::VSELECT:
5580     return LowerFixedLengthVectorSelectToSVE(Op, DAG);
5581   case ISD::ABS:
5582     return LowerABS(Op, DAG);
5583   case ISD::ABDS:
5584     return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDS_PRED);
5585   case ISD::ABDU:
5586     return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDU_PRED);
5587   case ISD::BITREVERSE:
5588     return LowerBitreverse(Op, DAG);
5589   case ISD::BSWAP:
5590     return LowerToPredicatedOp(Op, DAG, AArch64ISD::BSWAP_MERGE_PASSTHRU);
5591   case ISD::CTLZ:
5592     return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTLZ_MERGE_PASSTHRU);
5593   case ISD::CTTZ:
5594     return LowerCTTZ(Op, DAG);
5595   case ISD::VECTOR_SPLICE:
5596     return LowerVECTOR_SPLICE(Op, DAG);
5597   case ISD::STRICT_LROUND:
5598   case ISD::STRICT_LLROUND:
5599   case ISD::STRICT_LRINT:
5600   case ISD::STRICT_LLRINT: {
5601     assert(Op.getOperand(1).getValueType() == MVT::f16 &&
5602            "Expected custom lowering of rounding operations only for f16");
5603     SDLoc DL(Op);
5604     SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other},
5605                               {Op.getOperand(0), Op.getOperand(1)});
5606     return DAG.getNode(Op.getOpcode(), DL, {Op.getValueType(), MVT::Other},
5607                        {Ext.getValue(1), Ext.getValue(0)});
5608   }
5609   }
5610 }
5611 
5612 bool AArch64TargetLowering::mergeStoresAfterLegalization(EVT VT) const {
5613   return !Subtarget->useSVEForFixedLengthVectors();
5614 }
5615 
5616 bool AArch64TargetLowering::useSVEForFixedLengthVectorVT(
5617     EVT VT, bool OverrideNEON) const {
5618   if (!VT.isFixedLengthVector() || !VT.isSimple())
5619     return false;
5620 
5621   // Don't use SVE for vectors we cannot scalarize if required.
5622   switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
5623   // Fixed length predicates should be promoted to i8.
5624   // NOTE: This matches how NEON (and thus 64/128-bit vectors) works.
5625   case MVT::i1:
5626   default:
5627     return false;
5628   case MVT::i8:
5629   case MVT::i16:
5630   case MVT::i32:
5631   case MVT::i64:
5632   case MVT::f16:
5633   case MVT::f32:
5634   case MVT::f64:
5635     break;
5636   }
5637 
5638   // All SVE implementations support NEON sized vectors.
5639   if (OverrideNEON && (VT.is128BitVector() || VT.is64BitVector()))
5640     return Subtarget->hasSVE();
5641 
5642   // Ensure NEON MVTs only belong to a single register class.
5643   if (VT.getFixedSizeInBits() <= 128)
5644     return false;
5645 
5646   // Ensure wider than NEON code generation is enabled.
5647   if (!Subtarget->useSVEForFixedLengthVectors())
5648     return false;
5649 
5650   // Don't use SVE for types that don't fit.
5651   if (VT.getFixedSizeInBits() > Subtarget->getMinSVEVectorSizeInBits())
5652     return false;
5653 
5654   // TODO: Perhaps an artificial restriction, but worth having whilst getting
5655   // the base fixed length SVE support in place.
5656   if (!VT.isPow2VectorType())
5657     return false;
5658 
5659   return true;
5660 }
5661 
5662 //===----------------------------------------------------------------------===//
5663 //                      Calling Convention Implementation
5664 //===----------------------------------------------------------------------===//
5665 
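/// Return the intrinsic ID for an ISD::INTRINSIC_WO_CHAIN node, or
/// Intrinsic::not_intrinsic if the node is not a recognized intrinsic call.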
5666 static unsigned getIntrinsicID(const SDNode *N) {
5667   unsigned Opcode = N->getOpcode();
5668   switch (Opcode) {
5669   default:
5670     return Intrinsic::not_intrinsic;
5671   case ISD::INTRINSIC_WO_CHAIN: {
5672     unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5673     if (IID < Intrinsic::num_intrinsics)
5674       return IID;
5675     return Intrinsic::not_intrinsic;
5676   }
5677   }
5678 }
5679 
5680 bool AArch64TargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0,
5681                                                 SDValue N1) const {
5682   if (!N0.hasOneUse())
5683     return false;
5684 
5685   unsigned IID = getIntrinsicID(N1.getNode());
5686   // Avoid reassociating expressions that can be lowered to smlal/umlal.
5687   if (IID == Intrinsic::aarch64_neon_umull ||
5688       N1.getOpcode() == AArch64ISD::UMULL ||
5689       IID == Intrinsic::aarch64_neon_smull ||
5690       N1.getOpcode() == AArch64ISD::SMULL)
5691     return N0.getOpcode() != ISD::ADD;
5692 
5693   return true;
5694 }
5695 
5696 /// Selects the correct CCAssignFn for a given CallingConvention value.
5697 CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
5698                                                      bool IsVarArg) const {
5699   switch (CC) {
5700   default:
5701     report_fatal_error("Unsupported calling convention.");
5702   case CallingConv::WebKit_JS:
5703     return CC_AArch64_WebKit_JS;
5704   case CallingConv::GHC:
5705     return CC_AArch64_GHC;
5706   case CallingConv::C:
5707   case CallingConv::Fast:
5708   case CallingConv::PreserveMost:
5709   case CallingConv::CXX_FAST_TLS:
5710   case CallingConv::Swift:
5711   case CallingConv::SwiftTail:
5712   case CallingConv::Tail:
5713     if (Subtarget->isTargetWindows() && IsVarArg)
5714       return CC_AArch64_Win64_VarArg;
5715     if (!Subtarget->isTargetDarwin())
5716       return CC_AArch64_AAPCS;
5717     if (!IsVarArg)
5718       return CC_AArch64_DarwinPCS;
5719     return Subtarget->isTargetILP32() ? CC_AArch64_DarwinPCS_ILP32_VarArg
5720                                       : CC_AArch64_DarwinPCS_VarArg;
5721    case CallingConv::Win64:
5722     return IsVarArg ? CC_AArch64_Win64_VarArg : CC_AArch64_AAPCS;
5723    case CallingConv::CFGuard_Check:
5724      return CC_AArch64_Win64_CFGuard_Check;
5725    case CallingConv::AArch64_VectorCall:
5726    case CallingConv::AArch64_SVE_VectorCall:
5727      return CC_AArch64_AAPCS;
5728   }
5729 }
5730 
5731 CCAssignFn *
5732 AArch64TargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const {
5733   return CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS
5734                                       : RetCC_AArch64_AAPCS;
5735 }
5736 
5737 SDValue AArch64TargetLowering::LowerFormalArguments(
5738     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
5739     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
5740     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5741   MachineFunction &MF = DAG.getMachineFunction();
5742   const Function &F = MF.getFunction();
5743   MachineFrameInfo &MFI = MF.getFrameInfo();
5744   bool IsWin64 = Subtarget->isCallingConvWin64(F.getCallingConv());
5745   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
5746 
5747   SmallVector<ISD::OutputArg, 4> Outs;
5748   GetReturnInfo(CallConv, F.getReturnType(), F.getAttributes(), Outs,
5749                 DAG.getTargetLoweringInfo(), MF.getDataLayout());
5750   if (any_of(Outs, [](ISD::OutputArg &Out){ return Out.VT.isScalableVector(); }))
5751     FuncInfo->setIsSVECC(true);
5752 
5753   // Assign locations to all of the incoming arguments.
5754   SmallVector<CCValAssign, 16> ArgLocs;
5755   DenseMap<unsigned, SDValue> CopiedRegs;
5756   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
5757 
5758   // At this point, Ins[].VT may already be promoted to i32. To correctly
5759   // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and
5760   // i8 to CC_AArch64_AAPCS with i32 being ValVT and i8 being LocVT.
5761   // Since AnalyzeFormalArguments uses Ins[].VT for both ValVT and LocVT, here
5762   // we use a special version of AnalyzeFormalArguments to pass in ValVT and
5763   // LocVT.
5764   unsigned NumArgs = Ins.size();
5765   Function::const_arg_iterator CurOrigArg = F.arg_begin();
5766   unsigned CurArgIdx = 0;
5767   for (unsigned i = 0; i != NumArgs; ++i) {
5768     MVT ValVT = Ins[i].VT;
5769     if (Ins[i].isOrigArg()) {
5770       std::advance(CurOrigArg, Ins[i].getOrigArgIndex() - CurArgIdx);
5771       CurArgIdx = Ins[i].getOrigArgIndex();
5772 
5773       // Get type of the original argument.
5774       EVT ActualVT = getValueType(DAG.getDataLayout(), CurOrigArg->getType(),
5775                                   /*AllowUnknown*/ true);
5776       MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other;
5777       // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
5778       if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
5779         ValVT = MVT::i8;
5780       else if (ActualMVT == MVT::i16)
5781         ValVT = MVT::i16;
5782     }
5783     bool UseVarArgCC = false;
5784     if (IsWin64)
5785       UseVarArgCC = isVarArg;
5786     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, UseVarArgCC);
5787     bool Res =
5788         AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo);
5789     assert(!Res && "Call operand has unhandled type");
5790     (void)Res;
5791   }
5792   SmallVector<SDValue, 16> ArgValues;
5793   unsigned ExtraArgLocs = 0;
5794   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5795     CCValAssign &VA = ArgLocs[i - ExtraArgLocs];
5796 
5797     if (Ins[i].Flags.isByVal()) {
5798       // Byval is used for HFAs in the PCS, but the system should work in a
5799       // non-compliant manner for larger structs.
5800       EVT PtrVT = getPointerTy(DAG.getDataLayout());
5801       int Size = Ins[i].Flags.getByValSize();
5802       unsigned NumRegs = (Size + 7) / 8;
5803 
5804       // FIXME: This works on big-endian for composite byvals, which are the
5805       // common case. It should also work for fundamental types.
5806       unsigned FrameIdx =
5807         MFI.CreateFixedObject(8 * NumRegs, VA.getLocMemOffset(), false);
5808       SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrVT);
5809       InVals.push_back(FrameIdxN);
5810 
5811       continue;
5812     }
5813 
5814     if (Ins[i].Flags.isSwiftAsync())
5815       MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
5816 
5817     SDValue ArgValue;
5818     if (VA.isRegLoc()) {
5819       // Arguments stored in registers.
5820       EVT RegVT = VA.getLocVT();
5821       const TargetRegisterClass *RC;
5822 
5823       if (RegVT == MVT::i32)
5824         RC = &AArch64::GPR32RegClass;
5825       else if (RegVT == MVT::i64)
5826         RC = &AArch64::GPR64RegClass;
5827       else if (RegVT == MVT::f16 || RegVT == MVT::bf16)
5828         RC = &AArch64::FPR16RegClass;
5829       else if (RegVT == MVT::f32)
5830         RC = &AArch64::FPR32RegClass;
5831       else if (RegVT == MVT::f64 || RegVT.is64BitVector())
5832         RC = &AArch64::FPR64RegClass;
5833       else if (RegVT == MVT::f128 || RegVT.is128BitVector())
5834         RC = &AArch64::FPR128RegClass;
5835       else if (RegVT.isScalableVector() &&
5836                RegVT.getVectorElementType() == MVT::i1) {
5837         FuncInfo->setIsSVECC(true);
5838         RC = &AArch64::PPRRegClass;
5839       } else if (RegVT.isScalableVector()) {
5840         FuncInfo->setIsSVECC(true);
5841         RC = &AArch64::ZPRRegClass;
5842       } else
5843         llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
5844 
5845       // Transform the arguments in physical registers into virtual ones.
5846       Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
5847       ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
5848 
5849       // If this is an 8, 16 or 32-bit value, it is really passed promoted
5850       // to 64 bits.  Insert an assert[sz]ext to capture this, then
5851       // truncate to the right size.
5852       switch (VA.getLocInfo()) {
5853       default:
5854         llvm_unreachable("Unknown loc info!");
5855       case CCValAssign::Full:
5856         break;
5857       case CCValAssign::Indirect:
5858         assert(VA.getValVT().isScalableVector() &&
5859                "Only scalable vectors can be passed indirectly");
5860         break;
5861       case CCValAssign::BCvt:
5862         ArgValue = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), ArgValue);
5863         break;
5864       case CCValAssign::AExt:
5865       case CCValAssign::SExt:
5866       case CCValAssign::ZExt:
5867         break;
5868       case CCValAssign::AExtUpper:
5869         ArgValue = DAG.getNode(ISD::SRL, DL, RegVT, ArgValue,
5870                                DAG.getConstant(32, DL, RegVT));
5871         ArgValue = DAG.getZExtOrTrunc(ArgValue, DL, VA.getValVT());
5872         break;
5873       }
5874     } else { // VA.isRegLoc()
5875       assert(VA.isMemLoc() && "CCValAssign is neither reg nor mem");
5876       unsigned ArgOffset = VA.getLocMemOffset();
5877       unsigned ArgSize = (VA.getLocInfo() == CCValAssign::Indirect
5878                               ? VA.getLocVT().getSizeInBits()
5879                               : VA.getValVT().getSizeInBits()) / 8;
5880 
5881       uint32_t BEAlign = 0;
5882       if (!Subtarget->isLittleEndian() && ArgSize < 8 &&
5883           !Ins[i].Flags.isInConsecutiveRegs())
5884         BEAlign = 8 - ArgSize;
5885 
5886       int FI = MFI.CreateFixedObject(ArgSize, ArgOffset + BEAlign, true);
5887 
5888       // Create load nodes to retrieve arguments from the stack.
5889       SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
5890 
5891       // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
5892       ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
5893       MVT MemVT = VA.getValVT();
5894 
5895       switch (VA.getLocInfo()) {
5896       default:
5897         break;
5898       case CCValAssign::Trunc:
5899       case CCValAssign::BCvt:
5900         MemVT = VA.getLocVT();
5901         break;
5902       case CCValAssign::Indirect:
5903         assert(VA.getValVT().isScalableVector() &&
5904                "Only scalable vectors can be passed indirectly");
5905         MemVT = VA.getLocVT();
5906         break;
5907       case CCValAssign::SExt:
5908         ExtType = ISD::SEXTLOAD;
5909         break;
5910       case CCValAssign::ZExt:
5911         ExtType = ISD::ZEXTLOAD;
5912         break;
5913       case CCValAssign::AExt:
5914         ExtType = ISD::EXTLOAD;
5915         break;
5916       }
5917 
5918       ArgValue =
5919           DAG.getExtLoad(ExtType, DL, VA.getLocVT(), Chain, FIN,
5920                          MachinePointerInfo::getFixedStack(MF, FI), MemVT);
5921     }
5922 
5923     if (VA.getLocInfo() == CCValAssign::Indirect) {
5924       assert(VA.getValVT().isScalableVector() &&
5925            "Only scalable vectors can be passed indirectly");
5926 
5927       uint64_t PartSize = VA.getValVT().getStoreSize().getKnownMinSize();
5928       unsigned NumParts = 1;
5929       if (Ins[i].Flags.isInConsecutiveRegs()) {
5930         assert(!Ins[i].Flags.isInConsecutiveRegsLast());
5931         while (!Ins[i + NumParts - 1].Flags.isInConsecutiveRegsLast())
5932           ++NumParts;
5933       }
5934 
5935       MVT PartLoad = VA.getValVT();
5936       SDValue Ptr = ArgValue;
5937 
5938       // Ensure we generate all loads for each tuple part, whilst updating the
5939       // pointer after each load correctly using vscale.
5940       while (NumParts > 0) {
5941         ArgValue = DAG.getLoad(PartLoad, DL, Chain, Ptr, MachinePointerInfo());
5942         InVals.push_back(ArgValue);
5943         NumParts--;
5944         if (NumParts > 0) {
5945           SDValue BytesIncrement = DAG.getVScale(
5946               DL, Ptr.getValueType(),
5947               APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize));
5948           SDNodeFlags Flags;
5949           Flags.setNoUnsignedWrap(true);
5950           Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
5951                             BytesIncrement, Flags);
5952           ExtraArgLocs++;
5953           i++;
5954         }
5955       }
5956     } else {
5957       if (Subtarget->isTargetILP32() && Ins[i].Flags.isPointer())
5958         ArgValue = DAG.getNode(ISD::AssertZext, DL, ArgValue.getValueType(),
5959                                ArgValue, DAG.getValueType(MVT::i32));
5960 
5961       // i1 arguments are zero-extended to i8 by the caller. Emit a
5962       // hint to reflect this.
5963       if (Ins[i].isOrigArg()) {
5964         Argument *OrigArg = F.getArg(Ins[i].getOrigArgIndex());
5965         if (OrigArg->getType()->isIntegerTy(1)) {
5966           if (!Ins[i].Flags.isZExt()) {
5967             ArgValue = DAG.getNode(AArch64ISD::ASSERT_ZEXT_BOOL, DL,
5968                                    ArgValue.getValueType(), ArgValue);
5969           }
5970         }
5971       }
5972 
5973       InVals.push_back(ArgValue);
5974     }
5975   }
5976   assert((ArgLocs.size() + ExtraArgLocs) == Ins.size());
5977 
5978   // varargs
5979   if (isVarArg) {
5980     if (!Subtarget->isTargetDarwin() || IsWin64) {
5981       // The AAPCS variadic function ABI is identical to the non-variadic
5982       // one. As a result there may be more arguments in registers and we should
5983       // save them for future reference.
5984       // Win64 variadic functions also pass arguments in registers, but all float
5985       // arguments are passed in integer registers.
5986       saveVarArgRegisters(CCInfo, DAG, DL, Chain);
5987     }
5988 
5989     // This will point to the next argument passed via stack.
5990     unsigned StackOffset = CCInfo.getNextStackOffset();
5991     // We currently pass all varargs at 8-byte alignment, or 4 for ILP32
5992     StackOffset = alignTo(StackOffset, Subtarget->isTargetILP32() ? 4 : 8);
5993     FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
5994 
5995     if (MFI.hasMustTailInVarArgFunc()) {
5996       SmallVector<MVT, 2> RegParmTypes;
5997       RegParmTypes.push_back(MVT::i64);
5998       RegParmTypes.push_back(MVT::f128);
5999       // Compute the set of forwarded registers. The rest are scratch.
6000       SmallVectorImpl<ForwardedRegister> &Forwards =
6001                                        FuncInfo->getForwardedMustTailRegParms();
6002       CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes,
6003                                                CC_AArch64_AAPCS);
6004 
6005       // Conservatively forward X8, since it might be used for aggregate return.
6006       if (!CCInfo.isAllocated(AArch64::X8)) {
6007         Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
6008         Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
6009       }
6010     }
6011   }
6012 
6013   // On Windows, InReg pointers must be returned, so record the pointer in a
6014   // virtual register at the start of the function so it can be returned in the
6015   // epilogue.
6016   if (IsWin64) {
6017     for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
6018       if (Ins[I].Flags.isInReg()) {
6019         assert(!FuncInfo->getSRetReturnReg());
6020 
6021         MVT PtrTy = getPointerTy(DAG.getDataLayout());
6022         Register Reg =
6023             MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
6024         FuncInfo->setSRetReturnReg(Reg);
6025 
6026         SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[I]);
6027         Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
6028         break;
6029       }
6030     }
6031   }
6032 
6033   unsigned StackArgSize = CCInfo.getNextStackOffset();
6034   bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
6035   if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
6036     // This is a non-standard ABI so by fiat I say we're allowed to make full
6037     // use of the stack area to be popped, which must be aligned to 16 bytes in
6038     // any case:
6039     StackArgSize = alignTo(StackArgSize, 16);
6040 
6041     // If we're expected to restore the stack (e.g. fastcc) then we'll be adding
6042     // a multiple of 16.
6043     FuncInfo->setArgumentStackToRestore(StackArgSize);
6044 
6045     // This realignment carries over to the available bytes below. Our own
6046     // callers will guarantee the space is free by giving an aligned value to
6047     // CALLSEQ_START.
6048   }
6049   // Even if we're not expected to free up the space, it's useful to know how
6050   // much is there while considering tail calls (because we can reuse it).
6051   FuncInfo->setBytesInStackArgArea(StackArgSize);
6052 
6053   if (Subtarget->hasCustomCallingConv())
6054     Subtarget->getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);
6055 
6056   return Chain;
6057 }
6058 
6059 void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
6060                                                 SelectionDAG &DAG,
6061                                                 const SDLoc &DL,
6062                                                 SDValue &Chain) const {
6063   MachineFunction &MF = DAG.getMachineFunction();
6064   MachineFrameInfo &MFI = MF.getFrameInfo();
6065   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
6066   auto PtrVT = getPointerTy(DAG.getDataLayout());
6067   bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv());
6068 
6069   SmallVector<SDValue, 8> MemOps;
6070 
6071   static const MCPhysReg GPRArgRegs[] = { AArch64::X0, AArch64::X1, AArch64::X2,
6072                                           AArch64::X3, AArch64::X4, AArch64::X5,
6073                                           AArch64::X6, AArch64::X7 };
6074   static const unsigned NumGPRArgRegs = array_lengthof(GPRArgRegs);
6075   unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs);
6076 
6077   unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR);
6078   int GPRIdx = 0;
6079   if (GPRSaveSize != 0) {
6080     if (IsWin64) {
6081       GPRIdx = MFI.CreateFixedObject(GPRSaveSize, -(int)GPRSaveSize, false);
6082       if (GPRSaveSize & 15)
6083         // The extra size here, if triggered, will always be 8.
6084         MFI.CreateFixedObject(16 - (GPRSaveSize & 15), -(int)alignTo(GPRSaveSize, 16), false);
6085     } else
6086       GPRIdx = MFI.CreateStackObject(GPRSaveSize, Align(8), false);
6087 
6088     SDValue FIN = DAG.getFrameIndex(GPRIdx, PtrVT);
6089 
6090     for (unsigned i = FirstVariadicGPR; i < NumGPRArgRegs; ++i) {
6091       Register VReg = MF.addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass);
6092       SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
6093       SDValue Store =
6094           DAG.getStore(Val.getValue(1), DL, Val, FIN,
6095                        IsWin64 ? MachinePointerInfo::getFixedStack(
6096                                      MF, GPRIdx, (i - FirstVariadicGPR) * 8)
6097                                : MachinePointerInfo::getStack(MF, i * 8));
6098       MemOps.push_back(Store);
6099       FIN =
6100           DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getConstant(8, DL, PtrVT));
6101     }
6102   }
6103   FuncInfo->setVarArgsGPRIndex(GPRIdx);
6104   FuncInfo->setVarArgsGPRSize(GPRSaveSize);
6105 
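  // FPR varargs only need saving outside Win64, where variadic floating-point
  // arguments are instead passed in integer registers.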
6106   if (Subtarget->hasFPARMv8() && !IsWin64) {
6107     static const MCPhysReg FPRArgRegs[] = {
6108         AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
6109         AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7};
6110     static const unsigned NumFPRArgRegs = array_lengthof(FPRArgRegs);
6111     unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs);
6112 
6113     unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
6114     int FPRIdx = 0;
6115     if (FPRSaveSize != 0) {
6116       FPRIdx = MFI.CreateStackObject(FPRSaveSize, Align(16), false);
6117 
6118       SDValue FIN = DAG.getFrameIndex(FPRIdx, PtrVT);
6119 
6120       for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
6121         Register VReg = MF.addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass);
6122         SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
6123 
6124         SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
6125                                      MachinePointerInfo::getStack(MF, i * 16));
6126         MemOps.push_back(Store);
6127         FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
6128                           DAG.getConstant(16, DL, PtrVT));
6129       }
6130     }
6131     FuncInfo->setVarArgsFPRIndex(FPRIdx);
6132     FuncInfo->setVarArgsFPRSize(FPRSaveSize);
6133   }
6134 
6135   if (!MemOps.empty()) {
6136     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
6137   }
6138 }
6139 
6140 /// LowerCallResult - Lower the result values of a call into the
6141 /// appropriate copies out of appropriate physical registers.
6142 SDValue AArch64TargetLowering::LowerCallResult(
6143     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
6144     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
6145     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
6146     SDValue ThisVal) const {
6147   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
6148   // Assign locations to each value returned by this call.
6149   SmallVector<CCValAssign, 16> RVLocs;
6150   DenseMap<unsigned, SDValue> CopiedRegs;
6151   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
6152                  *DAG.getContext());
6153   CCInfo.AnalyzeCallResult(Ins, RetCC);
6154 
6155   // Copy all of the result registers out of their specified physreg.
6156   for (unsigned i = 0; i != RVLocs.size(); ++i) {
6157     CCValAssign VA = RVLocs[i];
6158 
6159     // Pass the 'this' value directly from the argument to the return value,
6160     // to avoid register unit interference.
6161     if (i == 0 && isThisReturn) {
6162       assert(!VA.needsCustom() && VA.getLocVT() == MVT::i64 &&
6163              "unexpected return calling convention register assignment");
6164       InVals.push_back(ThisVal);
6165       continue;
6166     }
6167 
6168     // Avoid copying a physreg twice since RegAllocFast is incompetent and only
6169     // allows one use of a physreg per block.
6170     SDValue Val = CopiedRegs.lookup(VA.getLocReg());
6171     if (!Val) {
6172       Val =
6173           DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
6174       Chain = Val.getValue(1);
6175       InFlag = Val.getValue(2);
6176       CopiedRegs[VA.getLocReg()] = Val;
6177     }
6178 
6179     switch (VA.getLocInfo()) {
6180     default:
6181       llvm_unreachable("Unknown loc info!");
6182     case CCValAssign::Full:
6183       break;
6184     case CCValAssign::BCvt:
6185       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
6186       break;
6187     case CCValAssign::AExtUpper:
6188       Val = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Val,
6189                         DAG.getConstant(32, DL, VA.getLocVT()));
6190       LLVM_FALLTHROUGH;
6191     case CCValAssign::AExt:
6192       LLVM_FALLTHROUGH;
6193     case CCValAssign::ZExt:
6194       Val = DAG.getZExtOrTrunc(Val, DL, VA.getValVT());
6195       break;
6196     }
6197 
6198     InVals.push_back(Val);
6199   }
6200 
6201   return Chain;
6202 }
6203 
6204 /// Return true if the calling convention is one that we can guarantee TCO for.
6205 static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) {
6206   return (CC == CallingConv::Fast && GuaranteeTailCalls) ||
6207          CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
6208 }
6209 
6210 /// Return true if we might ever do TCO for calls with this calling convention.
6211 static bool mayTailCallThisCC(CallingConv::ID CC) {
6212   switch (CC) {
6213   case CallingConv::C:
6214   case CallingConv::AArch64_SVE_VectorCall:
6215   case CallingConv::PreserveMost:
6216   case CallingConv::Swift:
6217   case CallingConv::SwiftTail:
6218   case CallingConv::Tail:
6219   case CallingConv::Fast:
6220     return true;
6221   default:
6222     return false;
6223   }
6224 }
6225 
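/// Assign locations to each outgoing call operand via the callee's CCAssignFn,
/// applying the vararg convention where required and demoting small integer
/// arguments to their original i8/i16 types.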
6226 static void analyzeCallOperands(const AArch64TargetLowering &TLI,
6227                                 const AArch64Subtarget *Subtarget,
6228                                 const TargetLowering::CallLoweringInfo &CLI,
6229                                 CCState &CCInfo) {
6230   const SelectionDAG &DAG = CLI.DAG;
6231   CallingConv::ID CalleeCC = CLI.CallConv;
6232   bool IsVarArg = CLI.IsVarArg;
6233   const SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
6234   bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
6235 
6236   unsigned NumArgs = Outs.size();
6237   for (unsigned i = 0; i != NumArgs; ++i) {
6238     MVT ArgVT = Outs[i].VT;
6239     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
6240 
6241     bool UseVarArgCC = false;
6242     if (IsVarArg) {
6243       // On Windows, the fixed arguments in a vararg call are passed in GPRs
6244       // too, so use the vararg CC to force them to integer registers.
6245       if (IsCalleeWin64) {
6246         UseVarArgCC = true;
6247       } else {
6248         UseVarArgCC = !Outs[i].IsFixed;
6249       }
6250     } else {
6251       // Get type of the original argument.
      EVT ActualVT = TLI.getValueType(DAG.getDataLayout(),
                                      CLI.Args[Outs[i].OrigArgIndex].Ty,
                                      /*AllowUnknown*/ true);
6255       MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : ArgVT;
6256       // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
6257       if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
6258         ArgVT = MVT::i8;
6259       else if (ActualMVT == MVT::i16)
6260         ArgVT = MVT::i16;
6261     }
6262 
6263     CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CalleeCC, UseVarArgCC);
6264     bool Res = AssignFn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
6265     assert(!Res && "Call operand has unhandled type");
6266     (void)Res;
6267   }
6268 }
6269 
6270 bool AArch64TargetLowering::isEligibleForTailCallOptimization(
6271     const CallLoweringInfo &CLI) const {
6272   CallingConv::ID CalleeCC = CLI.CallConv;
6273   if (!mayTailCallThisCC(CalleeCC))
6274     return false;
6275 
6276   SDValue Callee = CLI.Callee;
6277   bool IsVarArg = CLI.IsVarArg;
6278   const SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
6279   const SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
6280   const SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
6281   const SelectionDAG &DAG = CLI.DAG;
6282   MachineFunction &MF = DAG.getMachineFunction();
6283   const Function &CallerF = MF.getFunction();
6284   CallingConv::ID CallerCC = CallerF.getCallingConv();
6285 
6286   // Functions using the C or Fast calling convention that have an SVE signature
6287   // preserve more registers and should assume the SVE_VectorCall CC.
6288   // The check for matching callee-saved regs will determine whether it is
6289   // eligible for TCO.
6290   if ((CallerCC == CallingConv::C || CallerCC == CallingConv::Fast) &&
6291       MF.getInfo<AArch64FunctionInfo>()->isSVECC())
6292     CallerCC = CallingConv::AArch64_SVE_VectorCall;
6293 
6294   bool CCMatch = CallerCC == CalleeCC;
6295 
  // When using the Windows calling convention on a non-Windows OS, we want
6297   // to back up and restore X18 in such functions; we can't do a tail call
6298   // from those functions.
6299   if (CallerCC == CallingConv::Win64 && !Subtarget->isTargetWindows() &&
6300       CalleeCC != CallingConv::Win64)
6301     return false;
6302 
6303   // Byval parameters hand the function a pointer directly into the stack area
6304   // we want to reuse during a tail call. Working around this *is* possible (see
6305   // X86) but less efficient and uglier in LowerCall.
6306   for (Function::const_arg_iterator i = CallerF.arg_begin(),
6307                                     e = CallerF.arg_end();
6308        i != e; ++i) {
6309     if (i->hasByValAttr())
6310       return false;
6311 
6312     // On Windows, "inreg" attributes signify non-aggregate indirect returns.
6313     // In this case, it is necessary to save/restore X0 in the callee. Tail
6314     // call opt interferes with this. So we disable tail call opt when the
    // caller has an argument with the "inreg" attribute.
6316 
6317     // FIXME: Check whether the callee also has an "inreg" argument.
6318     if (i->hasInRegAttr())
6319       return false;
6320   }
6321 
  if (canGuaranteeTCO(CalleeCC,
                      getTargetMachine().Options.GuaranteedTailCallOpt))
6323     return CCMatch;
6324 
6325   // Externally-defined functions with weak linkage should not be
6326   // tail-called on AArch64 when the OS does not support dynamic
6327   // pre-emption of symbols, as the AAELF spec requires normal calls
6328   // to undefined weak functions to be replaced with a NOP or jump to the
6329   // next instruction. The behaviour of branch instructions in this
6330   // situation (as used for tail calls) is implementation-defined, so we
6331   // cannot rely on the linker replacing the tail call with a return.
6332   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
6333     const GlobalValue *GV = G->getGlobal();
6334     const Triple &TT = getTargetMachine().getTargetTriple();
6335     if (GV->hasExternalWeakLinkage() &&
6336         (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
6337       return false;
6338   }
6339 
6340   // Now we search for cases where we can use a tail call without changing the
6341   // ABI. Sibcall is used in some places (particularly gcc) to refer to this
6342   // concept.
6343 
6344   // I want anyone implementing a new calling convention to think long and hard
6345   // about this assert.
6346   assert((!IsVarArg || CalleeCC == CallingConv::C) &&
6347          "Unexpected variadic calling convention");
6348 
6349   LLVMContext &C = *DAG.getContext();
6350   // Check that the call results are passed in the same way.
6351   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
6352                                   CCAssignFnForCall(CalleeCC, IsVarArg),
6353                                   CCAssignFnForCall(CallerCC, IsVarArg)))
6354     return false;
6355   // The callee has to preserve all registers the caller needs to preserve.
6356   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
6357   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
6358   if (!CCMatch) {
6359     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
6360     if (Subtarget->hasCustomCallingConv()) {
6361       TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
6362       TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
6363     }
6364     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
6365       return false;
6366   }
6367 
6368   // Nothing more to check if the callee is taking no arguments
6369   if (Outs.empty())
6370     return true;
6371 
6372   SmallVector<CCValAssign, 16> ArgLocs;
6373   CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);
6374 
6375   analyzeCallOperands(*this, Subtarget, CLI, CCInfo);
6376 
6377   if (IsVarArg && !(CLI.CB && CLI.CB->isMustTailCall())) {
    // When the call is musttail, additional checks have already been done, so
    // this check can safely be skipped.
6379     // At least two cases here: if caller is fastcc then we can't have any
6380     // memory arguments (we'd be expected to clean up the stack afterwards). If
6381     // caller is C then we could potentially use its argument area.
6382 
6383     // FIXME: for now we take the most conservative of these in both cases:
6384     // disallow all variadic memory operands.
6385     for (const CCValAssign &ArgLoc : ArgLocs)
6386       if (!ArgLoc.isRegLoc())
6387         return false;
6388   }
6389 
6390   const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
6391 
  // If any of the arguments is passed indirectly, it must be SVE, so the
  // 'getBytesInStackArgArea' is not sufficient to determine whether we need to
  // allocate space on the stack. That is why we check this explicitly here:
  // if any argument is passed indirectly, the call cannot be a tail call.
6396   if (llvm::any_of(ArgLocs, [](CCValAssign &A) {
6397         assert((A.getLocInfo() != CCValAssign::Indirect ||
6398                 A.getValVT().isScalableVector()) &&
6399                "Expected value to be scalable");
6400         return A.getLocInfo() == CCValAssign::Indirect;
6401       }))
6402     return false;
6403 
6404   // If the stack arguments for this call do not fit into our own save area then
6405   // the call cannot be made tail.
6406   if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
6407     return false;
6408 
6409   const MachineRegisterInfo &MRI = MF.getRegInfo();
6410   if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
6411     return false;
6412 
6413   return true;
6414 }
6415 
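/// Build a TokenFactor that joins the incoming chain with the chains of any
/// loads from the caller's incoming stack-argument area that overlap the frame
/// object about to be clobbered (ClobberedFI). This ensures those values are
/// read before a tail-call argument store overwrites them.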
6416 SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
6417                                                    SelectionDAG &DAG,
6418                                                    MachineFrameInfo &MFI,
6419                                                    int ClobberedFI) const {
6420   SmallVector<SDValue, 8> ArgChains;
6421   int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
6422   int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
6423 
6424   // Include the original chain at the beginning of the list. When this is
6425   // used by target LowerCall hooks, this helps legalize find the
6426   // CALLSEQ_BEGIN node.
6427   ArgChains.push_back(Chain);
6428 
  // Add a chain value for each stack argument that overlaps the area being
  // clobbered.
6430   for (SDNode *U : DAG.getEntryNode().getNode()->uses())
6431     if (LoadSDNode *L = dyn_cast<LoadSDNode>(U))
6432       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
6433         if (FI->getIndex() < 0) {
6434           int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
6435           int64_t InLastByte = InFirstByte;
6436           InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;
6437 
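          // Record this load's chain if its byte range overlaps the area
          // being clobbered.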
6438           if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
6439               (FirstByte <= InFirstByte && InFirstByte <= LastByte))
6440             ArgChains.push_back(SDValue(L, 1));
6441         }
6442 
6443   // Build a tokenfactor for all the chains.
6444   return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
6445 }
6446 
6447 bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
6448                                                    bool TailCallOpt) const {
6449   return (CallCC == CallingConv::Fast && TailCallOpt) ||
6450          CallCC == CallingConv::Tail || CallCC == CallingConv::SwiftTail;
6451 }
6452 
6453 // Check if the value is zero-extended from i1 to i8
6454 static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) {
6455   unsigned SizeInBits = Arg.getValueType().getSizeInBits();
6456   if (SizeInBits < 8)
6457     return false;
6458 
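  // The value is already zero-extended from i1 if bits [7:1] of the low byte
  // are known to be zero (the 0xFE mask below); bit 0 itself may be 0 or 1.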
  APInt RequiredZero(SizeInBits, 0xFE);
  KnownBits Bits = DAG.computeKnownBits(Arg, 4);
  bool ZExtBool = (Bits.Zero & RequiredZero) == RequiredZero;
6462   return ZExtBool;
6463 }
6464 
6465 /// LowerCall - Lower a call to a callseq_start + CALL + callseq_end chain,
6466 /// and add input and output parameter nodes.
6467 SDValue
6468 AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
6469                                  SmallVectorImpl<SDValue> &InVals) const {
6470   SelectionDAG &DAG = CLI.DAG;
6471   SDLoc &DL = CLI.DL;
6472   SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
6473   SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
6474   SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
6475   SDValue Chain = CLI.Chain;
6476   SDValue Callee = CLI.Callee;
6477   bool &IsTailCall = CLI.IsTailCall;
6478   CallingConv::ID &CallConv = CLI.CallConv;
6479   bool IsVarArg = CLI.IsVarArg;
6480 
6481   MachineFunction &MF = DAG.getMachineFunction();
6482   MachineFunction::CallSiteInfo CSInfo;
6483   bool IsThisReturn = false;
6484 
6485   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
6486   bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
6487   bool IsSibCall = false;
6488   bool GuardWithBTI = false;
6489 
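  // Calls to 'returns_twice' functions (e.g. setjmp) may resume execution via
  // an indirect branch, so with branch target enforcement the point after the
  // call must be a valid branch target; guard such calls with a BTI.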
6490   if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Attribute::ReturnsTwice) &&
6491       !Subtarget->noBTIAtReturnTwice()) {
6492     GuardWithBTI = FuncInfo->branchTargetEnforcement();
6493   }
6494 
6495   // Check callee args/returns for SVE registers and set calling convention
6496   // accordingly.
6497   if (CallConv == CallingConv::C || CallConv == CallingConv::Fast) {
6498     bool CalleeOutSVE = any_of(Outs, [](ISD::OutputArg &Out){
6499       return Out.VT.isScalableVector();
6500     });
6501     bool CalleeInSVE = any_of(Ins, [](ISD::InputArg &In){
6502       return In.VT.isScalableVector();
6503     });
6504 
6505     if (CalleeInSVE || CalleeOutSVE)
6506       CallConv = CallingConv::AArch64_SVE_VectorCall;
6507   }
6508 
6509   if (IsTailCall) {
6510     // Check if it's really possible to do a tail call.
6511     IsTailCall = isEligibleForTailCallOptimization(CLI);
6512 
6513     // A sibling call is one where we're under the usual C ABI and not planning
6514     // to change that but can still do a tail call:
6515     if (!TailCallOpt && IsTailCall && CallConv != CallingConv::Tail &&
6516         CallConv != CallingConv::SwiftTail)
6517       IsSibCall = true;
6518 
6519     if (IsTailCall)
6520       ++NumTailCalls;
6521   }
6522 
6523   if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
6524     report_fatal_error("failed to perform tail call elimination on a call "
6525                        "site marked musttail");
6526 
6527   // Analyze operands of the call, assigning locations to each operand.
6528   SmallVector<CCValAssign, 16> ArgLocs;
6529   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
6530 
6531   if (IsVarArg) {
6532     unsigned NumArgs = Outs.size();
6533 
6534     for (unsigned i = 0; i != NumArgs; ++i) {
6535       if (!Outs[i].IsFixed && Outs[i].VT.isScalableVector())
6536         report_fatal_error("Passing SVE types to variadic functions is "
6537                            "currently not supported");
6538     }
6539   }
6540 
6541   analyzeCallOperands(*this, Subtarget, CLI, CCInfo);
6542 
6543   // Get a count of how many bytes are to be pushed on the stack.
6544   unsigned NumBytes = CCInfo.getNextStackOffset();
6545 
6546   if (IsSibCall) {
6547     // Since we're not changing the ABI to make this a tail call, the memory
6548     // operands are already available in the caller's incoming argument space.
6549     NumBytes = 0;
6550   }
6551 
6552   // FPDiff is the byte offset of the call's argument area from the callee's.
6553   // Stores to callee stack arguments will be placed in FixedStackSlots offset
6554   // by this amount for a tail call. In a sibling call it must be 0 because the
6555   // caller will deallocate the entire stack and the callee still expects its
6556   // arguments to begin at SP+0. Completely unused for non-tail calls.
6557   int FPDiff = 0;
6558 
6559   if (IsTailCall && !IsSibCall) {
6560     unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
6561 
6562     // Since callee will pop argument stack as a tail call, we must keep the
6563     // popped size 16-byte aligned.
6564     NumBytes = alignTo(NumBytes, 16);
6565 
6566     // FPDiff will be negative if this tail call requires more space than we
6567     // would automatically have in our incoming argument space. Positive if we
6568     // can actually shrink the stack.
6569     FPDiff = NumReusableBytes - NumBytes;
6570 
6571     // Update the required reserved area if this is the tail call requiring the
6572     // most argument stack space.
6573     if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff)
6574       FuncInfo->setTailCallReservedStack(-FPDiff);
6575 
6576     // The stack pointer must be 16-byte aligned at all times it's used for a
6577     // memory operation, which in practice means at *all* times and in
6578     // particular across call boundaries. Therefore our own arguments started at
6579     // a 16-byte aligned SP and the delta applied for the tail call should
6580     // satisfy the same constraint.
6581     assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
6582   }
6583 
6584   // Adjust the stack pointer for the new arguments...
6585   // These operations are automatically eliminated by the prolog/epilog pass
6586   if (!IsSibCall)
6587     Chain = DAG.getCALLSEQ_START(Chain, IsTailCall ? 0 : NumBytes, 0, DL);
6588 
6589   SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, AArch64::SP,
6590                                         getPointerTy(DAG.getDataLayout()));
6591 
6592   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6593   SmallSet<unsigned, 8> RegsUsed;
6594   SmallVector<SDValue, 8> MemOpChains;
6595   auto PtrVT = getPointerTy(DAG.getDataLayout());
6596 
6597   if (IsVarArg && CLI.CB && CLI.CB->isMustTailCall()) {
6598     const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();
6599     for (const auto &F : Forwards) {
6600       SDValue Val = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
      RegsToPass.emplace_back(F.PReg, Val);
6602     }
6603   }
6604 
6605   // Walk the register/memloc assignments, inserting copies/loads.
6606   unsigned ExtraArgLocs = 0;
6607   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
6608     CCValAssign &VA = ArgLocs[i - ExtraArgLocs];
6609     SDValue Arg = OutVals[i];
6610     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6611 
6612     // Promote the value if needed.
6613     switch (VA.getLocInfo()) {
6614     default:
6615       llvm_unreachable("Unknown loc info!");
6616     case CCValAssign::Full:
6617       break;
6618     case CCValAssign::SExt:
6619       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
6620       break;
6621     case CCValAssign::ZExt:
6622       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
6623       break;
6624     case CCValAssign::AExt:
6625       if (Outs[i].ArgVT == MVT::i1) {
6626         // AAPCS requires i1 to be zero-extended to 8-bits by the caller.
6627         //
6628         // Check if we actually have to do this, because the value may
6629         // already be zero-extended.
6630         //
6631         // We cannot just emit a (zext i8 (trunc (assert-zext i8)))
6632         // and rely on DAGCombiner to fold this, because the following
6633         // (anyext i32) is combined with (zext i8) in DAG.getNode:
6634         //
6635         //   (ext (zext x)) -> (zext x)
6636         //
6637         // This will give us (zext i32), which we cannot remove, so
6638         // try to check this beforehand.
6639         if (!checkZExtBool(Arg, DAG)) {
6640           Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
6641           Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i8, Arg);
6642         }
6643       }
6644       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
6645       break;
6646     case CCValAssign::AExtUpper:
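      // The 32-bit value must be placed in the upper half of the 64-bit
      // location register: any-extend it, then shift it left by 32.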
6647       assert(VA.getValVT() == MVT::i32 && "only expect 32 -> 64 upper bits");
6648       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
6649       Arg = DAG.getNode(ISD::SHL, DL, VA.getLocVT(), Arg,
6650                         DAG.getConstant(32, DL, VA.getLocVT()));
6651       break;
6652     case CCValAssign::BCvt:
6653       Arg = DAG.getBitcast(VA.getLocVT(), Arg);
6654       break;
6655     case CCValAssign::Trunc:
6656       Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
6657       break;
6658     case CCValAssign::FPExt:
6659       Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
6660       break;
6661     case CCValAssign::Indirect:
6662       assert(VA.getValVT().isScalableVector() &&
6663              "Only scalable vectors can be passed indirectly");
6664 
6665       uint64_t StoreSize = VA.getValVT().getStoreSize().getKnownMinSize();
6666       uint64_t PartSize = StoreSize;
6667       unsigned NumParts = 1;
6668       if (Outs[i].Flags.isInConsecutiveRegs()) {
6669         assert(!Outs[i].Flags.isInConsecutiveRegsLast());
6670         while (!Outs[i + NumParts - 1].Flags.isInConsecutiveRegsLast())
6671           ++NumParts;
6672         StoreSize *= NumParts;
6673       }
6674 
6675       MachineFrameInfo &MFI = MF.getFrameInfo();
6676       Type *Ty = EVT(VA.getValVT()).getTypeForEVT(*DAG.getContext());
6677       Align Alignment = DAG.getDataLayout().getPrefTypeAlign(Ty);
6678       int FI = MFI.CreateStackObject(StoreSize, Alignment, false);
6679       MFI.setStackID(FI, TargetStackID::ScalableVector);
6680 
6681       MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6682       SDValue Ptr = DAG.getFrameIndex(
6683           FI, DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout()));
6684       SDValue SpillSlot = Ptr;
6685 
6686       // Ensure we generate all stores for each tuple part, whilst updating the
6687       // pointer after each store correctly using vscale.
6688       while (NumParts) {
6689         Chain = DAG.getStore(Chain, DL, OutVals[i], Ptr, MPI);
6690         NumParts--;
6691         if (NumParts > 0) {
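          // Step to the next part: each part is a scalable vector, so the
          // byte offset advances by vscale * PartSize.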
6692           SDValue BytesIncrement = DAG.getVScale(
6693               DL, Ptr.getValueType(),
6694               APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize));
6695           SDNodeFlags Flags;
6696           Flags.setNoUnsignedWrap(true);
6697 
6698           MPI = MachinePointerInfo(MPI.getAddrSpace());
6699           Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
6700                             BytesIncrement, Flags);
6701           ExtraArgLocs++;
6702           i++;
6703         }
6704       }
6705 
6706       Arg = SpillSlot;
6707       break;
6708     }
6709 
6710     if (VA.isRegLoc()) {
6711       if (i == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
6712           Outs[0].VT == MVT::i64) {
6713         assert(VA.getLocVT() == MVT::i64 &&
6714                "unexpected calling convention register assignment");
6715         assert(!Ins.empty() && Ins[0].VT == MVT::i64 &&
6716                "unexpected use of 'returned'");
6717         IsThisReturn = true;
6718       }
6719       if (RegsUsed.count(VA.getLocReg())) {
6720         // If this register has already been used then we're trying to pack
6721         // parts of an [N x i32] into an X-register. The extension type will
6722         // take care of putting the two halves in the right place but we have to
6723         // combine them.
6724         SDValue &Bits =
6725             llvm::find_if(RegsToPass,
6726                           [=](const std::pair<unsigned, SDValue> &Elt) {
6727                             return Elt.first == VA.getLocReg();
6728                           })
6729                 ->second;
6730         Bits = DAG.getNode(ISD::OR, DL, Bits.getValueType(), Bits, Arg);
6731         // Call site info is used for function's parameter entry value
6732         // tracking. For now we track only simple cases when parameter
6733         // is transferred through whole register.
6734         llvm::erase_if(CSInfo, [&VA](MachineFunction::ArgRegPair ArgReg) {
6735           return ArgReg.Reg == VA.getLocReg();
6736         });
6737       } else {
6738         RegsToPass.emplace_back(VA.getLocReg(), Arg);
6739         RegsUsed.insert(VA.getLocReg());
6740         const TargetOptions &Options = DAG.getTarget().Options;
6741         if (Options.EmitCallSiteInfo)
6742           CSInfo.emplace_back(VA.getLocReg(), i);
6743       }
6744     } else {
6745       assert(VA.isMemLoc());
6746 
6747       SDValue DstAddr;
6748       MachinePointerInfo DstInfo;
6749 
6750       // FIXME: This works on big-endian for composite byvals, which are the
6751       // common case. It should also work for fundamental types too.
6752       uint32_t BEAlign = 0;
6753       unsigned OpSize;
6754       if (VA.getLocInfo() == CCValAssign::Indirect ||
6755           VA.getLocInfo() == CCValAssign::Trunc)
6756         OpSize = VA.getLocVT().getFixedSizeInBits();
6757       else
6758         OpSize = Flags.isByVal() ? Flags.getByValSize() * 8
6759                                  : VA.getValVT().getSizeInBits();
6760       OpSize = (OpSize + 7) / 8;
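      // On big-endian targets, a value narrower than 8 bytes is placed at the
      // high-address end of its 8-byte stack slot.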
6761       if (!Subtarget->isLittleEndian() && !Flags.isByVal() &&
6762           !Flags.isInConsecutiveRegs()) {
6763         if (OpSize < 8)
6764           BEAlign = 8 - OpSize;
6765       }
6766       unsigned LocMemOffset = VA.getLocMemOffset();
6767       int32_t Offset = LocMemOffset + BEAlign;
6768       SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);
6769       PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
6770 
6771       if (IsTailCall) {
6772         Offset = Offset + FPDiff;
6773         int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
6774 
6775         DstAddr = DAG.getFrameIndex(FI, PtrVT);
6776         DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
6777 
6778         // Make sure any stack arguments overlapping with where we're storing
6779         // are loaded before this eventual operation. Otherwise they'll be
6780         // clobbered.
6781         Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
6782       } else {
6783         SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);
6784 
6785         DstAddr = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
6786         DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
6787       }
6788 
6789       if (Outs[i].Flags.isByVal()) {
6790         SDValue SizeNode =
6791             DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i64);
6792         SDValue Cpy = DAG.getMemcpy(
6793             Chain, DL, DstAddr, Arg, SizeNode,
6794             Outs[i].Flags.getNonZeroByValAlign(),
6795             /*isVol = */ false, /*AlwaysInline = */ false,
6796             /*isTailCall = */ false, DstInfo, MachinePointerInfo());
6797 
6798         MemOpChains.push_back(Cpy);
6799       } else {
6800         // Since we pass i1/i8/i16 as i1/i8/i16 on stack and Arg is already
6801         // promoted to a legal register type i32, we should truncate Arg back to
6802         // i1/i8/i16.
6803         if (VA.getValVT() == MVT::i1 || VA.getValVT() == MVT::i8 ||
6804             VA.getValVT() == MVT::i16)
6805           Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
6806 
6807         SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo);
6808         MemOpChains.push_back(Store);
6809       }
6810     }
6811   }
6812 
6813   if (!MemOpChains.empty())
6814     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
6815 
6816   // Build a sequence of copy-to-reg nodes chained together with token chain
6817   // and flag operands which copy the outgoing args into the appropriate regs.
6818   SDValue InFlag;
6819   for (auto &RegToPass : RegsToPass) {
6820     Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
6821                              RegToPass.second, InFlag);
6822     InFlag = Chain.getValue(1);
6823   }
6824 
6825   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
6826   // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
6827   // node so that legalize doesn't hack it.
6828   if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
6829     auto GV = G->getGlobal();
6830     unsigned OpFlags =
6831         Subtarget->classifyGlobalFunctionReference(GV, getTargetMachine());
6832     if (OpFlags & AArch64II::MO_GOT) {
6833       Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
6834       Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
6835     } else {
6836       const GlobalValue *GV = G->getGlobal();
6837       Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0);
6838     }
6839   } else if (auto *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
6840     if (getTargetMachine().getCodeModel() == CodeModel::Large &&
6841         Subtarget->isTargetMachO()) {
6842       const char *Sym = S->getSymbol();
6843       Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, AArch64II::MO_GOT);
6844       Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
6845     } else {
6846       const char *Sym = S->getSymbol();
6847       Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0);
6848     }
6849   }
6850 
6851   // We don't usually want to end the call-sequence here because we would tidy
6852   // the frame up *after* the call, however in the ABI-changing tail-call case
6853   // we've carefully laid out the parameters so that when sp is reset they'll be
6854   // in the correct location.
6855   if (IsTailCall && !IsSibCall) {
6856     Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
6857                                DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
6858     InFlag = Chain.getValue(1);
6859   }
6860 
6861   std::vector<SDValue> Ops;
6862   Ops.push_back(Chain);
6863   Ops.push_back(Callee);
6864 
6865   if (IsTailCall) {
6866     // Each tail call may have to adjust the stack by a different amount, so
6867     // this information must travel along with the operation for eventual
6868     // consumption by emitEpilogue.
6869     Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
6870   }
6871 
6872   // Add argument registers to the end of the list so that they are known live
6873   // into the call.
6874   for (auto &RegToPass : RegsToPass)
6875     Ops.push_back(DAG.getRegister(RegToPass.first,
6876                                   RegToPass.second.getValueType()));
6877 
6878   // Add a register mask operand representing the call-preserved registers.
6879   const uint32_t *Mask;
6880   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
6881   if (IsThisReturn) {
6882     // For 'this' returns, use the X0-preserving mask if applicable
6883     Mask = TRI->getThisReturnPreservedMask(MF, CallConv);
6884     if (!Mask) {
6885       IsThisReturn = false;
6886       Mask = TRI->getCallPreservedMask(MF, CallConv);
6887     }
6888   } else
6889     Mask = TRI->getCallPreservedMask(MF, CallConv);
6890 
6891   if (Subtarget->hasCustomCallingConv())
6892     TRI->UpdateCustomCallPreservedMask(MF, &Mask);
6893 
6894   if (TRI->isAnyArgRegReserved(MF))
6895     TRI->emitReservedArgRegCallError(MF);
6896 
6897   assert(Mask && "Missing call preserved mask for calling convention");
6898   Ops.push_back(DAG.getRegisterMask(Mask));
6899 
6900   if (InFlag.getNode())
6901     Ops.push_back(InFlag);
6902 
6903   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6904 
  // If we're doing a tail call, use a TC_RETURN here rather than an
  // actual call instruction.
6907   if (IsTailCall) {
6908     MF.getFrameInfo().setHasTailCall();
6909     SDValue Ret = DAG.getNode(AArch64ISD::TC_RETURN, DL, NodeTys, Ops);
6910     DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
6911     return Ret;
6912   }
6913 
6914   unsigned CallOpc = AArch64ISD::CALL;
6915   // Calls with operand bundle "clang.arc.attachedcall" are special. They should
6916   // be expanded to the call, directly followed by a special marker sequence and
6917   // a call to an ObjC library function.  Use CALL_RVMARKER to do that.
6918   if (CLI.CB && objcarc::hasAttachedCallOpBundle(CLI.CB)) {
6919     assert(!IsTailCall &&
6920            "tail calls cannot be marked with clang.arc.attachedcall");
6921     CallOpc = AArch64ISD::CALL_RVMARKER;
6922 
6923     // Add a target global address for the retainRV/claimRV runtime function
6924     // just before the call target.
6925     Function *ARCFn = *objcarc::getAttachedARCFunction(CLI.CB);
6926     auto GA = DAG.getTargetGlobalAddress(ARCFn, DL, PtrVT);
6927     Ops.insert(Ops.begin() + 1, GA);
6928   } else if (GuardWithBTI)
6929     CallOpc = AArch64ISD::CALL_BTI;
6930 
6931   // Returns a chain and a flag for retval copy to use.
6932   Chain = DAG.getNode(CallOpc, DL, NodeTys, Ops);
6933   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
6934   InFlag = Chain.getValue(1);
6935   DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
6936 
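  // Callee-cleanup conventions pop the entire 16-byte-aligned argument area;
  // for all other conventions the callee pops nothing here.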
6937   uint64_t CalleePopBytes =
6938       DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0;
6939 
6940   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
6941                              DAG.getIntPtrConstant(CalleePopBytes, DL, true),
6942                              InFlag, DL);
6943   if (!Ins.empty())
6944     InFlag = Chain.getValue(1);
6945 
6946   // Handle result values, copying them out of physregs into vregs that we
6947   // return.
6948   return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
6949                          InVals, IsThisReturn,
6950                          IsThisReturn ? OutVals[0] : SDValue());
6951 }
6952 
6953 bool AArch64TargetLowering::CanLowerReturn(
6954     CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
6955     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
6956   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
6957   SmallVector<CCValAssign, 16> RVLocs;
6958   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
6959   return CCInfo.CheckReturn(Outs, RetCC);
6960 }
6961 
6962 SDValue
6963 AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
6964                                    bool isVarArg,
6965                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
6966                                    const SmallVectorImpl<SDValue> &OutVals,
6967                                    const SDLoc &DL, SelectionDAG &DAG) const {
6968   auto &MF = DAG.getMachineFunction();
6969   auto *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
6970 
6971   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
6972   SmallVector<CCValAssign, 16> RVLocs;
6973   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
6974   CCInfo.AnalyzeReturn(Outs, RetCC);
6975 
6976   // Copy the result values into the output registers.
6977   SDValue Flag;
6978   SmallVector<std::pair<unsigned, SDValue>, 4> RetVals;
6979   SmallSet<unsigned, 4> RegsUsed;
6980   for (unsigned i = 0, realRVLocIdx = 0; i != RVLocs.size();
6981        ++i, ++realRVLocIdx) {
6982     CCValAssign &VA = RVLocs[i];
6983     assert(VA.isRegLoc() && "Can only return in registers!");
6984     SDValue Arg = OutVals[realRVLocIdx];
6985 
6986     switch (VA.getLocInfo()) {
6987     default:
6988       llvm_unreachable("Unknown loc info!");
6989     case CCValAssign::Full:
6990       if (Outs[i].ArgVT == MVT::i1) {
6991         // AAPCS requires i1 to be zero-extended to i8 by the producer of the
6992         // value. This is strictly redundant on Darwin (which uses "zeroext
6993         // i1"), but will be optimised out before ISel.
6994         Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
6995         Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
6996       }
6997       break;
6998     case CCValAssign::BCvt:
6999       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
7000       break;
7001     case CCValAssign::AExt:
7002     case CCValAssign::ZExt:
7003       Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
7004       break;
7005     case CCValAssign::AExtUpper:
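      // The 32-bit return value goes in the upper half of the 64-bit location
      // register: extend it, then shift it left by 32.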
7006       assert(VA.getValVT() == MVT::i32 && "only expect 32 -> 64 upper bits");
7007       Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
7008       Arg = DAG.getNode(ISD::SHL, DL, VA.getLocVT(), Arg,
7009                         DAG.getConstant(32, DL, VA.getLocVT()));
7010       break;
7011     }
7012 
7013     if (RegsUsed.count(VA.getLocReg())) {
7014       SDValue &Bits =
7015           llvm::find_if(RetVals, [=](const std::pair<unsigned, SDValue> &Elt) {
7016             return Elt.first == VA.getLocReg();
7017           })->second;
7018       Bits = DAG.getNode(ISD::OR, DL, Bits.getValueType(), Bits, Arg);
7019     } else {
7020       RetVals.emplace_back(VA.getLocReg(), Arg);
7021       RegsUsed.insert(VA.getLocReg());
7022     }
7023   }
7024 
7025   SmallVector<SDValue, 4> RetOps(1, Chain);
7026   for (auto &RetVal : RetVals) {
7027     Chain = DAG.getCopyToReg(Chain, DL, RetVal.first, RetVal.second, Flag);
7028     Flag = Chain.getValue(1);
7029     RetOps.push_back(
7030         DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
7031   }
7032 
7033   // Windows AArch64 ABIs require that for returning structs by value we copy
7034   // the sret argument into X0 for the return.
7035   // We saved the argument into a virtual register in the entry block,
7036   // so now we copy the value out and into X0.
7037   if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
7038     SDValue Val = DAG.getCopyFromReg(RetOps[0], DL, SRetReg,
7039                                      getPointerTy(MF.getDataLayout()));
7040 
7041     unsigned RetValReg = AArch64::X0;
7042     Chain = DAG.getCopyToReg(Chain, DL, RetValReg, Val, Flag);
7043     Flag = Chain.getValue(1);
7044 
7045     RetOps.push_back(
7046       DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
7047   }
7048 
7049   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
7050   const MCPhysReg *I = TRI->getCalleeSavedRegsViaCopy(&MF);
7051   if (I) {
7052     for (; *I; ++I) {
7053       if (AArch64::GPR64RegClass.contains(*I))
7054         RetOps.push_back(DAG.getRegister(*I, MVT::i64));
7055       else if (AArch64::FPR64RegClass.contains(*I))
7056         RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
7057       else
7058         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
7059     }
7060   }
7061 
7062   RetOps[0] = Chain; // Update chain.
7063 
7064   // Add the flag if we have it.
7065   if (Flag.getNode())
7066     RetOps.push_back(Flag);
7067 
7068   return DAG.getNode(AArch64ISD::RET_FLAG, DL, MVT::Other, RetOps);
7069 }
7070 
7071 //===----------------------------------------------------------------------===//
7072 //  Other Lowering Code
7073 //===----------------------------------------------------------------------===//
7074 
7075 SDValue AArch64TargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
7076                                              SelectionDAG &DAG,
7077                                              unsigned Flag) const {
7078   return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty,
7079                                     N->getOffset(), Flag);
7080 }
7081 
7082 SDValue AArch64TargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
7083                                              SelectionDAG &DAG,
7084                                              unsigned Flag) const {
7085   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
7086 }
7087 
7088 SDValue AArch64TargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
7089                                              SelectionDAG &DAG,
7090                                              unsigned Flag) const {
7091   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
7092                                    N->getOffset(), Flag);
7093 }
7094 
7095 SDValue AArch64TargetLowering::getTargetNode(BlockAddressSDNode* N, EVT Ty,
7096                                              SelectionDAG &DAG,
7097                                              unsigned Flag) const {
7098   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
7099 }
7100 
7101 // (loadGOT sym)
7102 template <class NodeTy>
7103 SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG,
7104                                       unsigned Flags) const {
7105   LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getGOT\n");
7106   SDLoc DL(N);
7107   EVT Ty = getPointerTy(DAG.getDataLayout());
7108   SDValue GotAddr = getTargetNode(N, Ty, DAG, AArch64II::MO_GOT | Flags);
7109   // FIXME: Once remat is capable of dealing with instructions with register
7110   // operands, expand this into two nodes instead of using a wrapper node.
7111   return DAG.getNode(AArch64ISD::LOADgot, DL, Ty, GotAddr);
7112 }
7113 
7114 // (wrapper %highest(sym), %higher(sym), %hi(sym), %lo(sym))
7115 template <class NodeTy>
7116 SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG,
7117                                             unsigned Flags) const {
7118   LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrLarge\n");
7119   SDLoc DL(N);
7120   EVT Ty = getPointerTy(DAG.getDataLayout());
7121   const unsigned char MO_NC = AArch64II::MO_NC;
7122   return DAG.getNode(
7123       AArch64ISD::WrapperLarge, DL, Ty,
7124       getTargetNode(N, Ty, DAG, AArch64II::MO_G3 | Flags),
7125       getTargetNode(N, Ty, DAG, AArch64II::MO_G2 | MO_NC | Flags),
7126       getTargetNode(N, Ty, DAG, AArch64II::MO_G1 | MO_NC | Flags),
7127       getTargetNode(N, Ty, DAG, AArch64II::MO_G0 | MO_NC | Flags));
7128 }
7129 
7130 // (addlow (adrp %hi(sym)) %lo(sym))
7131 template <class NodeTy>
7132 SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
7133                                        unsigned Flags) const {
7134   LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddr\n");
7135   SDLoc DL(N);
7136   EVT Ty = getPointerTy(DAG.getDataLayout());
7137   SDValue Hi = getTargetNode(N, Ty, DAG, AArch64II::MO_PAGE | Flags);
7138   SDValue Lo = getTargetNode(N, Ty, DAG,
7139                              AArch64II::MO_PAGEOFF | AArch64II::MO_NC | Flags);
7140   SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, Ty, Hi);
7141   return DAG.getNode(AArch64ISD::ADDlow, DL, Ty, ADRP, Lo);
7142 }
7143 
7144 // (adr sym)
7145 template <class NodeTy>
7146 SDValue AArch64TargetLowering::getAddrTiny(NodeTy *N, SelectionDAG &DAG,
7147                                            unsigned Flags) const {
7148   LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrTiny\n");
7149   SDLoc DL(N);
7150   EVT Ty = getPointerTy(DAG.getDataLayout());
7151   SDValue Sym = getTargetNode(N, Ty, DAG, Flags);
7152   return DAG.getNode(AArch64ISD::ADR, DL, Ty, Sym);
7153 }
7154 
7155 SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
7156                                                   SelectionDAG &DAG) const {
7157   GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
7158   const GlobalValue *GV = GN->getGlobal();
7159   unsigned OpFlags = Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
7160 
7161   if (OpFlags != AArch64II::MO_NO_FLAG)
7162     assert(cast<GlobalAddressSDNode>(Op)->getOffset() == 0 &&
7163            "unexpected offset in global node");
7164 
7165   // This also catches the large code model case for Darwin, and tiny code
7166   // model with got relocations.
7167   if ((OpFlags & AArch64II::MO_GOT) != 0) {
7168     return getGOT(GN, DAG, OpFlags);
7169   }
7170 
7171   SDValue Result;
7172   if (getTargetMachine().getCodeModel() == CodeModel::Large) {
7173     Result = getAddrLarge(GN, DAG, OpFlags);
7174   } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
7175     Result = getAddrTiny(GN, DAG, OpFlags);
7176   } else {
7177     Result = getAddr(GN, DAG, OpFlags);
7178   }
7179   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7180   SDLoc DL(GN);
7181   if (OpFlags & (AArch64II::MO_DLLIMPORT | AArch64II::MO_COFFSTUB))
7182     Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
7183                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
7184   return Result;
7185 }
7186 
7187 /// Convert a TLS address reference into the correct sequence of loads
7188 /// and calls to compute the variable's address (for Darwin, currently) and
7189 /// return an SDValue containing the final node.
7190 
7191 /// Darwin only has one TLS scheme which must be capable of dealing with the
7192 /// fully general situation, in the worst case. This means:
7193 ///     + "extern __thread" declaration.
7194 ///     + Defined in a possibly unknown dynamic library.
7195 ///
7196 /// The general system is that each __thread variable has a [3 x i64] descriptor
7197 /// which contains information used by the runtime to calculate the address. The
7198 /// only part of this the compiler needs to know about is the first xword, which
7199 /// contains a function pointer that must be called with the address of the
7200 /// entire descriptor in "x0".
7201 ///
7202 /// Since this descriptor may be in a different unit, in general even the
7203 /// descriptor must be accessed via an indirect load. The "ideal" code sequence
7204 /// is:
7205 ///     adrp x0, _var@TLVPPAGE
7206 ///     ldr x0, [x0, _var@TLVPPAGEOFF]   ; x0 now contains address of descriptor
7207 ///     ldr x1, [x0]                     ; x1 contains 1st entry of descriptor,
7208 ///                                      ; the function pointer
7209 ///     blr x1                           ; Uses descriptor address in x0
7210 ///     ; Address of _var is now in x0.
7211 ///
7212 /// If the address of _var's descriptor *is* known to the linker, then it can
7213 /// change the first "ldr" instruction to an appropriate "add x0, x0, #imm" for
7214 /// a slight efficiency gain.
7215 SDValue
7216 AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op,
7217                                                    SelectionDAG &DAG) const {
7218   assert(Subtarget->isTargetDarwin() &&
7219          "This function expects a Darwin target");
7220 
7221   SDLoc DL(Op);
7222   MVT PtrVT = getPointerTy(DAG.getDataLayout());
7223   MVT PtrMemVT = getPointerMemTy(DAG.getDataLayout());
7224   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
7225 
7226   SDValue TLVPAddr =
7227       DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
7228   SDValue DescAddr = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TLVPAddr);
7229 
7230   // The first entry in the descriptor is a function pointer that we must call
7231   // to obtain the address of the variable.
7232   SDValue Chain = DAG.getEntryNode();
7233   SDValue FuncTLVGet = DAG.getLoad(
7234       PtrMemVT, DL, Chain, DescAddr,
7235       MachinePointerInfo::getGOT(DAG.getMachineFunction()),
7236       Align(PtrMemVT.getSizeInBits() / 8),
7237       MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
7238   Chain = FuncTLVGet.getValue(1);
7239 
7240   // Extend loaded pointer if necessary (i.e. if ILP32) to DAG pointer.
7241   FuncTLVGet = DAG.getZExtOrTrunc(FuncTLVGet, DL, PtrVT);
7242 
7243   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7244   MFI.setAdjustsStack(true);
7245 
7246   // TLS calls preserve all registers except those that absolutely must be
7247   // trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be
7248   // silly).
7249   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
7250   const uint32_t *Mask = TRI->getTLSCallPreservedMask();
7251   if (Subtarget->hasCustomCallingConv())
7252     TRI->UpdateCustomCallPreservedMask(DAG.getMachineFunction(), &Mask);
7253 
7254   // Finally, we can make the call. This is just a degenerate version of a
7255   // normal AArch64 call node: x0 takes the address of the descriptor, and
7256   // returns the address of the variable in this thread.
7257   Chain = DAG.getCopyToReg(Chain, DL, AArch64::X0, DescAddr, SDValue());
7258   Chain =
7259       DAG.getNode(AArch64ISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
7260                   Chain, FuncTLVGet, DAG.getRegister(AArch64::X0, MVT::i64),
7261                   DAG.getRegisterMask(Mask), Chain.getValue(1));
7262   return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Chain.getValue(1));
7263 }
7264 
7265 /// Convert a thread-local variable reference into a sequence of instructions to
7266 /// compute the variable's address for the local exec TLS model of ELF targets.
7267 /// The sequence depends on the maximum TLS area size.
7268 SDValue AArch64TargetLowering::LowerELFTLSLocalExec(const GlobalValue *GV,
7269                                                     SDValue ThreadBase,
7270                                                     const SDLoc &DL,
7271                                                     SelectionDAG &DAG) const {
7272   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7273   SDValue TPOff, Addr;
7274 
7275   switch (DAG.getTarget().Options.TLSSize) {
7276   default:
7277     llvm_unreachable("Unexpected TLS size");
7278 
7279   case 12: {
7280     // mrs   x0, TPIDR_EL0
7281     // add   x0, x0, :tprel_lo12:a
7282     SDValue Var = DAG.getTargetGlobalAddress(
7283         GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
7284     return SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase,
7285                                       Var,
7286                                       DAG.getTargetConstant(0, DL, MVT::i32)),
7287                    0);
7288   }
7289 
7290   case 24: {
7291     // mrs   x0, TPIDR_EL0
7292     // add   x0, x0, :tprel_hi12:a
7293     // add   x0, x0, :tprel_lo12_nc:a
7294     SDValue HiVar = DAG.getTargetGlobalAddress(
7295         GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
7296     SDValue LoVar = DAG.getTargetGlobalAddress(
7297         GV, DL, PtrVT, 0,
7298         AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
7299     Addr = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase,
7300                                       HiVar,
7301                                       DAG.getTargetConstant(0, DL, MVT::i32)),
7302                    0);
7303     return SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, Addr,
7304                                       LoVar,
7305                                       DAG.getTargetConstant(0, DL, MVT::i32)),
7306                    0);
7307   }
7308 
7309   case 32: {
7310     // mrs   x1, TPIDR_EL0
7311     // movz  x0, #:tprel_g1:a
7312     // movk  x0, #:tprel_g0_nc:a
7313     // add   x0, x1, x0
7314     SDValue HiVar = DAG.getTargetGlobalAddress(
7315         GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G1);
7316     SDValue LoVar = DAG.getTargetGlobalAddress(
7317         GV, DL, PtrVT, 0,
7318         AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC);
7319     TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar,
7320                                        DAG.getTargetConstant(16, DL, MVT::i32)),
7321                     0);
7322     TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, LoVar,
7323                                        DAG.getTargetConstant(0, DL, MVT::i32)),
7324                     0);
7325     return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
7326   }
7327 
7328   case 48: {
7329     // mrs   x1, TPIDR_EL0
7330     // movz  x0, #:tprel_g2:a
7331     // movk  x0, #:tprel_g1_nc:a
7332     // movk  x0, #:tprel_g0_nc:a
7333     // add   x0, x1, x0
7334     SDValue HiVar = DAG.getTargetGlobalAddress(
7335         GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G2);
7336     SDValue MiVar = DAG.getTargetGlobalAddress(
7337         GV, DL, PtrVT, 0,
7338         AArch64II::MO_TLS | AArch64II::MO_G1 | AArch64II::MO_NC);
7339     SDValue LoVar = DAG.getTargetGlobalAddress(
7340         GV, DL, PtrVT, 0,
7341         AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC);
7342     TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar,
7343                                        DAG.getTargetConstant(32, DL, MVT::i32)),
7344                     0);
7345     TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, MiVar,
7346                                        DAG.getTargetConstant(16, DL, MVT::i32)),
7347                     0);
7348     TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, LoVar,
7349                                        DAG.getTargetConstant(0, DL, MVT::i32)),
7350                     0);
7351     return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
7352   }
7353   }
7354 }
7355 
7356 /// When accessing thread-local variables under either the general-dynamic or
7357 /// local-dynamic system, we make a "TLS-descriptor" call. The variable will
7358 /// have a descriptor, accessible via a PC-relative ADRP, and whose first entry
7359 /// is a function pointer to carry out the resolution.
7360 ///
7361 /// The sequence is:
7362 ///    adrp  x0, :tlsdesc:var
7363 ///    ldr   x1, [x0, #:tlsdesc_lo12:var]
7364 ///    add   x0, x0, #:tlsdesc_lo12:var
7365 ///    .tlsdesccall var
7366 ///    blr   x1
7367 ///    (TPIDR_EL0 offset now in x0)
7368 ///
7369 ///  The above sequence must be produced unscheduled, to enable the linker to
7370 ///  optimize/relax this sequence.
7371 ///  Therefore, a pseudo-instruction (TLSDESC_CALLSEQ) is used to represent the
7372 ///  above sequence, and expanded really late in the compilation flow, to ensure
7373 ///  the sequence is produced as per above.
7374 SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr,
7375                                                       const SDLoc &DL,
7376                                                       SelectionDAG &DAG) const {
7377   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7378 
7379   SDValue Chain = DAG.getEntryNode();
7380   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7381 
7382   Chain =
7383       DAG.getNode(AArch64ISD::TLSDESC_CALLSEQ, DL, NodeTys, {Chain, SymAddr});
7384   SDValue Glue = Chain.getValue(1);
7385 
7386   return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
7387 }
7388 
7389 SDValue
7390 AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
7391                                                 SelectionDAG &DAG) const {
7392   assert(Subtarget->isTargetELF() && "This function expects an ELF target");
7393 
7394   const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
7395 
7396   TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());
7397 
7398   if (!EnableAArch64ELFLocalDynamicTLSGeneration) {
7399     if (Model == TLSModel::LocalDynamic)
7400       Model = TLSModel::GeneralDynamic;
7401   }
7402 
7403   if (getTargetMachine().getCodeModel() == CodeModel::Large &&
7404       Model != TLSModel::LocalExec)
7405     report_fatal_error("ELF TLS only supported in small memory model or "
7406                        "in local exec TLS model");
7407   // Different choices can be made for the maximum size of the TLS area for a
7408   // module. For the small address model, the default TLS size is 16MiB and the
7409   // maximum TLS size is 4GiB.
7410   // FIXME: add tiny and large code model support for TLS access models other
7411   // than local exec. We currently generate the same code as small for tiny,
7412   // which may be larger than needed.
7413 
7414   SDValue TPOff;
7415   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7416   SDLoc DL(Op);
7417   const GlobalValue *GV = GA->getGlobal();
7418 
7419   SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);
7420 
7421   if (Model == TLSModel::LocalExec) {
7422     return LowerELFTLSLocalExec(GV, ThreadBase, DL, DAG);
7423   } else if (Model == TLSModel::InitialExec) {
7424     TPOff = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
7425     TPOff = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TPOff);
7426   } else if (Model == TLSModel::LocalDynamic) {
7427     // Local-dynamic accesses proceed in two phases. A general-dynamic TLS
7428     // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate
7429     // the beginning of the module's TLS region, followed by a DTPREL offset
7430     // calculation.
7431 
7432     // These accesses will need deduplicating if there's more than one.
7433     AArch64FunctionInfo *MFI =
7434         DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
7435     MFI->incNumLocalDynamicTLSAccesses();
7436 
7437     // The call needs a relocation too for linker relaxation. It doesn't make
7438     // sense to call it MO_PAGE or MO_PAGEOFF though so we need another copy of
7439     // the address.
7440     SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
7441                                                   AArch64II::MO_TLS);
7442 
7443     // Now we can calculate the offset from TPIDR_EL0 to this module's
7444     // thread-local area.
7445     TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);
7446 
7447     // Now use :dtprel_whatever: operations to calculate this variable's offset
7448     // in its thread-storage area.
7449     SDValue HiVar = DAG.getTargetGlobalAddress(
7450         GV, DL, MVT::i64, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
7451     SDValue LoVar = DAG.getTargetGlobalAddress(
7452         GV, DL, MVT::i64, 0,
7453         AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
7454 
7455     TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, HiVar,
7456                                        DAG.getTargetConstant(0, DL, MVT::i32)),
7457                     0);
7458     TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, LoVar,
7459                                        DAG.getTargetConstant(0, DL, MVT::i32)),
7460                     0);
7461   } else if (Model == TLSModel::GeneralDynamic) {
    // The call needs a relocation too for linker relaxation. It doesn't make
    // sense to call it MO_PAGE or MO_PAGEOFF, though, so we need another copy
    // of the address.
7465     SDValue SymAddr =
7466         DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
7467 
7468     // Finally we can make a call to calculate the offset from tpidr_el0.
7469     TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);
7470   } else
7471     llvm_unreachable("Unsupported ELF TLS access model");
7472 
7473   return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
7474 }
7475 
7476 SDValue
7477 AArch64TargetLowering::LowerWindowsGlobalTLSAddress(SDValue Op,
7478                                                     SelectionDAG &DAG) const {
7479   assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
7480 
7481   SDValue Chain = DAG.getEntryNode();
7482   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7483   SDLoc DL(Op);
7484 
7485   SDValue TEB = DAG.getRegister(AArch64::X18, MVT::i64);
7486 
7487   // Load the ThreadLocalStoragePointer from the TEB
7488   // A pointer to the TLS array is located at offset 0x58 from the TEB.
7489   SDValue TLSArray =
7490       DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x58, DL));
7491   TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
7492   Chain = TLSArray.getValue(1);
7493 
  // Load the TLS index from the C runtime.
7495   // This does the same as getAddr(), but without having a GlobalAddressSDNode.
7496   // This also does the same as LOADgot, but using a generic i32 load,
7497   // while LOADgot only loads i64.
7498   SDValue TLSIndexHi =
7499       DAG.getTargetExternalSymbol("_tls_index", PtrVT, AArch64II::MO_PAGE);
7500   SDValue TLSIndexLo = DAG.getTargetExternalSymbol(
7501       "_tls_index", PtrVT, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
7502   SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, TLSIndexHi);
7503   SDValue TLSIndex =
7504       DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, TLSIndexLo);
7505   TLSIndex = DAG.getLoad(MVT::i32, DL, Chain, TLSIndex, MachinePointerInfo());
7506   Chain = TLSIndex.getValue(1);
7507 
  // The pointer to the thread's TLS data area is located at an offset of
  // (TLS index * 8) into TLSArray.
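  // That is, roughly: TLS = *(*(TEB + 0x58) + _tls_index * 8). The variable's
  // offset from the start of the .tls section is then added to this base.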
7510   TLSIndex = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TLSIndex);
7511   SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
7512                              DAG.getConstant(3, DL, PtrVT));
7513   SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
7514                             DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
7515                             MachinePointerInfo());
7516   Chain = TLS.getValue(1);
7517 
7518   const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
7519   const GlobalValue *GV = GA->getGlobal();
7520   SDValue TGAHi = DAG.getTargetGlobalAddress(
7521       GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
7522   SDValue TGALo = DAG.getTargetGlobalAddress(
7523       GV, DL, PtrVT, 0,
7524       AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
7525 
7526   // Add the offset from the start of the .tls section (section base).
7527   SDValue Addr =
7528       SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TLS, TGAHi,
7529                                  DAG.getTargetConstant(0, DL, MVT::i32)),
7530               0);
7531   Addr = DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, Addr, TGALo);
7532   return Addr;
7533 }
7534 
7535 SDValue AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
7536                                                      SelectionDAG &DAG) const {
7537   const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
7538   if (DAG.getTarget().useEmulatedTLS())
7539     return LowerToTLSEmulatedModel(GA, DAG);
7540 
7541   if (Subtarget->isTargetDarwin())
7542     return LowerDarwinGlobalTLSAddress(Op, DAG);
7543   if (Subtarget->isTargetELF())
7544     return LowerELFGlobalTLSAddress(Op, DAG);
7545   if (Subtarget->isTargetWindows())
7546     return LowerWindowsGlobalTLSAddress(Op, DAG);
7547 
7548   llvm_unreachable("Unexpected platform trying to use TLS");
7549 }
7550 
7551 // Looks through \param Val to determine the bit that can be used to
7552 // check the sign of the value. It returns the unextended value and
7553 // the sign bit position.
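// For example, (sign_extend_inreg x, i8) yields {x, 7}, and a plain i64 value
// yields {value, 63}.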
7554 std::pair<SDValue, uint64_t> lookThroughSignExtension(SDValue Val) {
7555   if (Val.getOpcode() == ISD::SIGN_EXTEND_INREG)
7556     return {Val.getOperand(0),
7557             cast<VTSDNode>(Val.getOperand(1))->getVT().getFixedSizeInBits() -
7558                 1};
7559 
7560   if (Val.getOpcode() == ISD::SIGN_EXTEND)
7561     return {Val.getOperand(0),
7562             Val.getOperand(0)->getValueType(0).getFixedSizeInBits() - 1};
7563 
7564   return {Val, Val.getValueSizeInBits() - 1};
7565 }
7566 
7567 SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
7568   SDValue Chain = Op.getOperand(0);
7569   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
7570   SDValue LHS = Op.getOperand(2);
7571   SDValue RHS = Op.getOperand(3);
7572   SDValue Dest = Op.getOperand(4);
7573   SDLoc dl(Op);
7574 
7575   MachineFunction &MF = DAG.getMachineFunction();
7576   // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
7577   // will not be produced, as they are conditional branch instructions that do
7578   // not set flags.
7579   bool ProduceNonFlagSettingCondBr =
7580       !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);
7581 
7582   // Handle f128 first, since lowering it will result in comparing the return
7583   // value of a libcall against zero, which is just what the rest of LowerBR_CC
7584   // is expecting to deal with.
7585   if (LHS.getValueType() == MVT::f128) {
7586     softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS);
7587 
7588     // If softenSetCCOperands returned a scalar, we need to compare the result
7589     // against zero to select between true and false values.
7590     if (!RHS.getNode()) {
7591       RHS = DAG.getConstant(0, dl, LHS.getValueType());
7592       CC = ISD::SETNE;
7593     }
7594   }
7595 
7596   // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
7597   // instruction.
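  // For example, branching on the overflow result of a uadd.with.overflow is
  // lowered to, roughly, an ADDS followed by a conditional branch on the
  // carry flag (B.HS or B.LO) rather than materializing the i1 overflow bit.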
7598   if (ISD::isOverflowIntrOpRes(LHS) && isOneConstant(RHS) &&
7599       (CC == ISD::SETEQ || CC == ISD::SETNE)) {
7600     // Only lower legal XALUO ops.
7601     if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0)))
7602       return SDValue();
7603 
7604     // The actual operation with overflow check.
7605     AArch64CC::CondCode OFCC;
7606     SDValue Value, Overflow;
7607     std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, LHS.getValue(0), DAG);
7608 
7609     if (CC == ISD::SETNE)
7610       OFCC = getInvertedCondCode(OFCC);
7611     SDValue CCVal = DAG.getConstant(OFCC, dl, MVT::i32);
7612 
7613     return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
7614                        Overflow);
7615   }
7616 
7617   if (LHS.getValueType().isInteger()) {
7618     assert((LHS.getValueType() == RHS.getValueType()) &&
7619            (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));
7620 
7621     // If the RHS of the comparison is zero, we can potentially fold this
7622     // to a specialized branch.
7623     const ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
7624     if (RHSC && RHSC->getZExtValue() == 0 && ProduceNonFlagSettingCondBr) {
7625       if (CC == ISD::SETEQ) {
7626         // See if we can use a TBZ to fold in an AND as well.
7627         // TBZ has a smaller branch displacement than CBZ.  If the offset is
7628         // out of bounds, a late MI-layer pass rewrites branches.
7629         // 403.gcc is an example that hits this case.
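        // For example, (br_cc seteq, (and x, 4), 0, dest) becomes
        // (TBZ x, #2, dest).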
7630         if (LHS.getOpcode() == ISD::AND &&
7631             isa<ConstantSDNode>(LHS.getOperand(1)) &&
7632             isPowerOf2_64(LHS.getConstantOperandVal(1))) {
7633           SDValue Test = LHS.getOperand(0);
7634           uint64_t Mask = LHS.getConstantOperandVal(1);
7635           return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, Test,
7636                              DAG.getConstant(Log2_64(Mask), dl, MVT::i64),
7637                              Dest);
7638         }
7639 
7640         return DAG.getNode(AArch64ISD::CBZ, dl, MVT::Other, Chain, LHS, Dest);
7641       } else if (CC == ISD::SETNE) {
7642         // See if we can use a TBZ to fold in an AND as well.
7643         // TBZ has a smaller branch displacement than CBZ.  If the offset is
7644         // out of bounds, a late MI-layer pass rewrites branches.
7645         // 403.gcc is an example that hits this case.
7646         if (LHS.getOpcode() == ISD::AND &&
7647             isa<ConstantSDNode>(LHS.getOperand(1)) &&
7648             isPowerOf2_64(LHS.getConstantOperandVal(1))) {
7649           SDValue Test = LHS.getOperand(0);
7650           uint64_t Mask = LHS.getConstantOperandVal(1);
7651           return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, Test,
7652                              DAG.getConstant(Log2_64(Mask), dl, MVT::i64),
7653                              Dest);
7654         }
7655 
7656         return DAG.getNode(AArch64ISD::CBNZ, dl, MVT::Other, Chain, LHS, Dest);
7657       } else if (CC == ISD::SETLT && LHS.getOpcode() != ISD::AND) {
7658         // Don't combine AND since emitComparison converts the AND to an ANDS
7659         // (a.k.a. TST) and the test in the test bit and branch instruction
7660         // becomes redundant.  This would also increase register pressure.
7661         uint64_t SignBitPos;
7662         std::tie(LHS, SignBitPos) = lookThroughSignExtension(LHS);
7663         return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, LHS,
7664                            DAG.getConstant(SignBitPos, dl, MVT::i64), Dest);
7665       }
7666     }
7667     if (RHSC && RHSC->getSExtValue() == -1 && CC == ISD::SETGT &&
7668         LHS.getOpcode() != ISD::AND && ProduceNonFlagSettingCondBr) {
7669       // Don't combine AND since emitComparison converts the AND to an ANDS
7670       // (a.k.a. TST) and the test in the test bit and branch instruction
7671       // becomes redundant.  This would also increase register pressure.
7672       uint64_t SignBitPos;
7673       std::tie(LHS, SignBitPos) = lookThroughSignExtension(LHS);
7674       return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, LHS,
7675                          DAG.getConstant(SignBitPos, dl, MVT::i64), Dest);
7676     }
7677 
7678     SDValue CCVal;
7679     SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
7680     return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
7681                        Cmp);
7682   }
7683 
7684   assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::bf16 ||
7685          LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
7686 
7687   // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
7688   // clean.  Some of them require two branches to implement.
7689   SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
7690   AArch64CC::CondCode CC1, CC2;
7691   changeFPCCToAArch64CC(CC, CC1, CC2);
7692   SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
7693   SDValue BR1 =
7694       DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CC1Val, Cmp);
7695   if (CC2 != AArch64CC::AL) {
7696     SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
7697     return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, BR1, Dest, CC2Val,
7698                        Cmp);
7699   }
7700 
7701   return BR1;
7702 }
7703 
7704 SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
7705                                               SelectionDAG &DAG) const {
7706   if (!Subtarget->hasNEON())
7707     return SDValue();
7708 
7709   EVT VT = Op.getValueType();
7710   EVT IntVT = VT.changeTypeToInteger();
7711   SDLoc DL(Op);
7712 
7713   SDValue In1 = Op.getOperand(0);
7714   SDValue In2 = Op.getOperand(1);
7715   EVT SrcVT = In2.getValueType();
7716 
7717   if (SrcVT.bitsLT(VT))
7718     In2 = DAG.getNode(ISD::FP_EXTEND, DL, VT, In2);
7719   else if (SrcVT.bitsGT(VT))
7720     In2 = DAG.getNode(ISD::FP_ROUND, DL, VT, In2, DAG.getIntPtrConstant(0, DL));
7721 
7722   if (VT.isScalableVector())
7723     IntVT =
7724         getPackedSVEVectorVT(VT.getVectorElementType().changeTypeToInteger());
7725 
7726   if (VT != In2.getValueType())
7727     return SDValue();
7728 
7729   auto BitCast = [this](EVT VT, SDValue Op, SelectionDAG &DAG) {
7730     if (VT.isScalableVector())
7731       return getSVESafeBitCast(VT, Op, DAG);
7732 
7733     return DAG.getBitcast(VT, Op);
7734   };
7735 
7736   SDValue VecVal1, VecVal2;
7737   EVT VecVT;
7738   auto SetVecVal = [&](int Idx = -1) {
7739     if (!VT.isVector()) {
7740       VecVal1 =
7741           DAG.getTargetInsertSubreg(Idx, DL, VecVT, DAG.getUNDEF(VecVT), In1);
7742       VecVal2 =
7743           DAG.getTargetInsertSubreg(Idx, DL, VecVT, DAG.getUNDEF(VecVT), In2);
7744     } else {
7745       VecVal1 = BitCast(VecVT, In1, DAG);
7746       VecVal2 = BitCast(VecVT, In2, DAG);
7747     }
7748   };
7749   if (VT.isVector()) {
7750     VecVT = IntVT;
7751     SetVecVal();
7752   } else if (VT == MVT::f64) {
7753     VecVT = MVT::v2i64;
7754     SetVecVal(AArch64::dsub);
7755   } else if (VT == MVT::f32) {
7756     VecVT = MVT::v4i32;
7757     SetVecVal(AArch64::ssub);
7758   } else if (VT == MVT::f16) {
7759     VecVT = MVT::v8i16;
7760     SetVecVal(AArch64::hsub);
7761   } else {
7762     llvm_unreachable("Invalid type for copysign!");
7763   }
7764 
7765   unsigned BitWidth = In1.getScalarValueSizeInBits();
7766   SDValue SignMaskV = DAG.getConstant(~APInt::getSignMask(BitWidth), DL, VecVT);
7767 
7768   // We want to materialize a mask with every bit but the high bit set, but the
7769   // AdvSIMD immediate moves cannot materialize that in a single instruction for
7770   // 64-bit elements. Instead, materialize all bits set and then negate that.
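  // For example, for v2f64 this is roughly:
  //   movi v0.2d, #0xffffffffffffffff
  //   fneg v0.2d, v0.2d   // flips only the sign bits, giving 0x7fff...ffff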
7771   if (VT == MVT::f64 || VT == MVT::v2f64) {
7772     SignMaskV = DAG.getConstant(APInt::getAllOnes(BitWidth), DL, VecVT);
7773     SignMaskV = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, SignMaskV);
7774     SignMaskV = DAG.getNode(ISD::FNEG, DL, MVT::v2f64, SignMaskV);
7775     SignMaskV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, SignMaskV);
7776   }
7777 
7778   SDValue BSP =
7779       DAG.getNode(AArch64ISD::BSP, DL, VecVT, SignMaskV, VecVal1, VecVal2);
7780   if (VT == MVT::f16)
7781     return DAG.getTargetExtractSubreg(AArch64::hsub, DL, VT, BSP);
7782   if (VT == MVT::f32)
7783     return DAG.getTargetExtractSubreg(AArch64::ssub, DL, VT, BSP);
7784   if (VT == MVT::f64)
7785     return DAG.getTargetExtractSubreg(AArch64::dsub, DL, VT, BSP);
7786 
7787   return BitCast(VT, BSP, DAG);
7788 }
7789 
7790 SDValue AArch64TargetLowering::LowerCTPOP_PARITY(SDValue Op,
7791                                                  SelectionDAG &DAG) const {
7792   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
7793           Attribute::NoImplicitFloat))
7794     return SDValue();
7795 
7796   if (!Subtarget->hasNEON())
7797     return SDValue();
7798 
7799   bool IsParity = Op.getOpcode() == ISD::PARITY;
7800 
7801   // While there is no integer popcount instruction, it can
7802   // be more efficiently lowered to the following sequence that uses
7803   // AdvSIMD registers/instructions as long as the copies to/from
7804   // the AdvSIMD registers are cheap.
7805   //  FMOV    D0, X0        // copy 64-bit int to vector, high bits zero'd
7806   //  CNT     V0.8B, V0.8B  // 8xbyte pop-counts
7807   //  ADDV    B0, V0.8B     // sum 8xbyte pop-counts
7808   //  UMOV    X0, V0.B[0]   // copy byte result back to integer reg
7809   SDValue Val = Op.getOperand(0);
7810   SDLoc DL(Op);
7811   EVT VT = Op.getValueType();
7812 
7813   if (VT == MVT::i32 || VT == MVT::i64) {
7814     if (VT == MVT::i32)
7815       Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val);
7816     Val = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Val);
7817 
7818     SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v8i8, Val);
7819     SDValue UaddLV = DAG.getNode(
7820         ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
7821         DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop);
7822 
7823     if (IsParity)
7824       UaddLV = DAG.getNode(ISD::AND, DL, MVT::i32, UaddLV,
7825                            DAG.getConstant(1, DL, MVT::i32));
7826 
7827     if (VT == MVT::i64)
7828       UaddLV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, UaddLV);
7829     return UaddLV;
7830   } else if (VT == MVT::i128) {
7831     Val = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Val);
7832 
7833     SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v16i8, Val);
7834     SDValue UaddLV = DAG.getNode(
7835         ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
7836         DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop);
7837 
7838     if (IsParity)
7839       UaddLV = DAG.getNode(ISD::AND, DL, MVT::i32, UaddLV,
7840                            DAG.getConstant(1, DL, MVT::i32));
7841 
7842     return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i128, UaddLV);
7843   }
7844 
7845   assert(!IsParity && "ISD::PARITY of vector types not supported");
7846 
7847   if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT))
7848     return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTPOP_MERGE_PASSTHRU);
7849 
7850   assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
7851           VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
7852          "Unexpected type for custom ctpop lowering");
7853 
7854   EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
7855   Val = DAG.getBitcast(VT8Bit, Val);
7856   Val = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Val);
7857 
7858   // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds.
7859   unsigned EltSize = 8;
7860   unsigned NumElts = VT.is64BitVector() ? 8 : 16;
7861   while (EltSize != VT.getScalarSizeInBits()) {
7862     EltSize *= 2;
7863     NumElts /= 2;
7864     MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);
7865     Val = DAG.getNode(
7866         ISD::INTRINSIC_WO_CHAIN, DL, WidenVT,
7867         DAG.getConstant(Intrinsic::aarch64_neon_uaddlp, DL, MVT::i32), Val);
7868   }
7869 
7870   return Val;
7871 }
7872 
7873 SDValue AArch64TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const {
7874   EVT VT = Op.getValueType();
7875   assert(VT.isScalableVector() ||
7876          useSVEForFixedLengthVectorVT(
7877              VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()));
7878 
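  // There is no dedicated count-trailing-zeros instruction on this path;
  // instead reverse the bits so the lowest set bit becomes the highest and
  // count leading zeros: cttz(x) == ctlz(bitreverse(x)).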
7879   SDLoc DL(Op);
7880   SDValue RBIT = DAG.getNode(ISD::BITREVERSE, DL, VT, Op.getOperand(0));
7881   return DAG.getNode(ISD::CTLZ, DL, VT, RBIT);
7882 }
7883 
7884 SDValue AArch64TargetLowering::LowerMinMax(SDValue Op,
7885                                            SelectionDAG &DAG) const {
7886 
7887   EVT VT = Op.getValueType();
7888   SDLoc DL(Op);
7889   unsigned Opcode = Op.getOpcode();
7890   ISD::CondCode CC;
7891   switch (Opcode) {
7892   default:
7893     llvm_unreachable("Wrong instruction");
7894   case ISD::SMAX:
7895     CC = ISD::SETGT;
7896     break;
7897   case ISD::SMIN:
7898     CC = ISD::SETLT;
7899     break;
7900   case ISD::UMAX:
7901     CC = ISD::SETUGT;
7902     break;
7903   case ISD::UMIN:
7904     CC = ISD::SETULT;
7905     break;
7906   }
7907 
7908   if (VT.isScalableVector() ||
7909       useSVEForFixedLengthVectorVT(
7910           VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors())) {
7911     switch (Opcode) {
7912     default:
7913       llvm_unreachable("Wrong instruction");
7914     case ISD::SMAX:
7915       return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMAX_PRED);
7916     case ISD::SMIN:
7917       return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMIN_PRED);
7918     case ISD::UMAX:
7919       return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMAX_PRED);
7920     case ISD::UMIN:
7921       return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMIN_PRED);
7922     }
7923   }
7924 
7925   SDValue Op0 = Op.getOperand(0);
7926   SDValue Op1 = Op.getOperand(1);
7927   SDValue Cond = DAG.getSetCC(DL, VT, Op0, Op1, CC);
7928   return DAG.getSelect(DL, VT, Cond, Op0, Op1);
7929 }
7930 
7931 SDValue AArch64TargetLowering::LowerBitreverse(SDValue Op,
7932                                                SelectionDAG &DAG) const {
7933   EVT VT = Op.getValueType();
7934 
7935   if (VT.isScalableVector() ||
7936       useSVEForFixedLengthVectorVT(
7937           VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()))
7938     return LowerToPredicatedOp(Op, DAG, AArch64ISD::BITREVERSE_MERGE_PASSTHRU);
7939 
7940   SDLoc DL(Op);
7941   SDValue REVB;
7942   MVT VST;
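  // Element-wise i32/i64 bitreverse is implemented as a byte reversal within
  // each element (REV32/REV64 on the byte vector) followed by a per-byte bit
  // reversal (ISD::BITREVERSE on the i8 vector), then a cast back to VT.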
7943 
7944   switch (VT.getSimpleVT().SimpleTy) {
7945   default:
7946     llvm_unreachable("Invalid type for bitreverse!");
7947 
7948   case MVT::v2i32: {
7949     VST = MVT::v8i8;
7950     REVB = DAG.getNode(AArch64ISD::REV32, DL, VST, Op.getOperand(0));
7951 
7952     break;
7953   }
7954 
7955   case MVT::v4i32: {
7956     VST = MVT::v16i8;
7957     REVB = DAG.getNode(AArch64ISD::REV32, DL, VST, Op.getOperand(0));
7958 
7959     break;
7960   }
7961 
7962   case MVT::v1i64: {
7963     VST = MVT::v8i8;
7964     REVB = DAG.getNode(AArch64ISD::REV64, DL, VST, Op.getOperand(0));
7965 
7966     break;
7967   }
7968 
7969   case MVT::v2i64: {
7970     VST = MVT::v16i8;
7971     REVB = DAG.getNode(AArch64ISD::REV64, DL, VST, Op.getOperand(0));
7972 
7973     break;
7974   }
7975   }
7976 
7977   return DAG.getNode(AArch64ISD::NVCAST, DL, VT,
7978                      DAG.getNode(ISD::BITREVERSE, DL, VST, REVB));
7979 }
7980 
7981 SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
7982 
7983   if (Op.getValueType().isVector())
7984     return LowerVSETCC(Op, DAG);
7985 
7986   bool IsStrict = Op->isStrictFPOpcode();
7987   bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
7988   unsigned OpNo = IsStrict ? 1 : 0;
7989   SDValue Chain;
7990   if (IsStrict)
7991     Chain = Op.getOperand(0);
7992   SDValue LHS = Op.getOperand(OpNo + 0);
7993   SDValue RHS = Op.getOperand(OpNo + 1);
7994   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(OpNo + 2))->get();
7995   SDLoc dl(Op);
7996 
7997   // We chose ZeroOrOneBooleanContents, so use zero and one.
7998   EVT VT = Op.getValueType();
7999   SDValue TVal = DAG.getConstant(1, dl, VT);
8000   SDValue FVal = DAG.getConstant(0, dl, VT);
8001 
8002   // Handle f128 first, since one possible outcome is a normal integer
8003   // comparison which gets picked up by the next if statement.
8004   if (LHS.getValueType() == MVT::f128) {
8005     softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS, Chain,
8006                         IsSignaling);
8007 
8008     // If softenSetCCOperands returned a scalar, use it.
8009     if (!RHS.getNode()) {
8010       assert(LHS.getValueType() == Op.getValueType() &&
8011              "Unexpected setcc expansion!");
8012       return IsStrict ? DAG.getMergeValues({LHS, Chain}, dl) : LHS;
8013     }
8014   }
8015 
8016   if (LHS.getValueType().isInteger()) {
8017     SDValue CCVal;
8018     SDValue Cmp = getAArch64Cmp(
8019         LHS, RHS, ISD::getSetCCInverse(CC, LHS.getValueType()), CCVal, DAG, dl);
8020 
8021     // Note that we inverted the condition above, so we reverse the order of
8022     // the true and false operands here.  This will allow the setcc to be
8023     // matched to a single CSINC instruction.
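    // For example, i32 (setcc eq, x, y) becomes roughly:
    //   subs wzr, x, y
    //   csinc w0, wzr, wzr, ne   // i.e. cset w0, eq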
8024     SDValue Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CCVal, Cmp);
8025     return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
8026   }
8027 
8028   // Now we know we're dealing with FP values.
8029   assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 ||
8030          LHS.getValueType() == MVT::f64);
8031 
8032   // If that fails, we'll need to perform an FCMP + CSEL sequence.  Go ahead
8033   // and do the comparison.
8034   SDValue Cmp;
8035   if (IsStrict)
8036     Cmp = emitStrictFPComparison(LHS, RHS, dl, DAG, Chain, IsSignaling);
8037   else
8038     Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
8039 
8040   AArch64CC::CondCode CC1, CC2;
8041   changeFPCCToAArch64CC(CC, CC1, CC2);
8042   SDValue Res;
8043   if (CC2 == AArch64CC::AL) {
8044     changeFPCCToAArch64CC(ISD::getSetCCInverse(CC, LHS.getValueType()), CC1,
8045                           CC2);
8046     SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
8047 
8048     // Note that we inverted the condition above, so we reverse the order of
8049     // the true and false operands here.  This will allow the setcc to be
8050     // matched to a single CSINC instruction.
8051     Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CC1Val, Cmp);
8052   } else {
8053     // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't
8054     // totally clean.  Some of them require two CSELs to implement.  As is in
8055     // this case, we emit the first CSEL and then emit a second using the output
8056     // of the first as the RHS.  We're effectively OR'ing the two CC's together.
8057 
8058     // FIXME: It would be nice if we could match the two CSELs to two CSINCs.
8059     SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
8060     SDValue CS1 =
8061         DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);
8062 
8063     SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
8064     Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
8065   }
8066   return IsStrict ? DAG.getMergeValues({Res, Cmp.getValue(1)}, dl) : Res;
8067 }
8068 
8069 SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS,
8070                                               SDValue RHS, SDValue TVal,
8071                                               SDValue FVal, const SDLoc &dl,
8072                                               SelectionDAG &DAG) const {
8073   // Handle f128 first, because it will result in a comparison of some RTLIB
8074   // call result against zero.
8075   if (LHS.getValueType() == MVT::f128) {
8076     softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS);
8077 
8078     // If softenSetCCOperands returned a scalar, we need to compare the result
8079     // against zero to select between true and false values.
8080     if (!RHS.getNode()) {
8081       RHS = DAG.getConstant(0, dl, LHS.getValueType());
8082       CC = ISD::SETNE;
8083     }
8084   }
8085 
8086   // Also handle f16, for which we need to do a f32 comparison.
8087   if (LHS.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
8088     LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS);
8089     RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS);
8090   }
8091 
8092   // Next, handle integers.
8093   if (LHS.getValueType().isInteger()) {
8094     assert((LHS.getValueType() == RHS.getValueType()) &&
8095            (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));
8096 
8097     ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
8098     ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
8099     ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
    // Check for sign pattern (SELECT_CC setgt, iN lhs, -1, 1, -1) and transform
    // into (OR (ASR lhs, N-1), 1), which requires fewer instructions for the
    // supported types.
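    // For example, for i32: (select_cc setgt, x, -1, 1, -1) becomes
    // (or (sra x, 31), 1), i.e. -1 when x is negative and 1 otherwise.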
8103     if (CC == ISD::SETGT && RHSC && RHSC->isAllOnes() && CTVal && CFVal &&
8104         CTVal->isOne() && CFVal->isAllOnes() &&
8105         LHS.getValueType() == TVal.getValueType()) {
8106       EVT VT = LHS.getValueType();
8107       SDValue Shift =
8108           DAG.getNode(ISD::SRA, dl, VT, LHS,
8109                       DAG.getConstant(VT.getSizeInBits() - 1, dl, VT));
8110       return DAG.getNode(ISD::OR, dl, VT, Shift, DAG.getConstant(1, dl, VT));
8111     }
8112 
8113     unsigned Opcode = AArch64ISD::CSEL;
8114 
    // If both the TVal and the FVal are constants, see if we can swap them in
    // order to form a CSINV or CSINC out of them.
8117     if (CTVal && CFVal && CTVal->isAllOnes() && CFVal->isZero()) {
8118       std::swap(TVal, FVal);
8119       std::swap(CTVal, CFVal);
8120       CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8121     } else if (CTVal && CFVal && CTVal->isOne() && CFVal->isZero()) {
8122       std::swap(TVal, FVal);
8123       std::swap(CTVal, CFVal);
8124       CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8125     } else if (TVal.getOpcode() == ISD::XOR) {
8126       // If TVal is a NOT we want to swap TVal and FVal so that we can match
8127       // with a CSINV rather than a CSEL.
8128       if (isAllOnesConstant(TVal.getOperand(1))) {
8129         std::swap(TVal, FVal);
8130         std::swap(CTVal, CFVal);
8131         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8132       }
8133     } else if (TVal.getOpcode() == ISD::SUB) {
8134       // If TVal is a negation (SUB from 0) we want to swap TVal and FVal so
8135       // that we can match with a CSNEG rather than a CSEL.
8136       if (isNullConstant(TVal.getOperand(0))) {
8137         std::swap(TVal, FVal);
8138         std::swap(CTVal, CFVal);
8139         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8140       }
8141     } else if (CTVal && CFVal) {
8142       const int64_t TrueVal = CTVal->getSExtValue();
8143       const int64_t FalseVal = CFVal->getSExtValue();
8144       bool Swap = false;
8145 
8146       // If both TVal and FVal are constants, see if FVal is the
8147       // inverse/negation/increment of TVal and generate a CSINV/CSNEG/CSINC
8148       // instead of a CSEL in that case.
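      // For example, {TVal, FVal} = {1, -2} can use CSINV (1 == ~(-2)),
      // {5, -5} can use CSNEG, and {7, 6} can use CSINC.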
8149       if (TrueVal == ~FalseVal) {
8150         Opcode = AArch64ISD::CSINV;
8151       } else if (FalseVal > std::numeric_limits<int64_t>::min() &&
8152                  TrueVal == -FalseVal) {
8153         Opcode = AArch64ISD::CSNEG;
8154       } else if (TVal.getValueType() == MVT::i32) {
8155         // If our operands are only 32-bit wide, make sure we use 32-bit
8156         // arithmetic for the check whether we can use CSINC. This ensures that
8157         // the addition in the check will wrap around properly in case there is
8158         // an overflow (which would not be the case if we do the check with
8159         // 64-bit arithmetic).
8160         const uint32_t TrueVal32 = CTVal->getZExtValue();
8161         const uint32_t FalseVal32 = CFVal->getZExtValue();
8162 
8163         if ((TrueVal32 == FalseVal32 + 1) || (TrueVal32 + 1 == FalseVal32)) {
8164           Opcode = AArch64ISD::CSINC;
8165 
8166           if (TrueVal32 > FalseVal32) {
8167             Swap = true;
8168           }
8169         }
8170         // 64-bit check whether we can use CSINC.
8171       } else if ((TrueVal == FalseVal + 1) || (TrueVal + 1 == FalseVal)) {
8172         Opcode = AArch64ISD::CSINC;
8173 
8174         if (TrueVal > FalseVal) {
8175           Swap = true;
8176         }
8177       }
8178 
8179       // Swap TVal and FVal if necessary.
8180       if (Swap) {
8181         std::swap(TVal, FVal);
8182         std::swap(CTVal, CFVal);
8183         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8184       }
8185 
8186       if (Opcode != AArch64ISD::CSEL) {
8187         // Drop FVal since we can get its value by simply inverting/negating
8188         // TVal.
8189         FVal = TVal;
8190       }
8191     }
8192 
8193     // Avoid materializing a constant when possible by reusing a known value in
8194     // a register.  However, don't perform this optimization if the known value
8195     // is one, zero or negative one in the case of a CSEL.  We can always
8196     // materialize these values using CSINC, CSEL and CSINV with wzr/xzr as the
8197     // FVal, respectively.
8198     ConstantSDNode *RHSVal = dyn_cast<ConstantSDNode>(RHS);
8199     if (Opcode == AArch64ISD::CSEL && RHSVal && !RHSVal->isOne() &&
8200         !RHSVal->isZero() && !RHSVal->isAllOnes()) {
8201       AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
8202       // Transform "a == C ? C : x" to "a == C ? a : x" and "a != C ? x : C" to
8203       // "a != C ? x : a" to avoid materializing C.
8204       if (CTVal && CTVal == RHSVal && AArch64CC == AArch64CC::EQ)
8205         TVal = LHS;
8206       else if (CFVal && CFVal == RHSVal && AArch64CC == AArch64CC::NE)
8207         FVal = LHS;
8208     } else if (Opcode == AArch64ISD::CSNEG && RHSVal && RHSVal->isOne()) {
      assert(CTVal && CFVal && "Expected constant operands for CSNEG.");
8210       // Use a CSINV to transform "a == C ? 1 : -1" to "a == C ? a : -1" to
8211       // avoid materializing C.
8212       AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
8213       if (CTVal == RHSVal && AArch64CC == AArch64CC::EQ) {
8214         Opcode = AArch64ISD::CSINV;
8215         TVal = LHS;
8216         FVal = DAG.getConstant(0, dl, FVal.getValueType());
8217       }
8218     }
8219 
8220     SDValue CCVal;
8221     SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
8222     EVT VT = TVal.getValueType();
8223     return DAG.getNode(Opcode, dl, VT, TVal, FVal, CCVal, Cmp);
8224   }
8225 
8226   // Now we know we're dealing with FP values.
8227   assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 ||
8228          LHS.getValueType() == MVT::f64);
8229   assert(LHS.getValueType() == RHS.getValueType());
8230   EVT VT = TVal.getValueType();
8231   SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
8232 
8233   // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
8234   // clean.  Some of them require two CSELs to implement.
8235   AArch64CC::CondCode CC1, CC2;
8236   changeFPCCToAArch64CC(CC, CC1, CC2);
8237 
8238   if (DAG.getTarget().Options.UnsafeFPMath) {
8239     // Transform "a == 0.0 ? 0.0 : x" to "a == 0.0 ? a : x" and
8240     // "a != 0.0 ? x : 0.0" to "a != 0.0 ? x : a" to avoid materializing 0.0.
8241     ConstantFPSDNode *RHSVal = dyn_cast<ConstantFPSDNode>(RHS);
8242     if (RHSVal && RHSVal->isZero()) {
8243       ConstantFPSDNode *CFVal = dyn_cast<ConstantFPSDNode>(FVal);
8244       ConstantFPSDNode *CTVal = dyn_cast<ConstantFPSDNode>(TVal);
8245 
8246       if ((CC == ISD::SETEQ || CC == ISD::SETOEQ || CC == ISD::SETUEQ) &&
8247           CTVal && CTVal->isZero() && TVal.getValueType() == LHS.getValueType())
8248         TVal = LHS;
8249       else if ((CC == ISD::SETNE || CC == ISD::SETONE || CC == ISD::SETUNE) &&
8250                CFVal && CFVal->isZero() &&
8251                FVal.getValueType() == LHS.getValueType())
8252         FVal = LHS;
8253     }
8254   }
8255 
8256   // Emit first, and possibly only, CSEL.
8257   SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
8258   SDValue CS1 = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);
8259 
8260   // If we need a second CSEL, emit it, using the output of the first as the
8261   // RHS.  We're effectively OR'ing the two CC's together.
8262   if (CC2 != AArch64CC::AL) {
8263     SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
8264     return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
8265   }
8266 
8267   // Otherwise, return the output of the first CSEL.
8268   return CS1;
8269 }
8270 
8271 SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
8272                                                   SelectionDAG &DAG) const {
8273   EVT Ty = Op.getValueType();
8274   auto Idx = Op.getConstantOperandAPInt(2);
8275   int64_t IdxVal = Idx.getSExtValue();
8276   assert(Ty.isScalableVector() &&
8277          "Only expect scalable vectors for custom lowering of VECTOR_SPLICE");
8278 
8279   // We can use the splice instruction for certain index values where we are
8280   // able to efficiently generate the correct predicate. The index will be
8281   // inverted and used directly as the input to the ptrue instruction, i.e.
8282   // -1 -> vl1, -2 -> vl2, etc. The predicate will then be reversed to get the
8283   // splice predicate. However, we can only do this if we can guarantee that
8284   // there are enough elements in the vector, hence we check the index <= min
8285   // number of elements.
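  // For example, for nxv2i64 and IdxVal == -2 this becomes roughly:
  //   ptrue  p0.d, vl2
  //   rev    p0.d, p0.d
  //   splice z0.d, p0, z0.d, z1.d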
8286   Optional<unsigned> PredPattern;
8287   if (Ty.isScalableVector() && IdxVal < 0 &&
8288       (PredPattern = getSVEPredPatternFromNumElements(std::abs(IdxVal))) !=
8289           None) {
8290     SDLoc DL(Op);
8291 
8292     // Create a predicate where all but the last -IdxVal elements are false.
8293     EVT PredVT = Ty.changeVectorElementType(MVT::i1);
8294     SDValue Pred = getPTrue(DAG, DL, PredVT, *PredPattern);
8295     Pred = DAG.getNode(ISD::VECTOR_REVERSE, DL, PredVT, Pred);
8296 
8297     // Now splice the two inputs together using the predicate.
8298     return DAG.getNode(AArch64ISD::SPLICE, DL, Ty, Pred, Op.getOperand(0),
8299                        Op.getOperand(1));
8300   }
8301 
  // This will select to an EXT instruction, which has a maximum immediate
  // value of 255, hence 2048 bits is the maximum value we can lower.
8304   if (IdxVal >= 0 &&
8305       IdxVal < int64_t(2048 / Ty.getVectorElementType().getSizeInBits()))
8306     return Op;
8307 
8308   return SDValue();
8309 }
8310 
8311 SDValue AArch64TargetLowering::LowerSELECT_CC(SDValue Op,
8312                                               SelectionDAG &DAG) const {
8313   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
8314   SDValue LHS = Op.getOperand(0);
8315   SDValue RHS = Op.getOperand(1);
8316   SDValue TVal = Op.getOperand(2);
8317   SDValue FVal = Op.getOperand(3);
8318   SDLoc DL(Op);
8319   return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
8320 }
8321 
8322 SDValue AArch64TargetLowering::LowerSELECT(SDValue Op,
8323                                            SelectionDAG &DAG) const {
8324   SDValue CCVal = Op->getOperand(0);
8325   SDValue TVal = Op->getOperand(1);
8326   SDValue FVal = Op->getOperand(2);
8327   SDLoc DL(Op);
8328 
8329   EVT Ty = Op.getValueType();
8330   if (Ty.isScalableVector()) {
8331     SDValue TruncCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, CCVal);
8332     MVT PredVT = MVT::getVectorVT(MVT::i1, Ty.getVectorElementCount());
8333     SDValue SplatPred = DAG.getNode(ISD::SPLAT_VECTOR, DL, PredVT, TruncCC);
8334     return DAG.getNode(ISD::VSELECT, DL, Ty, SplatPred, TVal, FVal);
8335   }
8336 
8337   if (useSVEForFixedLengthVectorVT(Ty)) {
8338     // FIXME: Ideally this would be the same as above using i1 types, however
8339     // for the moment we can't deal with fixed i1 vector types properly, so
8340     // instead extend the predicate to a result type sized integer vector.
8341     MVT SplatValVT = MVT::getIntegerVT(Ty.getScalarSizeInBits());
8342     MVT PredVT = MVT::getVectorVT(SplatValVT, Ty.getVectorElementCount());
8343     SDValue SplatVal = DAG.getSExtOrTrunc(CCVal, DL, SplatValVT);
8344     SDValue SplatPred = DAG.getNode(ISD::SPLAT_VECTOR, DL, PredVT, SplatVal);
8345     return DAG.getNode(ISD::VSELECT, DL, Ty, SplatPred, TVal, FVal);
8346   }
8347 
8348   // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a select
8349   // instruction.
8350   if (ISD::isOverflowIntrOpRes(CCVal)) {
8351     // Only lower legal XALUO ops.
8352     if (!DAG.getTargetLoweringInfo().isTypeLegal(CCVal->getValueType(0)))
8353       return SDValue();
8354 
8355     AArch64CC::CondCode OFCC;
8356     SDValue Value, Overflow;
8357     std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, CCVal.getValue(0), DAG);
8358     SDValue CCVal = DAG.getConstant(OFCC, DL, MVT::i32);
8359 
8360     return DAG.getNode(AArch64ISD::CSEL, DL, Op.getValueType(), TVal, FVal,
8361                        CCVal, Overflow);
8362   }
8363 
8364   // Lower it the same way as we would lower a SELECT_CC node.
8365   ISD::CondCode CC;
8366   SDValue LHS, RHS;
8367   if (CCVal.getOpcode() == ISD::SETCC) {
8368     LHS = CCVal.getOperand(0);
8369     RHS = CCVal.getOperand(1);
8370     CC = cast<CondCodeSDNode>(CCVal.getOperand(2))->get();
8371   } else {
8372     LHS = CCVal;
8373     RHS = DAG.getConstant(0, DL, CCVal.getValueType());
8374     CC = ISD::SETNE;
8375   }
8376   return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
8377 }
8378 
8379 SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op,
8380                                               SelectionDAG &DAG) const {
  // Jump table entries are emitted as PC-relative offsets. No additional
  // tweaking is necessary here. Just get the address of the jump table.
8383   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
8384 
8385   if (getTargetMachine().getCodeModel() == CodeModel::Large &&
8386       !Subtarget->isTargetMachO()) {
8387     return getAddrLarge(JT, DAG);
8388   } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
8389     return getAddrTiny(JT, DAG);
8390   }
8391   return getAddr(JT, DAG);
8392 }
8393 
8394 SDValue AArch64TargetLowering::LowerBR_JT(SDValue Op,
8395                                           SelectionDAG &DAG) const {
  // Jump table entries are emitted as PC-relative offsets. No additional
  // tweaking is necessary here. Just get the address of the jump table.
8398   SDLoc DL(Op);
8399   SDValue JT = Op.getOperand(1);
8400   SDValue Entry = Op.getOperand(2);
8401   int JTI = cast<JumpTableSDNode>(JT.getNode())->getIndex();
8402 
8403   auto *AFI = DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
8404   AFI->setJumpTableEntryInfo(JTI, 4, nullptr);
8405 
8406   SDNode *Dest =
8407       DAG.getMachineNode(AArch64::JumpTableDest32, DL, MVT::i64, MVT::i64, JT,
8408                          Entry, DAG.getTargetJumpTable(JTI, MVT::i32));
8409   return DAG.getNode(ISD::BRIND, DL, MVT::Other, Op.getOperand(0),
8410                      SDValue(Dest, 0));
8411 }
8412 
8413 SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op,
8414                                                  SelectionDAG &DAG) const {
8415   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
8416 
8417   if (getTargetMachine().getCodeModel() == CodeModel::Large) {
8418     // Use the GOT for the large code model on iOS.
8419     if (Subtarget->isTargetMachO()) {
8420       return getGOT(CP, DAG);
8421     }
8422     return getAddrLarge(CP, DAG);
8423   } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
8424     return getAddrTiny(CP, DAG);
8425   } else {
8426     return getAddr(CP, DAG);
8427   }
8428 }
8429 
8430 SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op,
8431                                                SelectionDAG &DAG) const {
8432   BlockAddressSDNode *BA = cast<BlockAddressSDNode>(Op);
8433   if (getTargetMachine().getCodeModel() == CodeModel::Large &&
8434       !Subtarget->isTargetMachO()) {
8435     return getAddrLarge(BA, DAG);
8436   } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
8437     return getAddrTiny(BA, DAG);
8438   }
8439   return getAddr(BA, DAG);
8440 }
8441 
8442 SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op,
8443                                                  SelectionDAG &DAG) const {
8444   AArch64FunctionInfo *FuncInfo =
8445       DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
8446 
8447   SDLoc DL(Op);
8448   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(),
8449                                  getPointerTy(DAG.getDataLayout()));
8450   FR = DAG.getZExtOrTrunc(FR, DL, getPointerMemTy(DAG.getDataLayout()));
8451   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8452   return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
8453                       MachinePointerInfo(SV));
8454 }
8455 
8456 SDValue AArch64TargetLowering::LowerWin64_VASTART(SDValue Op,
8457                                                   SelectionDAG &DAG) const {
8458   AArch64FunctionInfo *FuncInfo =
8459       DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
8460 
8461   SDLoc DL(Op);
8462   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsGPRSize() > 0
8463                                      ? FuncInfo->getVarArgsGPRIndex()
8464                                      : FuncInfo->getVarArgsStackIndex(),
8465                                  getPointerTy(DAG.getDataLayout()));
8466   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8467   return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
8468                       MachinePointerInfo(SV));
8469 }
8470 
8471 SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
8472                                                   SelectionDAG &DAG) const {
8473   // The layout of the va_list struct is specified in the AArch64 Procedure Call
8474   // Standard, section B.3.
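  // For reference, the LP64 layout is (ILP32 offsets are noted on the stores
  // below):
  //   struct va_list {
  //     void *__stack;   // offset 0:  next stack argument
  //     void *__gr_top;  // offset 8:  end of the GP register save area
  //     void *__vr_top;  // offset 16: end of the FP/SIMD register save area
  //     int   __gr_offs; // offset 24: negative offset to next GP reg arg
  //     int   __vr_offs; // offset 28: negative offset to next FP reg arg
  //   };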
8475   MachineFunction &MF = DAG.getMachineFunction();
8476   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
8477   unsigned PtrSize = Subtarget->isTargetILP32() ? 4 : 8;
8478   auto PtrMemVT = getPointerMemTy(DAG.getDataLayout());
8479   auto PtrVT = getPointerTy(DAG.getDataLayout());
8480   SDLoc DL(Op);
8481 
8482   SDValue Chain = Op.getOperand(0);
8483   SDValue VAList = Op.getOperand(1);
8484   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8485   SmallVector<SDValue, 4> MemOps;
8486 
8487   // void *__stack at offset 0
8488   unsigned Offset = 0;
8489   SDValue Stack = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(), PtrVT);
8490   Stack = DAG.getZExtOrTrunc(Stack, DL, PtrMemVT);
8491   MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList,
8492                                 MachinePointerInfo(SV), Align(PtrSize)));
8493 
8494   // void *__gr_top at offset 8 (4 on ILP32)
8495   Offset += PtrSize;
8496   int GPRSize = FuncInfo->getVarArgsGPRSize();
8497   if (GPRSize > 0) {
8498     SDValue GRTop, GRTopAddr;
8499 
8500     GRTopAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8501                             DAG.getConstant(Offset, DL, PtrVT));
8502 
8503     GRTop = DAG.getFrameIndex(FuncInfo->getVarArgsGPRIndex(), PtrVT);
8504     GRTop = DAG.getNode(ISD::ADD, DL, PtrVT, GRTop,
8505                         DAG.getConstant(GPRSize, DL, PtrVT));
8506     GRTop = DAG.getZExtOrTrunc(GRTop, DL, PtrMemVT);
8507 
8508     MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
8509                                   MachinePointerInfo(SV, Offset),
8510                                   Align(PtrSize)));
8511   }
8512 
8513   // void *__vr_top at offset 16 (8 on ILP32)
8514   Offset += PtrSize;
8515   int FPRSize = FuncInfo->getVarArgsFPRSize();
8516   if (FPRSize > 0) {
8517     SDValue VRTop, VRTopAddr;
8518     VRTopAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8519                             DAG.getConstant(Offset, DL, PtrVT));
8520 
8521     VRTop = DAG.getFrameIndex(FuncInfo->getVarArgsFPRIndex(), PtrVT);
8522     VRTop = DAG.getNode(ISD::ADD, DL, PtrVT, VRTop,
8523                         DAG.getConstant(FPRSize, DL, PtrVT));
8524     VRTop = DAG.getZExtOrTrunc(VRTop, DL, PtrMemVT);
8525 
8526     MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
8527                                   MachinePointerInfo(SV, Offset),
8528                                   Align(PtrSize)));
8529   }
8530 
8531   // int __gr_offs at offset 24 (12 on ILP32)
8532   Offset += PtrSize;
8533   SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8534                                    DAG.getConstant(Offset, DL, PtrVT));
8535   MemOps.push_back(
8536       DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, DL, MVT::i32),
8537                    GROffsAddr, MachinePointerInfo(SV, Offset), Align(4)));
8538 
8539   // int __vr_offs at offset 28 (16 on ILP32)
8540   Offset += 4;
8541   SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8542                                    DAG.getConstant(Offset, DL, PtrVT));
8543   MemOps.push_back(
8544       DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, DL, MVT::i32),
8545                    VROffsAddr, MachinePointerInfo(SV, Offset), Align(4)));
8546 
8547   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
8548 }
8549 
8550 SDValue AArch64TargetLowering::LowerVASTART(SDValue Op,
8551                                             SelectionDAG &DAG) const {
8552   MachineFunction &MF = DAG.getMachineFunction();
8553 
8554   if (Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()))
8555     return LowerWin64_VASTART(Op, DAG);
8556   else if (Subtarget->isTargetDarwin())
8557     return LowerDarwin_VASTART(Op, DAG);
8558   else
8559     return LowerAAPCS_VASTART(Op, DAG);
8560 }
8561 
8562 SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op,
8563                                            SelectionDAG &DAG) const {
  // The AAPCS va_list has three pointers and two ints (= 32 bytes); Darwin
  // and Windows use a single pointer.
8566   SDLoc DL(Op);
8567   unsigned PtrSize = Subtarget->isTargetILP32() ? 4 : 8;
8568   unsigned VaListSize =
8569       (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())
8570           ? PtrSize
8571           : Subtarget->isTargetILP32() ? 20 : 32;
8572   const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
8573   const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
8574 
8575   return DAG.getMemcpy(Op.getOperand(0), DL, Op.getOperand(1), Op.getOperand(2),
8576                        DAG.getConstant(VaListSize, DL, MVT::i32),
8577                        Align(PtrSize), false, false, false,
8578                        MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
8579 }
8580 
8581 SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
8582   assert(Subtarget->isTargetDarwin() &&
8583          "automatic va_arg instruction only works on Darwin");
8584 
8585   const Value *V = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8586   EVT VT = Op.getValueType();
8587   SDLoc DL(Op);
8588   SDValue Chain = Op.getOperand(0);
8589   SDValue Addr = Op.getOperand(1);
8590   MaybeAlign Align(Op.getConstantOperandVal(3));
8591   unsigned MinSlotSize = Subtarget->isTargetILP32() ? 4 : 8;
8592   auto PtrVT = getPointerTy(DAG.getDataLayout());
8593   auto PtrMemVT = getPointerMemTy(DAG.getDataLayout());
8594   SDValue VAList =
8595       DAG.getLoad(PtrMemVT, DL, Chain, Addr, MachinePointerInfo(V));
8596   Chain = VAList.getValue(1);
8597   VAList = DAG.getZExtOrTrunc(VAList, DL, PtrVT);
8598 
8599   if (VT.isScalableVector())
8600     report_fatal_error("Passing SVE types to variadic functions is "
8601                        "currently not supported");
8602 
8603   if (Align && *Align > MinSlotSize) {
8604     VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8605                          DAG.getConstant(Align->value() - 1, DL, PtrVT));
8606     VAList = DAG.getNode(ISD::AND, DL, PtrVT, VAList,
8607                          DAG.getConstant(-(int64_t)Align->value(), DL, PtrVT));
8608   }
8609 
8610   Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
8611   unsigned ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
8612 
8613   // Scalar integer and FP values smaller than 64 bits are implicitly extended
8614   // up to 64 bits.  At the very least, we have to increase the striding of the
8615   // vaargs list to match this, and for FP values we need to introduce
8616   // FP_ROUND nodes as well.
8617   if (VT.isInteger() && !VT.isVector())
8618     ArgSize = std::max(ArgSize, MinSlotSize);
8619   bool NeedFPTrunc = false;
8620   if (VT.isFloatingPoint() && !VT.isVector() && VT != MVT::f64) {
8621     ArgSize = 8;
8622     NeedFPTrunc = true;
8623   }
8624 
8625   // Increment the pointer, VAList, to the next vaarg
8626   SDValue VANext = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8627                                DAG.getConstant(ArgSize, DL, PtrVT));
8628   VANext = DAG.getZExtOrTrunc(VANext, DL, PtrMemVT);
8629 
8630   // Store the incremented VAList to the legalized pointer
8631   SDValue APStore =
8632       DAG.getStore(Chain, DL, VANext, Addr, MachinePointerInfo(V));
8633 
8634   // Load the actual argument out of the pointer VAList
8635   if (NeedFPTrunc) {
8636     // Load the value as an f64.
8637     SDValue WideFP =
8638         DAG.getLoad(MVT::f64, DL, APStore, VAList, MachinePointerInfo());
8639     // Round the value down to an f32.
8640     SDValue NarrowFP = DAG.getNode(ISD::FP_ROUND, DL, VT, WideFP.getValue(0),
8641                                    DAG.getIntPtrConstant(1, DL));
8642     SDValue Ops[] = { NarrowFP, WideFP.getValue(1) };
8643     // Merge the rounded value with the chain output of the load.
8644     return DAG.getMergeValues(Ops, DL);
8645   }
8646 
8647   return DAG.getLoad(VT, DL, APStore, VAList, MachinePointerInfo());
8648 }
8649 
8650 SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
8651                                               SelectionDAG &DAG) const {
8652   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8653   MFI.setFrameAddressIsTaken(true);
8654 
8655   EVT VT = Op.getValueType();
8656   SDLoc DL(Op);
8657   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8658   SDValue FrameAddr =
8659       DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, MVT::i64);
8660   while (Depth--)
8661     FrameAddr = DAG.getLoad(VT, DL, DAG.getEntryNode(), FrameAddr,
8662                             MachinePointerInfo());
8663 
8664   if (Subtarget->isTargetILP32())
8665     FrameAddr = DAG.getNode(ISD::AssertZext, DL, MVT::i64, FrameAddr,
8666                             DAG.getValueType(VT));
8667 
8668   return FrameAddr;
8669 }
8670 
8671 SDValue AArch64TargetLowering::LowerSPONENTRY(SDValue Op,
8672                                               SelectionDAG &DAG) const {
8673   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8674 
8675   EVT VT = getPointerTy(DAG.getDataLayout());
8676   SDLoc DL(Op);
8677   int FI = MFI.CreateFixedObject(4, 0, false);
8678   return DAG.getFrameIndex(FI, VT);
8679 }
8680 
8681 #define GET_REGISTER_MATCHER
8682 #include "AArch64GenAsmMatcher.inc"
8683 
8684 // FIXME? Maybe this could be a TableGen attribute on some registers and
8685 // this table could be generated automatically from RegInfo.
8686 Register AArch64TargetLowering::
8687 getRegisterByName(const char* RegName, LLT VT, const MachineFunction &MF) const {
8688   Register Reg = MatchRegisterName(RegName);
8689   if (AArch64::X1 <= Reg && Reg <= AArch64::X28) {
8690     const MCRegisterInfo *MRI = Subtarget->getRegisterInfo();
8691     unsigned DwarfRegNum = MRI->getDwarfRegNum(Reg, false);
8692     if (!Subtarget->isXRegisterReserved(DwarfRegNum))
8693       Reg = 0;
8694   }
8695   if (Reg)
8696     return Reg;
8697   report_fatal_error(Twine("Invalid register name \""
8698                               + StringRef(RegName)  + "\"."));
8699 }
8700 
8701 SDValue AArch64TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
8702                                                      SelectionDAG &DAG) const {
8703   DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
8704 
8705   EVT VT = Op.getValueType();
8706   SDLoc DL(Op);
8707 
8708   SDValue FrameAddr =
8709       DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, VT);
8710   SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout()));
8711 
8712   return DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset);
8713 }
8714 
8715 SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
8716                                                SelectionDAG &DAG) const {
8717   MachineFunction &MF = DAG.getMachineFunction();
8718   MachineFrameInfo &MFI = MF.getFrameInfo();
8719   MFI.setReturnAddressIsTaken(true);
8720 
8721   EVT VT = Op.getValueType();
8722   SDLoc DL(Op);
8723   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8724   SDValue ReturnAddress;
8725   if (Depth) {
8726     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
8727     SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout()));
8728     ReturnAddress = DAG.getLoad(
8729         VT, DL, DAG.getEntryNode(),
8730         DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset), MachinePointerInfo());
8731   } else {
8732     // Return LR, which contains the return address. Mark it an implicit
8733     // live-in.
8734     Register Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass);
8735     ReturnAddress = DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
8736   }
8737 
  // The XPACLRI instruction assembles to a hint-space instruction before
  // Armv8.3-A, therefore it can be safely used on any pre-Armv8.3-A
  // architecture. On Armv8.3-A and onwards XPACI is available, so use that
  // instead.
8742   SDNode *St;
8743   if (Subtarget->hasPAuth()) {
8744     St = DAG.getMachineNode(AArch64::XPACI, DL, VT, ReturnAddress);
8745   } else {
8746     // XPACLRI operates on LR therefore we must move the operand accordingly.
8747     SDValue Chain =
8748         DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::LR, ReturnAddress);
8749     St = DAG.getMachineNode(AArch64::XPACLRI, DL, VT, Chain);
8750   }
8751   return SDValue(St, 0);
8752 }
8753 
/// LowerShiftParts - Lower SHL_PARTS/SRA_PARTS/SRL_PARTS, which return two
/// i64 values and take a 2 x i64 value to shift plus a shift amount.
8756 SDValue AArch64TargetLowering::LowerShiftParts(SDValue Op,
8757                                                SelectionDAG &DAG) const {
8758   SDValue Lo, Hi;
8759   expandShiftParts(Op.getNode(), Lo, Hi, DAG);
8760   return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
8761 }
8762 
8763 bool AArch64TargetLowering::isOffsetFoldingLegal(
8764     const GlobalAddressSDNode *GA) const {
8765   // Offsets are folded in the DAG combine rather than here so that we can
8766   // intelligently choose an offset based on the uses.
8767   return false;
8768 }
8769 
8770 bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
8771                                          bool OptForSize) const {
8772   bool IsLegal = false;
  // We can materialize #0.0 as fmov $Rd, XZR for the 64-bit and 32-bit cases,
  // and for the 16-bit case when the target has full fp16 support.
8775   // FIXME: We should be able to handle f128 as well with a clever lowering.
8776   const APInt ImmInt = Imm.bitcastToAPInt();
8777   if (VT == MVT::f64)
8778     IsLegal = AArch64_AM::getFP64Imm(ImmInt) != -1 || Imm.isPosZero();
8779   else if (VT == MVT::f32)
8780     IsLegal = AArch64_AM::getFP32Imm(ImmInt) != -1 || Imm.isPosZero();
8781   else if (VT == MVT::f16 && Subtarget->hasFullFP16())
8782     IsLegal = AArch64_AM::getFP16Imm(ImmInt) != -1 || Imm.isPosZero();
  // TODO: fmov h0, w0 is also legal, however we don't have an isel pattern to
  //       generate that fmov.
8785 
  // If we cannot materialize the value in the immediate field of an fmov,
  // check if the value can be encoded as the immediate operand of a logical
  // instruction. The immediate value will be created with either MOVZ, MOVN,
  // or ORR.
8789   if (!IsLegal && (VT == MVT::f64 || VT == MVT::f32)) {
    // The cost is actually exactly the same for mov+fmov vs. adrp+ldr;
    // however the mov+fmov sequence is always better because of the reduced
    // cache pressure. The timings are still the same if you consider
    // movw+movk+fmov vs. adrp+ldr (it's one instruction longer, but the
    // movw+movk is fused). So we limit the expansion to at most 2
    // instructions.
8795     SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
    AArch64_IMM::expandMOVImm(ImmInt.getZExtValue(), VT.getSizeInBits(),
                              Insn);
8798     unsigned Limit = (OptForSize ? 1 : (Subtarget->hasFuseLiterals() ? 5 : 2));
8799     IsLegal = Insn.size() <= Limit;
8800   }
8801 
8802   LLVM_DEBUG(dbgs() << (IsLegal ? "Legal " : "Illegal ") << VT.getEVTString()
8803                     << " imm value: "; Imm.dump(););
8804   return IsLegal;
8805 }
8806 
8807 //===----------------------------------------------------------------------===//
8808 //                          AArch64 Optimization Hooks
8809 //===----------------------------------------------------------------------===//
8810 
8811 static SDValue getEstimate(const AArch64Subtarget *ST, unsigned Opcode,
8812                            SDValue Operand, SelectionDAG &DAG,
8813                            int &ExtraSteps) {
8814   EVT VT = Operand.getValueType();
8815   if ((ST->hasNEON() &&
8816        (VT == MVT::f64 || VT == MVT::v1f64 || VT == MVT::v2f64 ||
8817         VT == MVT::f32 || VT == MVT::v1f32 || VT == MVT::v2f32 ||
8818         VT == MVT::v4f32)) ||
8819       (ST->hasSVE() &&
8820        (VT == MVT::nxv8f16 || VT == MVT::nxv4f32 || VT == MVT::nxv2f64))) {
8821     if (ExtraSteps == TargetLoweringBase::ReciprocalEstimate::Unspecified)
8822       // For the reciprocal estimates, convergence is quadratic, so the number
8823       // of digits is doubled after each iteration.  In ARMv8, the accuracy of
8824       // the initial estimate is 2^-8.  Thus the number of extra steps to refine
8825       // the result for float (23 mantissa bits) is 2 and for double (52
8826       // mantissa bits) is 3.
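      // (8 bits of accuracy doubled twice gives 32 bits, which covers the 23
      // mantissa bits of float; three doublings give 64 bits, which covers
      // the 52 mantissa bits of double.)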
8827       ExtraSteps = VT.getScalarType() == MVT::f64 ? 3 : 2;
8828 
8829     return DAG.getNode(Opcode, SDLoc(Operand), VT, Operand);
8830   }
8831 
8832   return SDValue();
8833 }
8834 
8835 SDValue
8836 AArch64TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
8837                                         const DenormalMode &Mode) const {
8838   SDLoc DL(Op);
8839   EVT VT = Op.getValueType();
8840   EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
8841   SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
8842   return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
8843 }
8844 
8845 SDValue
8846 AArch64TargetLowering::getSqrtResultForDenormInput(SDValue Op,
8847                                                    SelectionDAG &DAG) const {
8848   return Op;
8849 }
8850 
8851 SDValue AArch64TargetLowering::getSqrtEstimate(SDValue Operand,
8852                                                SelectionDAG &DAG, int Enabled,
8853                                                int &ExtraSteps,
8854                                                bool &UseOneConst,
8855                                                bool Reciprocal) const {
8856   if (Enabled == ReciprocalEstimate::Enabled ||
8857       (Enabled == ReciprocalEstimate::Unspecified && Subtarget->useRSqrt()))
8858     if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRSQRTE, Operand,
8859                                        DAG, ExtraSteps)) {
8860       SDLoc DL(Operand);
8861       EVT VT = Operand.getValueType();
8862 
8863       SDNodeFlags Flags;
8864       Flags.setAllowReassociation(true);
8865 
8866       // Newton reciprocal square root iteration: E * 0.5 * (3 - X * E^2)
8867       // AArch64 reciprocal square root iteration instruction: 0.5 * (3 - M * N)
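      // Each iteration of the loop below computes E <- E * FRSQRTS(X, E * E),
      // i.e. E * 0.5 * (3 - X * E * E).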
8868       for (int i = ExtraSteps; i > 0; --i) {
8869         SDValue Step = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Estimate,
8870                                    Flags);
8871         Step = DAG.getNode(AArch64ISD::FRSQRTS, DL, VT, Operand, Step, Flags);
8872         Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags);
8873       }
8874       if (!Reciprocal)
8875         Estimate = DAG.getNode(ISD::FMUL, DL, VT, Operand, Estimate, Flags);
8876 
8877       ExtraSteps = 0;
8878       return Estimate;
8879     }
8880 
8881   return SDValue();
8882 }
8883 
8884 SDValue AArch64TargetLowering::getRecipEstimate(SDValue Operand,
8885                                                 SelectionDAG &DAG, int Enabled,
8886                                                 int &ExtraSteps) const {
8887   if (Enabled == ReciprocalEstimate::Enabled)
8888     if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRECPE, Operand,
8889                                        DAG, ExtraSteps)) {
8890       SDLoc DL(Operand);
8891       EVT VT = Operand.getValueType();
8892 
8893       SDNodeFlags Flags;
8894       Flags.setAllowReassociation(true);
8895 
8896       // Newton reciprocal iteration: E * (2 - X * E)
8897       // AArch64 reciprocal iteration instruction: (2 - M * N)
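      // Each iteration of the loop below computes E <- E * FRECPS(X, E),
      // i.e. E * (2 - X * E).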
8898       for (int i = ExtraSteps; i > 0; --i) {
8899         SDValue Step = DAG.getNode(AArch64ISD::FRECPS, DL, VT, Operand,
8900                                    Estimate, Flags);
8901         Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags);
8902       }
8903 
8904       ExtraSteps = 0;
8905       return Estimate;
8906     }
8907 
8908   return SDValue();
8909 }
8910 
8911 //===----------------------------------------------------------------------===//
8912 //                          AArch64 Inline Assembly Support
8913 //===----------------------------------------------------------------------===//
8914 
8915 // Table of Constraints
// TODO: This is the current set of constraints supported by ARM for the
// compiler; not all of them may make sense.
8918 //
8919 // r - A general register
8920 // w - An FP/SIMD register of some size in the range v0-v31
8921 // x - An FP/SIMD register of some size in the range v0-v15
8922 // I - Constant that can be used with an ADD instruction
8923 // J - Constant that can be used with a SUB instruction
8924 // K - Constant that can be used with a 32-bit logical instruction
8925 // L - Constant that can be used with a 64-bit logical instruction
8926 // M - Constant that can be used as a 32-bit MOV immediate
8927 // N - Constant that can be used as a 64-bit MOV immediate
8928 // Q - A memory reference with base register and no offset
8929 // S - A symbolic address
8930 // Y - Floating point constant zero
8931 // Z - Integer constant zero
8932 //
8933 //   Note that general register operands will be output using their 64-bit x
8934 // register name, whatever the size of the variable, unless the asm operand
8935 // is prefixed by the %w modifier. Floating-point and SIMD register operands
8936 // will be output with the v prefix unless prefixed by the %b, %h, %s, %d or
8937 // %q modifier.
8938 const char *AArch64TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
8939   // At this point, we have to lower this constraint to something else, so we
8940   // lower it to an "r" or "w". However, by doing this we will force the result
8941   // to be in register, while the X constraint is much more permissive.
8942   //
8943   // Although we are correct (we are free to emit anything, without
8944   // constraints), we might break use cases that would expect us to be more
8945   // efficient and emit something else.
8946   if (!Subtarget->hasFPARMv8())
8947     return "r";
8948 
8949   if (ConstraintVT.isFloatingPoint())
8950     return "w";
8951 
8952   if (ConstraintVT.isVector() &&
8953      (ConstraintVT.getSizeInBits() == 64 ||
8954       ConstraintVT.getSizeInBits() == 128))
8955     return "w";
8956 
8957   return "r";
8958 }
8959 
8960 enum PredicateConstraint {
8961   Upl,
8962   Upa,
8963   Invalid
8964 };
8965 
8966 static PredicateConstraint parsePredicateConstraint(StringRef Constraint) {
8967   PredicateConstraint P = PredicateConstraint::Invalid;
8968   if (Constraint == "Upa")
8969     P = PredicateConstraint::Upa;
8970   if (Constraint == "Upl")
8971     P = PredicateConstraint::Upl;
8972   return P;
8973 }
8974 
8975 /// getConstraintType - Given a constraint letter, return the type of
8976 /// constraint it is for this target.
8977 AArch64TargetLowering::ConstraintType
8978 AArch64TargetLowering::getConstraintType(StringRef Constraint) const {
8979   if (Constraint.size() == 1) {
8980     switch (Constraint[0]) {
8981     default:
8982       break;
8983     case 'x':
8984     case 'w':
8985     case 'y':
8986       return C_RegisterClass;
8987     // An address with a single base register. Due to the way we
8988     // currently handle addresses it is the same as 'r'.
8989     case 'Q':
8990       return C_Memory;
8991     case 'I':
8992     case 'J':
8993     case 'K':
8994     case 'L':
8995     case 'M':
8996     case 'N':
8997     case 'Y':
8998     case 'Z':
8999       return C_Immediate;
9000     case 'z':
9001     case 'S': // A symbolic address
9002       return C_Other;
9003     }
9004   } else if (parsePredicateConstraint(Constraint) !=
9005              PredicateConstraint::Invalid)
    return C_RegisterClass;
9007   return TargetLowering::getConstraintType(Constraint);
9008 }
9009 
9010 /// Examine constraint type and operand type and determine a weight value.
9011 /// This object must already have been set up with the operand type
9012 /// and the current alternative constraint selected.
9013 TargetLowering::ConstraintWeight
9014 AArch64TargetLowering::getSingleConstraintMatchWeight(
9015     AsmOperandInfo &info, const char *constraint) const {
9016   ConstraintWeight weight = CW_Invalid;
9017   Value *CallOperandVal = info.CallOperandVal;
9018   // If we don't have a value, we can't do a match,
9019   // but allow it at the lowest weight.
9020   if (!CallOperandVal)
9021     return CW_Default;
9022   Type *type = CallOperandVal->getType();
9023   // Look at the constraint type.
9024   switch (*constraint) {
9025   default:
9026     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
9027     break;
9028   case 'x':
9029   case 'w':
9030   case 'y':
9031     if (type->isFloatingPointTy() || type->isVectorTy())
9032       weight = CW_Register;
9033     break;
9034   case 'z':
9035     weight = CW_Constant;
9036     break;
9037   case 'U':
9038     if (parsePredicateConstraint(constraint) != PredicateConstraint::Invalid)
9039       weight = CW_Register;
9040     break;
9041   }
9042   return weight;
9043 }
9044 
9045 std::pair<unsigned, const TargetRegisterClass *>
9046 AArch64TargetLowering::getRegForInlineAsmConstraint(
9047     const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
9048   if (Constraint.size() == 1) {
9049     switch (Constraint[0]) {
9050     case 'r':
9051       if (VT.isScalableVector())
9052         return std::make_pair(0U, nullptr);
9053       if (Subtarget->hasLS64() && VT.getSizeInBits() == 512)
9054         return std::make_pair(0U, &AArch64::GPR64x8ClassRegClass);
9055       if (VT.getFixedSizeInBits() == 64)
9056         return std::make_pair(0U, &AArch64::GPR64commonRegClass);
9057       return std::make_pair(0U, &AArch64::GPR32commonRegClass);
9058     case 'w': {
9059       if (!Subtarget->hasFPARMv8())
9060         break;
9061       if (VT.isScalableVector()) {
9062         if (VT.getVectorElementType() != MVT::i1)
9063           return std::make_pair(0U, &AArch64::ZPRRegClass);
9064         return std::make_pair(0U, nullptr);
9065       }
9066       uint64_t VTSize = VT.getFixedSizeInBits();
9067       if (VTSize == 16)
9068         return std::make_pair(0U, &AArch64::FPR16RegClass);
9069       if (VTSize == 32)
9070         return std::make_pair(0U, &AArch64::FPR32RegClass);
9071       if (VTSize == 64)
9072         return std::make_pair(0U, &AArch64::FPR64RegClass);
9073       if (VTSize == 128)
9074         return std::make_pair(0U, &AArch64::FPR128RegClass);
9075       break;
9076     }
9077     // The instructions that this constraint is designed for can
9078     // only take 128-bit registers so just use that regclass.
9079     case 'x':
9080       if (!Subtarget->hasFPARMv8())
9081         break;
9082       if (VT.isScalableVector())
9083         return std::make_pair(0U, &AArch64::ZPR_4bRegClass);
9084       if (VT.getSizeInBits() == 128)
9085         return std::make_pair(0U, &AArch64::FPR128_loRegClass);
9086       break;
9087     case 'y':
9088       if (!Subtarget->hasFPARMv8())
9089         break;
9090       if (VT.isScalableVector())
9091         return std::make_pair(0U, &AArch64::ZPR_3bRegClass);
9092       break;
9093     }
9094   } else {
9095     PredicateConstraint PC = parsePredicateConstraint(Constraint);
9096     if (PC != PredicateConstraint::Invalid) {
9097       if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1)
9098         return std::make_pair(0U, nullptr);
9099       bool restricted = (PC == PredicateConstraint::Upl);
9100       return restricted ? std::make_pair(0U, &AArch64::PPR_3bRegClass)
9101                         : std::make_pair(0U, &AArch64::PPRRegClass);
9102     }
9103   }
9104   if (StringRef("{cc}").equals_insensitive(Constraint))
9105     return std::make_pair(unsigned(AArch64::NZCV), &AArch64::CCRRegClass);
9106 
9107   // Use the default implementation in TargetLowering to convert the register
9108   // constraint into a member of a register class.
9109   std::pair<unsigned, const TargetRegisterClass *> Res;
9110   Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
9111 
9112   // Not found as a standard register?
9113   if (!Res.second) {
9114     unsigned Size = Constraint.size();
9115     if ((Size == 4 || Size == 5) && Constraint[0] == '{' &&
9116         tolower(Constraint[1]) == 'v' && Constraint[Size - 1] == '}') {
9117       int RegNo;
9118       bool Failed = Constraint.slice(2, Size - 1).getAsInteger(10, RegNo);
9119       if (!Failed && RegNo >= 0 && RegNo <= 31) {
9120         // v0 - v31 are aliases of q0 - q31 or d0 - d31 depending on size.
        // By default we'll emit v0-v31 for this unless there's a modifier, in
        // which case we'll emit the correct register as well.
9123         if (VT != MVT::Other && VT.getSizeInBits() == 64) {
9124           Res.first = AArch64::FPR64RegClass.getRegister(RegNo);
9125           Res.second = &AArch64::FPR64RegClass;
9126         } else {
9127           Res.first = AArch64::FPR128RegClass.getRegister(RegNo);
9128           Res.second = &AArch64::FPR128RegClass;
9129         }
9130       }
9131     }
9132   }
9133 
9134   if (Res.second && !Subtarget->hasFPARMv8() &&
9135       !AArch64::GPR32allRegClass.hasSubClassEq(Res.second) &&
9136       !AArch64::GPR64allRegClass.hasSubClassEq(Res.second))
9137     return std::make_pair(0U, nullptr);
9138 
9139   return Res;
9140 }
9141 
9142 EVT AArch64TargetLowering::getAsmOperandValueType(const DataLayout &DL,
9143                                                   llvm::Type *Ty,
9144                                                   bool AllowUnknown) const {
9145   if (Subtarget->hasLS64() && Ty->isIntegerTy(512))
9146     return EVT(MVT::i64x8);
9147 
9148   return TargetLowering::getAsmOperandValueType(DL, Ty, AllowUnknown);
9149 }
9150 
9151 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
9152 /// vector.  If it is invalid, don't add anything to Ops.
9153 void AArch64TargetLowering::LowerAsmOperandForConstraint(
9154     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
9155     SelectionDAG &DAG) const {
9156   SDValue Result;
9157 
9158   // Currently only support length 1 constraints.
9159   if (Constraint.length() != 1)
9160     return;
9161 
9162   char ConstraintLetter = Constraint[0];
9163   switch (ConstraintLetter) {
9164   default:
9165     break;
9166 
  // This set of constraints deals with valid constants for various
  // instructions. Validate and return a target constant for them if we can.
9169   case 'z': {
9170     // 'z' maps to xzr or wzr so it needs an input of 0.
9171     if (!isNullConstant(Op))
9172       return;
9173 
9174     if (Op.getValueType() == MVT::i64)
9175       Result = DAG.getRegister(AArch64::XZR, MVT::i64);
9176     else
9177       Result = DAG.getRegister(AArch64::WZR, MVT::i32);
9178     break;
9179   }
9180   case 'S': {
9181     // An absolute symbolic address or label reference.
9182     if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9183       Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
9184                                           GA->getValueType(0));
9185     } else if (const BlockAddressSDNode *BA =
9186                    dyn_cast<BlockAddressSDNode>(Op)) {
9187       Result =
9188           DAG.getTargetBlockAddress(BA->getBlockAddress(), BA->getValueType(0));
9189     } else
9190       return;
9191     break;
9192   }
9193 
9194   case 'I':
9195   case 'J':
9196   case 'K':
9197   case 'L':
9198   case 'M':
9199   case 'N':
9200     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
9201     if (!C)
9202       return;
9203 
9204     // Grab the value and do some validation.
9205     uint64_t CVal = C->getZExtValue();
9206     switch (ConstraintLetter) {
9207     // The I constraint applies only to simple ADD or SUB immediate operands:
9208     // i.e. 0 to 4095 with optional shift by 12
9209     // The J constraint applies only to ADD or SUB immediates that would be
9210     // valid when negated, i.e. if [an add pattern] were to be output as a SUB
9211     // instruction [or vice versa], in other words -1 to -4095 with optional
9212     // left shift by 12.
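    // For example, #0xFFF and #0xFFF000 satisfy 'I', while #-1 and #-0xFFF000
    // satisfy 'J'.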
9213     case 'I':
9214       if (isUInt<12>(CVal) || isShiftedUInt<12, 12>(CVal))
9215         break;
9216       return;
9217     case 'J': {
9218       uint64_t NVal = -C->getSExtValue();
9219       if (isUInt<12>(NVal) || isShiftedUInt<12, 12>(NVal)) {
9220         CVal = C->getSExtValue();
9221         break;
9222       }
9223       return;
9224     }
9225     // The K and L constraints apply *only* to logical immediates, including
9226     // what used to be the MOVI alias for ORR (though the MOVI alias has now
9227     // been removed and MOV should be used). So these constraints have to
9228     // distinguish between bit patterns that are valid 32-bit or 64-bit
9229     // "bitmask immediates": for example 0xaaaaaaaa is a valid bimm32 (K), but
9230     // not a valid bimm64 (L) where 0xaaaaaaaaaaaaaaaa would be valid, and vice
9231     // versa.
9232     case 'K':
9233       if (AArch64_AM::isLogicalImmediate(CVal, 32))
9234         break;
9235       return;
9236     case 'L':
9237       if (AArch64_AM::isLogicalImmediate(CVal, 64))
9238         break;
9239       return;
9240     // The M and N constraints are a superset of K and L respectively, for use
9241     // with the MOV (immediate) alias. As well as the logical immediates they
9242     // also match 32 or 64-bit immediates that can be loaded either using a
    // *single* MOVZ or MOVN, such as 32-bit 0x12340000, 0x00001234, 0xffffedca
9244     // (M) or 64-bit 0x1234000000000000 (N) etc.
    // As a note, some of this code is liberally stolen from the asm parser.
9246     case 'M': {
9247       if (!isUInt<32>(CVal))
9248         return;
9249       if (AArch64_AM::isLogicalImmediate(CVal, 32))
9250         break;
9251       if ((CVal & 0xFFFF) == CVal)
9252         break;
9253       if ((CVal & 0xFFFF0000ULL) == CVal)
9254         break;
9255       uint64_t NCVal = ~(uint32_t)CVal;
9256       if ((NCVal & 0xFFFFULL) == NCVal)
9257         break;
9258       if ((NCVal & 0xFFFF0000ULL) == NCVal)
9259         break;
9260       return;
9261     }
9262     case 'N': {
9263       if (AArch64_AM::isLogicalImmediate(CVal, 64))
9264         break;
9265       if ((CVal & 0xFFFFULL) == CVal)
9266         break;
9267       if ((CVal & 0xFFFF0000ULL) == CVal)
9268         break;
9269       if ((CVal & 0xFFFF00000000ULL) == CVal)
9270         break;
9271       if ((CVal & 0xFFFF000000000000ULL) == CVal)
9272         break;
9273       uint64_t NCVal = ~CVal;
9274       if ((NCVal & 0xFFFFULL) == NCVal)
9275         break;
9276       if ((NCVal & 0xFFFF0000ULL) == NCVal)
9277         break;
9278       if ((NCVal & 0xFFFF00000000ULL) == NCVal)
9279         break;
9280       if ((NCVal & 0xFFFF000000000000ULL) == NCVal)
9281         break;
9282       return;
9283     }
9284     default:
9285       return;
9286     }
9287 
9288     // All assembler immediates are 64-bit integers.
9289     Result = DAG.getTargetConstant(CVal, SDLoc(Op), MVT::i64);
9290     break;
9291   }
9292 
9293   if (Result.getNode()) {
9294     Ops.push_back(Result);
9295     return;
9296   }
9297 
9298   return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
9299 }
9300 
9301 //===----------------------------------------------------------------------===//
9302 //                     AArch64 Advanced SIMD Support
9303 //===----------------------------------------------------------------------===//
9304 
9305 /// WidenVector - Given a value in the V64 register class, produce the
9306 /// equivalent value in the V128 register class.
9307 static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG) {
9308   EVT VT = V64Reg.getValueType();
9309   unsigned NarrowSize = VT.getVectorNumElements();
9310   MVT EltTy = VT.getVectorElementType().getSimpleVT();
9311   MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
9312   SDLoc DL(V64Reg);
9313 
9314   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideTy, DAG.getUNDEF(WideTy),
9315                      V64Reg, DAG.getConstant(0, DL, MVT::i64));
9316 }
9317 
9318 /// getExtFactor - Determine the adjustment factor for the position when
9319 /// generating an "extract from vector registers" instruction.
9320 static unsigned getExtFactor(SDValue &V) {
9321   EVT EltType = V.getValueType().getVectorElementType();
9322   return EltType.getSizeInBits() / 8;
9323 }
9324 
9325 /// NarrowVector - Given a value in the V128 register class, produce the
9326 /// equivalent value in the V64 register class.
9327 static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
9328   EVT VT = V128Reg.getValueType();
9329   unsigned WideSize = VT.getVectorNumElements();
9330   MVT EltTy = VT.getVectorElementType().getSimpleVT();
9331   MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
9332   SDLoc DL(V128Reg);
9333 
9334   return DAG.getTargetExtractSubreg(AArch64::dsub, DL, NarrowTy, V128Reg);
9335 }
9336 
9337 // Gather data to see if the operation can be modelled as a
9338 // shuffle in combination with VEXTs.
9339 SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
9340                                                   SelectionDAG &DAG) const {
9341   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
9342   LLVM_DEBUG(dbgs() << "AArch64TargetLowering::ReconstructShuffle\n");
9343   SDLoc dl(Op);
9344   EVT VT = Op.getValueType();
9345   assert(!VT.isScalableVector() &&
9346          "Scalable vectors cannot be used with ISD::BUILD_VECTOR");
9347   unsigned NumElts = VT.getVectorNumElements();
9348 
9349   struct ShuffleSourceInfo {
9350     SDValue Vec;
9351     unsigned MinElt;
9352     unsigned MaxElt;
9353 
9354     // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
9355     // be compatible with the shuffle we intend to construct. As a result
9356     // ShuffleVec will be some sliding window into the original Vec.
9357     SDValue ShuffleVec;
9358 
    // Code should guarantee that element i in Vec starts at element
    // "WindowBase + i * WindowScale" in ShuffleVec.
9361     int WindowBase;
9362     int WindowScale;
9363 
9364     ShuffleSourceInfo(SDValue Vec)
9365       : Vec(Vec), MinElt(std::numeric_limits<unsigned>::max()), MaxElt(0),
9366           ShuffleVec(Vec), WindowBase(0), WindowScale(1) {}
9367 
9368     bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
9369   };
9370 
9371   // First gather all vectors used as an immediate source for this BUILD_VECTOR
9372   // node.
9373   SmallVector<ShuffleSourceInfo, 2> Sources;
9374   for (unsigned i = 0; i < NumElts; ++i) {
9375     SDValue V = Op.getOperand(i);
9376     if (V.isUndef())
9377       continue;
9378     else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9379              !isa<ConstantSDNode>(V.getOperand(1)) ||
9380              V.getOperand(0).getValueType().isScalableVector()) {
9381       LLVM_DEBUG(
9382           dbgs() << "Reshuffle failed: "
9383                     "a shuffle can only come from building a vector from "
9384                     "various elements of other fixed-width vectors, provided "
9385                     "their indices are constant\n");
9386       return SDValue();
9387     }
9388 
9389     // Add this element source to the list if it's not already there.
9390     SDValue SourceVec = V.getOperand(0);
9391     auto Source = find(Sources, SourceVec);
9392     if (Source == Sources.end())
9393       Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));
9394 
9395     // Update the minimum and maximum lane number seen.
9396     unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
9397     Source->MinElt = std::min(Source->MinElt, EltNo);
9398     Source->MaxElt = std::max(Source->MaxElt, EltNo);
9399   }
9400 
9401   // If we have 3 or 4 sources, try to generate a TBL, which will at least be
9402   // better than moving to/from gpr registers for larger vectors.
9403   if ((Sources.size() == 3 || Sources.size() == 4) && NumElts > 4) {
9404     // Construct a mask for the tbl. We may need to adjust the index for types
9405     // larger than i8.
9406     SmallVector<unsigned, 16> Mask;
9407     unsigned OutputFactor = VT.getScalarSizeInBits() / 8;
9408     for (unsigned I = 0; I < NumElts; ++I) {
9409       SDValue V = Op.getOperand(I);
9410       if (V.isUndef()) {
9411         for (unsigned OF = 0; OF < OutputFactor; OF++)
9412           Mask.push_back(-1);
9413         continue;
9414       }
9415       // Set the Mask lanes adjusted for the size of the input and output
9416       // lanes. The Mask is always i8, so it will set OutputFactor lanes per
9417       // output element, adjusted in their positions per input and output types.
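      // For example, with i32 elements in both the input and the output, lane
      // L of source S contributes byte indices 16 * S + 4 * L through
      // 16 * S + 4 * L + 3 to the mask.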
9418       unsigned Lane = V.getConstantOperandVal(1);
9419       for (unsigned S = 0; S < Sources.size(); S++) {
9420         if (V.getOperand(0) == Sources[S].Vec) {
9421           unsigned InputSize = Sources[S].Vec.getScalarValueSizeInBits();
9422           unsigned InputBase = 16 * S + Lane * InputSize / 8;
9423           for (unsigned OF = 0; OF < OutputFactor; OF++)
9424             Mask.push_back(InputBase + OF);
9425           break;
9426         }
9427       }
9428     }
9429 
9430     // Construct the tbl3/tbl4 out of an intrinsic, the sources converted to
9431     // v16i8, and the TBLMask
9432     SmallVector<SDValue, 16> TBLOperands;
9433     TBLOperands.push_back(DAG.getConstant(Sources.size() == 3
9434                                               ? Intrinsic::aarch64_neon_tbl3
9435                                               : Intrinsic::aarch64_neon_tbl4,
9436                                           dl, MVT::i32));
9437     for (unsigned i = 0; i < Sources.size(); i++) {
9438       SDValue Src = Sources[i].Vec;
9439       EVT SrcVT = Src.getValueType();
9440       Src = DAG.getBitcast(SrcVT.is64BitVector() ? MVT::v8i8 : MVT::v16i8, Src);
9441       assert((SrcVT.is64BitVector() || SrcVT.is128BitVector()) &&
9442              "Expected a legally typed vector");
9443       if (SrcVT.is64BitVector())
9444         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, Src,
9445                           DAG.getUNDEF(MVT::v8i8));
9446       TBLOperands.push_back(Src);
9447     }
9448 
9449     SmallVector<SDValue, 16> TBLMask;
9450     for (unsigned i = 0; i < Mask.size(); i++)
9451       TBLMask.push_back(DAG.getConstant(Mask[i], dl, MVT::i32));
9452     assert((Mask.size() == 8 || Mask.size() == 16) &&
9453            "Expected a v8i8 or v16i8 Mask");
    TBLOperands.push_back(DAG.getBuildVector(
        Mask.size() == 8 ? MVT::v8i8 : MVT::v16i8, dl, TBLMask));
9456 
9457     SDValue Shuffle =
9458         DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
9459                     Mask.size() == 8 ? MVT::v8i8 : MVT::v16i8, TBLOperands);
9460     return DAG.getBitcast(VT, Shuffle);
9461   }
9462 
9463   if (Sources.size() > 2) {
9464     LLVM_DEBUG(dbgs() << "Reshuffle failed: currently only do something "
9465                       << "sensible when at most two source vectors are "
9466                       << "involved\n");
9467     return SDValue();
9468   }
9469 
9470   // Find out the smallest element size among result and two sources, and use
9471   // it as element size to build the shuffle_vector.
9472   EVT SmallestEltTy = VT.getVectorElementType();
9473   for (auto &Source : Sources) {
9474     EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
9475     if (SrcEltTy.bitsLT(SmallestEltTy)) {
9476       SmallestEltTy = SrcEltTy;
9477     }
9478   }
9479   unsigned ResMultiplier =
9480       VT.getScalarSizeInBits() / SmallestEltTy.getFixedSizeInBits();
9481   uint64_t VTSize = VT.getFixedSizeInBits();
9482   NumElts = VTSize / SmallestEltTy.getFixedSizeInBits();
9483   EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
9484 
9485   // If the source vector is too wide or too narrow, we may nevertheless be able
9486   // to construct a compatible shuffle either by concatenating it with UNDEF or
9487   // extracting a suitable range of elements.
9488   for (auto &Src : Sources) {
9489     EVT SrcVT = Src.ShuffleVec.getValueType();
9490 
9491     TypeSize SrcVTSize = SrcVT.getSizeInBits();
9492     if (SrcVTSize == TypeSize::Fixed(VTSize))
9493       continue;
9494 
9495     // This stage of the search produces a source with the same element type as
9496     // the original, but with a total width matching the BUILD_VECTOR output.
9497     EVT EltVT = SrcVT.getVectorElementType();
9498     unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits();
9499     EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
9500 
9501     if (SrcVTSize.getFixedValue() < VTSize) {
9502       assert(2 * SrcVTSize == VTSize);
      // We can pad out the smaller vector for free, so if it's part of a
      // shuffle we can simply widen it with UNDEF and carry on.
9505       Src.ShuffleVec =
9506           DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
9507                       DAG.getUNDEF(Src.ShuffleVec.getValueType()));
9508       continue;
9509     }
9510 
9511     if (SrcVTSize.getFixedValue() != 2 * VTSize) {
9512       LLVM_DEBUG(
9513           dbgs() << "Reshuffle failed: result vector too small to extract\n");
9514       return SDValue();
9515     }
9516 
9517     if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
9518       LLVM_DEBUG(
9519           dbgs() << "Reshuffle failed: span too large for a VEXT to cope\n");
9520       return SDValue();
9521     }
9522 
9523     if (Src.MinElt >= NumSrcElts) {
9524       // The extraction can just take the second half
9525       Src.ShuffleVec =
9526           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
9527                       DAG.getConstant(NumSrcElts, dl, MVT::i64));
9528       Src.WindowBase = -NumSrcElts;
9529     } else if (Src.MaxElt < NumSrcElts) {
9530       // The extraction can just take the first half
9531       Src.ShuffleVec =
9532           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
9533                       DAG.getConstant(0, dl, MVT::i64));
9534     } else {
9535       // An actual VEXT is needed
9536       SDValue VEXTSrc1 =
9537           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
9538                       DAG.getConstant(0, dl, MVT::i64));
9539       SDValue VEXTSrc2 =
9540           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
9541                       DAG.getConstant(NumSrcElts, dl, MVT::i64));
9542       unsigned Imm = Src.MinElt * getExtFactor(VEXTSrc1);
9543 
9544       if (!SrcVT.is64BitVector()) {
9545         LLVM_DEBUG(
9546           dbgs() << "Reshuffle failed: don't know how to lower AArch64ISD::EXT "
9547                     "for SVE vectors.");
9548         return SDValue();
9549       }
9550 
9551       Src.ShuffleVec = DAG.getNode(AArch64ISD::EXT, dl, DestVT, VEXTSrc1,
9552                                    VEXTSrc2,
9553                                    DAG.getConstant(Imm, dl, MVT::i32));
9554       Src.WindowBase = -Src.MinElt;
9555     }
9556   }
9557 
9558   // Another possible incompatibility occurs from the vector element types. We
9559   // can fix this by bitcasting the source vectors to the same type we intend
9560   // for the shuffle.
9561   for (auto &Src : Sources) {
9562     EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
9563     if (SrcEltTy == SmallestEltTy)
9564       continue;
9565     assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
9566     Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec);
9567     Src.WindowScale =
9568         SrcEltTy.getFixedSizeInBits() / SmallestEltTy.getFixedSizeInBits();
9569     Src.WindowBase *= Src.WindowScale;
9570   }
9571 
9572   // Final check before we try to actually produce a shuffle.
9573   LLVM_DEBUG(for (auto Src
9574                   : Sources)
9575                  assert(Src.ShuffleVec.getValueType() == ShuffleVT););
9576 
9577   // The stars all align, our next step is to produce the mask for the shuffle.
9578   SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
9579   int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
9580   for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
9581     SDValue Entry = Op.getOperand(i);
9582     if (Entry.isUndef())
9583       continue;
9584 
9585     auto Src = find(Sources, Entry.getOperand(0));
9586     int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();
9587 
9588     // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
9589     // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
9590     // segment.
9591     EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
9592     int BitsDefined = std::min(OrigEltTy.getScalarSizeInBits(),
9593                                VT.getScalarSizeInBits());
9594     int LanesDefined = BitsDefined / BitsPerShuffleLane;
9595 
9596     // This source is expected to fill ResMultiplier lanes of the final shuffle,
9597     // starting at the appropriate offset.
9598     int *LaneMask = &Mask[i * ResMultiplier];
9599 
9600     int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
9601     ExtractBase += NumElts * (Src - Sources.begin());
9602     for (int j = 0; j < LanesDefined; ++j)
9603       LaneMask[j] = ExtractBase + j;
9604   }
9605 
9606   // Final check before we try to produce nonsense...
9607   if (!isShuffleMaskLegal(Mask, ShuffleVT)) {
9608     LLVM_DEBUG(dbgs() << "Reshuffle failed: illegal shuffle mask\n");
9609     return SDValue();
9610   }
9611 
9612   SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
9613   for (unsigned i = 0; i < Sources.size(); ++i)
9614     ShuffleOps[i] = Sources[i].ShuffleVec;
9615 
9616   SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
9617                                          ShuffleOps[1], Mask);
9618   SDValue V = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
9619 
9620   LLVM_DEBUG(dbgs() << "Reshuffle, creating node: "; Shuffle.dump();
9621              dbgs() << "Reshuffle, creating node: "; V.dump(););
9622 
9623   return V;
9624 }
9625 
9626 // check if an EXT instruction can handle the shuffle mask when the
9627 // vector sources of the shuffle are the same.
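// For example, for v4i32 the mask <1, 2, 3, 0> is a single-source EXT with
// Imm == 1.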
9628 static bool isSingletonEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
9629   unsigned NumElts = VT.getVectorNumElements();
9630 
9631   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
9632   if (M[0] < 0)
9633     return false;
9634 
9635   Imm = M[0];
9636 
9637   // If this is a VEXT shuffle, the immediate value is the index of the first
9638   // element.  The other shuffle indices must be the successive elements after
9639   // the first one.
9640   unsigned ExpectedElt = Imm;
9641   for (unsigned i = 1; i < NumElts; ++i) {
9642     // Increment the expected index.  If it wraps around, just follow it
9643     // back to index zero and keep going.
9644     ++ExpectedElt;
9645     if (ExpectedElt == NumElts)
9646       ExpectedElt = 0;
9647 
9648     if (M[i] < 0)
9649       continue; // ignore UNDEF indices
9650     if (ExpectedElt != static_cast<unsigned>(M[i]))
9651       return false;
9652   }
9653 
9654   return true;
9655 }
9656 
// Detect patterns of a0,a1,a2,a3,b0,b1,b2,b3,c0,c1,c2,c3,d0,d1,d2,d3 from
// v4i32s or v4i16s. This is really a truncate, which we can construct out of
// (legal) concats and truncate nodes.
static SDValue ReconstructTruncateFromBuildVector(SDValue V,
                                                  SelectionDAG &DAG) {
9661   if (V.getValueType() != MVT::v16i8)
9662     return SDValue();
9663   assert(V.getNumOperands() == 16 && "Expected 16 operands on the BUILDVECTOR");
9664 
9665   for (unsigned X = 0; X < 4; X++) {
9666     // Check the first item in each group is an extract from lane 0 of a v4i32
9667     // or v4i16.
9668     SDValue BaseExt = V.getOperand(X * 4);
9669     if (BaseExt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9670         (BaseExt.getOperand(0).getValueType() != MVT::v4i16 &&
9671          BaseExt.getOperand(0).getValueType() != MVT::v4i32) ||
9672         !isa<ConstantSDNode>(BaseExt.getOperand(1)) ||
9673         BaseExt.getConstantOperandVal(1) != 0)
9674       return SDValue();
9675     SDValue Base = BaseExt.getOperand(0);
9676     // And check the other items are extracts from the same vector.
9677     for (unsigned Y = 1; Y < 4; Y++) {
9678       SDValue Ext = V.getOperand(X * 4 + Y);
9679       if (Ext.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9680           Ext.getOperand(0) != Base ||
9681           !isa<ConstantSDNode>(Ext.getOperand(1)) ||
9682           Ext.getConstantOperandVal(1) != Y)
9683         return SDValue();
9684     }
9685   }
9686 
  // Turn the buildvector into a series of truncates and concats, which will
  // become uzip1s. Any v4i32s we found get truncated to v4i16, which are
  // concatenated together to produce 2 v8i16s. These are both truncated and
  // concatenated together.
9691   SDLoc DL(V);
9692   SDValue Trunc[4] = {
9693       V.getOperand(0).getOperand(0), V.getOperand(4).getOperand(0),
9694       V.getOperand(8).getOperand(0), V.getOperand(12).getOperand(0)};
9695   for (int I = 0; I < 4; I++)
9696     if (Trunc[I].getValueType() == MVT::v4i32)
9697       Trunc[I] = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i16, Trunc[I]);
9698   SDValue Concat0 =
9699       DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16, Trunc[0], Trunc[1]);
9700   SDValue Concat1 =
9701       DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16, Trunc[2], Trunc[3]);
9702   SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, Concat0);
9703   SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, Concat1);
9704   return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, Trunc0, Trunc1);
9705 }
9706 
/// Check if a vector shuffle corresponds to a DUP instruction with a larger
/// element width than the vector lane type. If that is the case, the function
/// returns true and writes the value of the DUP instruction lane operand into
/// DupLaneOp.
9711 static bool isWideDUPMask(ArrayRef<int> M, EVT VT, unsigned BlockSize,
9712                           unsigned &DupLaneOp) {
9713   assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
9714          "Only possible block sizes for wide DUP are: 16, 32, 64");
9715 
9716   if (BlockSize <= VT.getScalarSizeInBits())
9717     return false;
9718   if (BlockSize % VT.getScalarSizeInBits() != 0)
9719     return false;
9720   if (VT.getSizeInBits() % BlockSize != 0)
9721     return false;
9722 
9723   size_t SingleVecNumElements = VT.getVectorNumElements();
9724   size_t NumEltsPerBlock = BlockSize / VT.getScalarSizeInBits();
9725   size_t NumBlocks = VT.getSizeInBits() / BlockSize;
9726 
9727   // We are looking for masks like
9728   // [0, 1, 0, 1] or [2, 3, 2, 3] or [4, 5, 6, 7, 4, 5, 6, 7] where any element
9729   // might be replaced by 'undefined'. BlockIndices will eventually contain
9730   // lane indices of the duplicated block (i.e. [0, 1], [2, 3] and [4, 5, 6, 7]
9731   // for the above examples)
9732   SmallVector<int, 8> BlockElts(NumEltsPerBlock, -1);
9733   for (size_t BlockIndex = 0; BlockIndex < NumBlocks; BlockIndex++)
9734     for (size_t I = 0; I < NumEltsPerBlock; I++) {
9735       int Elt = M[BlockIndex * NumEltsPerBlock + I];
9736       if (Elt < 0)
9737         continue;
9738       // For now we don't support shuffles that use the second operand
9739       if ((unsigned)Elt >= SingleVecNumElements)
9740         return false;
9741       if (BlockElts[I] < 0)
9742         BlockElts[I] = Elt;
9743       else if (BlockElts[I] != Elt)
9744         return false;
9745     }
9746 
9747   // We found a candidate block (possibly with some undefs). It must be a
9748   // sequence of consecutive integers starting with a value divisible by
9749   // NumEltsPerBlock with some values possibly replaced by undef-s.
9750 
9751   // Find first non-undef element
9752   auto FirstRealEltIter = find_if(BlockElts, [](int Elt) { return Elt >= 0; });
9753   assert(FirstRealEltIter != BlockElts.end() &&
9754          "Shuffle with all-undefs must have been caught by previous cases, "
9755          "e.g. isSplat()");
9756   if (FirstRealEltIter == BlockElts.end()) {
9757     DupLaneOp = 0;
9758     return true;
9759   }
9760 
9761   // Index of FirstRealElt in BlockElts
9762   size_t FirstRealIndex = FirstRealEltIter - BlockElts.begin();
9763 
9764   if ((unsigned)*FirstRealEltIter < FirstRealIndex)
9765     return false;
9766   // BlockElts[0] must have the following value if it isn't undef:
9767   size_t Elt0 = *FirstRealEltIter - FirstRealIndex;
9768 
9769   // Check the first element
9770   if (Elt0 % NumEltsPerBlock != 0)
9771     return false;
9772   // Check that the sequence indeed consists of consecutive integers (modulo
9773   // undefs)
9774   for (size_t I = 0; I < NumEltsPerBlock; I++)
9775     if (BlockElts[I] >= 0 && (unsigned)BlockElts[I] != Elt0 + I)
9776       return false;
9777 
9778   DupLaneOp = Elt0 / NumEltsPerBlock;
9779   return true;
9780 }
9781 
9782 // check if an EXT instruction can handle the shuffle mask when the
9783 // vector sources of the shuffle are different.
9784 static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
9785                       unsigned &Imm) {
9786   // Look for the first non-undef element.
9787   const int *FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });
9788 
  // Benefit from APInt to handle overflow when calculating expected element.
9790   unsigned NumElts = VT.getVectorNumElements();
9791   unsigned MaskBits = APInt(32, NumElts * 2).logBase2();
9792   APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);
9793   // The following shuffle indices must be the successive elements after the
9794   // first real element.
9795   const int *FirstWrongElt = std::find_if(FirstRealElt + 1, M.end(),
9796       [&](int Elt) {return Elt != ExpectedElt++ && Elt != -1;});
9797   if (FirstWrongElt != M.end())
9798     return false;
9799 
9800   // The index of an EXT is the first element if it is not UNDEF.
9801   // Watch out for the beginning UNDEFs. The EXT index should be the expected
9802   // value of the first element.  E.g.
9803   // <-1, -1, 3, ...> is treated as <1, 2, 3, ...>.
9804   // <-1, -1, 0, 1, ...> is treated as <2*NumElts-2, 2*NumElts-1, 0, 1, ...>.
9805   // ExpectedElt is the last mask index plus 1.
9806   Imm = ExpectedElt.getZExtValue();
9807 
  // There are two different cases requiring us to reverse the input vectors.
9809   // For example, for vector <4 x i32> we have the following cases,
9810   // Case 1: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, -1, 0>)
9811   // Case 2: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, 7, 0>)
9812   // For both cases, we finally use mask <5, 6, 7, 0>, which requires
9813   // to reverse two input vectors.
9814   if (Imm < NumElts)
9815     ReverseEXT = true;
9816   else
9817     Imm -= NumElts;
9818 
9819   return true;
9820 }
9821 
9822 /// isREVMask - Check if a vector shuffle corresponds to a REV
9823 /// instruction with the specified blocksize.  (The order of the elements
9824 /// within each block of the vector is reversed.)
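/// For example, a REV32 of v8i16 uses the mask <1, 0, 3, 2, 5, 4, 7, 6>.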
9825 static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
9826   assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
9827          "Only possible block sizes for REV are: 16, 32, 64");
9828 
9829   unsigned EltSz = VT.getScalarSizeInBits();
9830   if (EltSz == 64)
9831     return false;
9832 
9833   unsigned NumElts = VT.getVectorNumElements();
9834   unsigned BlockElts = M[0] + 1;
9835   // If the first shuffle index is UNDEF, be optimistic.
9836   if (M[0] < 0)
9837     BlockElts = BlockSize / EltSz;
9838 
9839   if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
9840     return false;
9841 
9842   for (unsigned i = 0; i < NumElts; ++i) {
9843     if (M[i] < 0)
9844       continue; // ignore UNDEF indices
9845     if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
9846       return false;
9847   }
9848 
9849   return true;
9850 }
9851 
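/// isZIPMask - Check if a shuffle mask interleaves the low halves (ZIP1) or
/// the high halves (ZIP2) of the two sources; e.g. for v4i32 the masks are
/// <0, 4, 1, 5> and <2, 6, 3, 7> respectively.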
9852 static bool isZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9853   unsigned NumElts = VT.getVectorNumElements();
9854   if (NumElts % 2 != 0)
9855     return false;
9856   WhichResult = (M[0] == 0 ? 0 : 1);
9857   unsigned Idx = WhichResult * NumElts / 2;
9858   for (unsigned i = 0; i != NumElts; i += 2) {
9859     if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
9860         (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx + NumElts))
9861       return false;
9862     Idx += 1;
9863   }
9864 
9865   return true;
9866 }
9867 
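/// isUZPMask - Check if a shuffle mask picks the even-indexed (UZP1) or
/// odd-indexed (UZP2) elements of the concatenated sources; e.g. for v4i32
/// the masks are <0, 2, 4, 6> and <1, 3, 5, 7> respectively.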
9868 static bool isUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9869   unsigned NumElts = VT.getVectorNumElements();
9870   WhichResult = (M[0] == 0 ? 0 : 1);
9871   for (unsigned i = 0; i != NumElts; ++i) {
9872     if (M[i] < 0)
9873       continue; // ignore UNDEF indices
9874     if ((unsigned)M[i] != 2 * i + WhichResult)
9875       return false;
9876   }
9877 
9878   return true;
9879 }
9880 
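/// isTRNMask - Check if a shuffle mask transposes even (TRN1) or odd (TRN2)
/// lanes of the two sources; e.g. for v4i32 the masks are <0, 4, 2, 6> and
/// <1, 5, 3, 7> respectively.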
9881 static bool isTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9882   unsigned NumElts = VT.getVectorNumElements();
9883   if (NumElts % 2 != 0)
9884     return false;
9885   WhichResult = (M[0] == 0 ? 0 : 1);
9886   for (unsigned i = 0; i < NumElts; i += 2) {
9887     if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) ||
9888         (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + NumElts + WhichResult))
9889       return false;
9890   }
9891   return true;
9892 }
9893 
9894 /// isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of
9895 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
9896 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
9897 static bool isZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9898   unsigned NumElts = VT.getVectorNumElements();
9899   if (NumElts % 2 != 0)
9900     return false;
9901   WhichResult = (M[0] == 0 ? 0 : 1);
9902   unsigned Idx = WhichResult * NumElts / 2;
9903   for (unsigned i = 0; i != NumElts; i += 2) {
9904     if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
9905         (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx))
9906       return false;
9907     Idx += 1;
9908   }
9909 
9910   return true;
9911 }
9912 
9913 /// isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of
9914 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
9916 static bool isUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9917   unsigned Half = VT.getVectorNumElements() / 2;
9918   WhichResult = (M[0] == 0 ? 0 : 1);
9919   for (unsigned j = 0; j != 2; ++j) {
9920     unsigned Idx = WhichResult;
9921     for (unsigned i = 0; i != Half; ++i) {
9922       int MIdx = M[i + j * Half];
9923       if (MIdx >= 0 && (unsigned)MIdx != Idx)
9924         return false;
9925       Idx += 2;
9926     }
9927   }
9928 
9929   return true;
9930 }
9931 
9932 /// isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of
9933 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
9934 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
9935 static bool isTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9936   unsigned NumElts = VT.getVectorNumElements();
9937   if (NumElts % 2 != 0)
9938     return false;
9939   WhichResult = (M[0] == 0 ? 0 : 1);
9940   for (unsigned i = 0; i < NumElts; i += 2) {
9941     if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) ||
9942         (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + WhichResult))
9943       return false;
9944   }
9945   return true;
9946 }
9947 
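// Check whether a shuffle mask is an identity copy of one input with a single
// element (the "Anomaly") taken from the other input, which can be matched to
// an INS (insert lane) instruction.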
9948 static bool isINSMask(ArrayRef<int> M, int NumInputElements,
9949                       bool &DstIsLeft, int &Anomaly) {
9950   if (M.size() != static_cast<size_t>(NumInputElements))
9951     return false;
9952 
9953   int NumLHSMatch = 0, NumRHSMatch = 0;
9954   int LastLHSMismatch = -1, LastRHSMismatch = -1;
9955 
9956   for (int i = 0; i < NumInputElements; ++i) {
9957     if (M[i] == -1) {
9958       ++NumLHSMatch;
9959       ++NumRHSMatch;
9960       continue;
9961     }
9962 
9963     if (M[i] == i)
9964       ++NumLHSMatch;
9965     else
9966       LastLHSMismatch = i;
9967 
9968     if (M[i] == i + NumInputElements)
9969       ++NumRHSMatch;
9970     else
9971       LastRHSMismatch = i;
9972   }
9973 
9974   if (NumLHSMatch == NumInputElements - 1) {
9975     DstIsLeft = true;
9976     Anomaly = LastLHSMismatch;
9977     return true;
9978   } else if (NumRHSMatch == NumInputElements - 1) {
9979     DstIsLeft = false;
9980     Anomaly = LastRHSMismatch;
9981     return true;
9982   }
9983 
9984   return false;
9985 }
9986 
9987 static bool isConcatMask(ArrayRef<int> Mask, EVT VT, bool SplitLHS) {
9988   if (VT.getSizeInBits() != 128)
9989     return false;
9990 
9991   unsigned NumElts = VT.getVectorNumElements();
9992 
9993   for (int I = 0, E = NumElts / 2; I != E; I++) {
9994     if (Mask[I] != I)
9995       return false;
9996   }
9997 
9998   int Offset = NumElts / 2;
9999   for (int I = NumElts / 2, E = NumElts; I != E; I++) {
10000     if (Mask[I] != I + SplitLHS * Offset)
10001       return false;
10002   }
10003 
10004   return true;
10005 }
10006 
10007 static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) {
10008   SDLoc DL(Op);
10009   EVT VT = Op.getValueType();
10010   SDValue V0 = Op.getOperand(0);
10011   SDValue V1 = Op.getOperand(1);
10012   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
10013 
10014   if (VT.getVectorElementType() != V0.getValueType().getVectorElementType() ||
10015       VT.getVectorElementType() != V1.getValueType().getVectorElementType())
10016     return SDValue();
10017 
10018   bool SplitV0 = V0.getValueSizeInBits() == 128;
10019 
10020   if (!isConcatMask(Mask, VT, SplitV0))
10021     return SDValue();
10022 
10023   EVT CastVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
10024   if (SplitV0) {
10025     V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0,
10026                      DAG.getConstant(0, DL, MVT::i64));
10027   }
10028   if (V1.getValueSizeInBits() == 128) {
10029     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1,
10030                      DAG.getConstant(0, DL, MVT::i64));
10031   }
10032   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, V0, V1);
10033 }
10034 
/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle. ID is the perfect-shuffle
/// ID, V1 and V2 are the original shuffle inputs. PFEntry is the Perfect
/// shuffle table entry and LHS/RHS are the immediate inputs for this stage of
/// the shuffle.
10040 static SDValue GeneratePerfectShuffle(unsigned ID, SDValue V1,
10041                                       SDValue V2, unsigned PFEntry, SDValue LHS,
10042                                       SDValue RHS, SelectionDAG &DAG,
10043                                       const SDLoc &dl) {
10044   unsigned OpNum = (PFEntry >> 26) & 0x0F;
10045   unsigned LHSID = (PFEntry >> 13) & ((1 << 13) - 1);
10046   unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1);
10047 
10048   enum {
10049     OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
10050     OP_VREV,
10051     OP_VDUP0,
10052     OP_VDUP1,
10053     OP_VDUP2,
10054     OP_VDUP3,
10055     OP_VEXT1,
10056     OP_VEXT2,
10057     OP_VEXT3,
10058     OP_VUZPL,  // VUZP, left result
10059     OP_VUZPR,  // VUZP, right result
10060     OP_VZIPL,  // VZIP, left result
10061     OP_VZIPR,  // VZIP, right result
10062     OP_VTRNL,  // VTRN, left result
10063     OP_VTRNR,  // VTRN, right result
10064     OP_MOVLANE // Move lane. RHSID is the lane to move into
10065   };
10066 
10067   if (OpNum == OP_COPY) {
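    // LHSID/RHSID encode a 4-lane mask as base-9 digits; (1*9+2)*9+3 is the
    // identity mask <0,1,2,3> (copy LHS) and ((4*9+5)*9+6)*9+7 is <4,5,6,7>
    // (copy RHS).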
10068     if (LHSID == (1 * 9 + 2) * 9 + 3)
10069       return LHS;
10070     assert(LHSID == ((4 * 9 + 5) * 9 + 6) * 9 + 7 && "Illegal OP_COPY!");
10071     return RHS;
10072   }
10073 
10074   if (OpNum == OP_MOVLANE) {
10075     // Decompose a PerfectShuffle ID to get the Mask for lane Elt
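    // (each of the four lanes is a base-9 digit of the ID, most significant
    // digit first; the digit 8 denotes an undef lane).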
10076     auto getPFIDLane = [](unsigned ID, int Elt) -> int {
10077       assert(Elt < 4 && "Expected Perfect Lanes to be less than 4");
10078       Elt = 3 - Elt;
10079       while (Elt > 0) {
10080         ID /= 9;
10081         Elt--;
10082       }
10083       return (ID % 9 == 8) ? -1 : ID % 9;
10084     };
10085 
    // For OP_MOVLANE shuffles, the RHSID represents the lane to move into. We
    // get the lane to move from the PFID, which is always from the original
    // vectors (V1 or V2).
10089     SDValue OpLHS = GeneratePerfectShuffle(
10090         LHSID, V1, V2, PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
10091     EVT VT = OpLHS.getValueType();
10092     assert(RHSID < 8 && "Expected a lane index for RHSID!");
10093     unsigned ExtLane = 0;
10094     SDValue Input;
10095 
    // OP_MOVLANE shuffles are either D movs (if bit 0x4 is set) or S movs. D
    // movs convert into a higher type.
10098     if (RHSID & 0x4) {
10099       int MaskElt = getPFIDLane(ID, (RHSID & 0x01) << 1) >> 1;
10100       if (MaskElt == -1)
10101         MaskElt = (getPFIDLane(ID, ((RHSID & 0x01) << 1) + 1) - 1) >> 1;
10102       assert(MaskElt >= 0 && "Didn't expect an undef movlane index!");
10103       ExtLane = MaskElt < 2 ? MaskElt : (MaskElt - 2);
10104       Input = MaskElt < 2 ? V1 : V2;
10105       if (VT.getScalarSizeInBits() == 16) {
10106         Input = DAG.getBitcast(MVT::v2f32, Input);
10107         OpLHS = DAG.getBitcast(MVT::v2f32, OpLHS);
10108       } else {
10109         assert(VT.getScalarSizeInBits() == 32 &&
10110                "Expected 16 or 32 bit shuffle elemements");
10111         Input = DAG.getBitcast(MVT::v2f64, Input);
10112         OpLHS = DAG.getBitcast(MVT::v2f64, OpLHS);
10113       }
10114     } else {
10115       int MaskElt = getPFIDLane(ID, RHSID);
10116       assert(MaskElt >= 0 && "Didn't expect an undef movlane index!");
10117       ExtLane = MaskElt < 4 ? MaskElt : (MaskElt - 4);
10118       Input = MaskElt < 4 ? V1 : V2;
10119       // Be careful about creating illegal types. Use f16 instead of i16.
10120       if (VT == MVT::v4i16) {
10121         Input = DAG.getBitcast(MVT::v4f16, Input);
10122         OpLHS = DAG.getBitcast(MVT::v4f16, OpLHS);
10123       }
10124     }
10125     SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
10126                               Input.getValueType().getVectorElementType(),
10127                               Input, DAG.getVectorIdxConstant(ExtLane, dl));
10128     SDValue Ins =
10129         DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, Input.getValueType(), OpLHS,
10130                     Ext, DAG.getVectorIdxConstant(RHSID & 0x3, dl));
10131     return DAG.getBitcast(VT, Ins);
10132   }
10133 
10134   SDValue OpLHS, OpRHS;
10135   OpLHS = GeneratePerfectShuffle(LHSID, V1, V2, PerfectShuffleTable[LHSID], LHS,
10136                                  RHS, DAG, dl);
10137   OpRHS = GeneratePerfectShuffle(RHSID, V1, V2, PerfectShuffleTable[RHSID], LHS,
10138                                  RHS, DAG, dl);
10139   EVT VT = OpLHS.getValueType();
10140 
10141   switch (OpNum) {
10142   default:
10143     llvm_unreachable("Unknown shuffle opcode!");
10144   case OP_VREV:
10145     // VREV divides the vector in half and swaps within the half.
10146     if (VT.getVectorElementType() == MVT::i32 ||
10147         VT.getVectorElementType() == MVT::f32)
10148       return DAG.getNode(AArch64ISD::REV64, dl, VT, OpLHS);
10149     // vrev <4 x i16> -> REV32
10150     if (VT.getVectorElementType() == MVT::i16 ||
10151         VT.getVectorElementType() == MVT::f16 ||
10152         VT.getVectorElementType() == MVT::bf16)
10153       return DAG.getNode(AArch64ISD::REV32, dl, VT, OpLHS);
10154     // vrev <4 x i8> -> REV16
10155     assert(VT.getVectorElementType() == MVT::i8);
10156     return DAG.getNode(AArch64ISD::REV16, dl, VT, OpLHS);
10157   case OP_VDUP0:
10158   case OP_VDUP1:
10159   case OP_VDUP2:
10160   case OP_VDUP3: {
10161     EVT EltTy = VT.getVectorElementType();
10162     unsigned Opcode;
10163     if (EltTy == MVT::i8)
10164       Opcode = AArch64ISD::DUPLANE8;
10165     else if (EltTy == MVT::i16 || EltTy == MVT::f16 || EltTy == MVT::bf16)
10166       Opcode = AArch64ISD::DUPLANE16;
10167     else if (EltTy == MVT::i32 || EltTy == MVT::f32)
10168       Opcode = AArch64ISD::DUPLANE32;
10169     else if (EltTy == MVT::i64 || EltTy == MVT::f64)
10170       Opcode = AArch64ISD::DUPLANE64;
10171     else
10172       llvm_unreachable("Invalid vector element type?");
10173 
10174     if (VT.getSizeInBits() == 64)
10175       OpLHS = WidenVector(OpLHS, DAG);
10176     SDValue Lane = DAG.getConstant(OpNum - OP_VDUP0, dl, MVT::i64);
10177     return DAG.getNode(Opcode, dl, VT, OpLHS, Lane);
10178   }
10179   case OP_VEXT1:
10180   case OP_VEXT2:
10181   case OP_VEXT3: {
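    // The EXT immediate is in bytes: the element count (OpNum - OP_VEXT1 + 1)
    // is scaled by getExtFactor, which is expected to give the element size
    // in bytes.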
10182     unsigned Imm = (OpNum - OP_VEXT1 + 1) * getExtFactor(OpLHS);
10183     return DAG.getNode(AArch64ISD::EXT, dl, VT, OpLHS, OpRHS,
10184                        DAG.getConstant(Imm, dl, MVT::i32));
10185   }
10186   case OP_VUZPL:
10187     return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), OpLHS,
10188                        OpRHS);
10189   case OP_VUZPR:
10190     return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), OpLHS,
10191                        OpRHS);
10192   case OP_VZIPL:
10193     return DAG.getNode(AArch64ISD::ZIP1, dl, DAG.getVTList(VT, VT), OpLHS,
10194                        OpRHS);
10195   case OP_VZIPR:
10196     return DAG.getNode(AArch64ISD::ZIP2, dl, DAG.getVTList(VT, VT), OpLHS,
10197                        OpRHS);
10198   case OP_VTRNL:
10199     return DAG.getNode(AArch64ISD::TRN1, dl, DAG.getVTList(VT, VT), OpLHS,
10200                        OpRHS);
10201   case OP_VTRNR:
10202     return DAG.getNode(AArch64ISD::TRN2, dl, DAG.getVTList(VT, VT), OpLHS,
10203                        OpRHS);
10204   }
10205 }
10206 
10207 static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
10208                            SelectionDAG &DAG) {
10209   // Check to see if we can use the TBL instruction.
10210   SDValue V1 = Op.getOperand(0);
10211   SDValue V2 = Op.getOperand(1);
10212   SDLoc DL(Op);
10213 
10214   EVT EltVT = Op.getValueType().getVectorElementType();
10215   unsigned BytesPerElt = EltVT.getSizeInBits() / 8;
10216 
10217   bool Swap = false;
10218   if (V1.isUndef() || isZerosVector(V1.getNode())) {
10219     std::swap(V1, V2);
10220     Swap = true;
10221   }
10222 
10223   // If the V2 source is undef or zero then we can use a tbl1, as tbl1 will fill
10224   // out of range values with 0s. We do need to make sure that any out-of-range
10225   // values are really out-of-range for a v16i8 vector.
10226   bool IsUndefOrZero = V2.isUndef() || isZerosVector(V2.getNode());
10227   MVT IndexVT = MVT::v8i8;
10228   unsigned IndexLen = 8;
10229   if (Op.getValueSizeInBits() == 128) {
10230     IndexVT = MVT::v16i8;
10231     IndexLen = 16;
10232   }
10233 
10234   SmallVector<SDValue, 8> TBLMask;
10235   for (int Val : ShuffleMask) {
10236     for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) {
10237       unsigned Offset = Byte + Val * BytesPerElt;
10238       if (Swap)
10239         Offset = Offset < IndexLen ? Offset + IndexLen : Offset - IndexLen;
10240       if (IsUndefOrZero && Offset >= IndexLen)
10241         Offset = 255;
10242       TBLMask.push_back(DAG.getConstant(Offset, DL, MVT::i32));
10243     }
10244   }
10245 
10246   SDValue V1Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V1);
10247   SDValue V2Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V2);
10248 
10249   SDValue Shuffle;
10250   if (IsUndefOrZero) {
10251     if (IndexLen == 8)
10252       V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V1Cst);
10253     Shuffle = DAG.getNode(
10254         ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
10255         DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
10256         DAG.getBuildVector(IndexVT, DL,
10257                            makeArrayRef(TBLMask.data(), IndexLen)));
10258   } else {
10259     if (IndexLen == 8) {
10260       V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V2Cst);
10261       Shuffle = DAG.getNode(
10262           ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
10263           DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
10264           DAG.getBuildVector(IndexVT, DL,
10265                              makeArrayRef(TBLMask.data(), IndexLen)));
10266     } else {
10267       // FIXME: We cannot, for the moment, emit a TBL2 instruction because we
10268       // cannot currently represent the register constraints on the input
10269       // table registers.
10270       //  Shuffle = DAG.getNode(AArch64ISD::TBL2, DL, IndexVT, V1Cst, V2Cst,
10271       //                   DAG.getBuildVector(IndexVT, DL, &TBLMask[0],
10272       //                   IndexLen));
10273       Shuffle = DAG.getNode(
10274           ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
10275           DAG.getConstant(Intrinsic::aarch64_neon_tbl2, DL, MVT::i32), V1Cst,
10276           V2Cst, DAG.getBuildVector(IndexVT, DL,
10277                                     makeArrayRef(TBLMask.data(), IndexLen)));
10278     }
10279   }
10280   return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Shuffle);
10281 }
10282 
10283 static unsigned getDUPLANEOp(EVT EltType) {
10284   if (EltType == MVT::i8)
10285     return AArch64ISD::DUPLANE8;
10286   if (EltType == MVT::i16 || EltType == MVT::f16 || EltType == MVT::bf16)
10287     return AArch64ISD::DUPLANE16;
10288   if (EltType == MVT::i32 || EltType == MVT::f32)
10289     return AArch64ISD::DUPLANE32;
10290   if (EltType == MVT::i64 || EltType == MVT::f64)
10291     return AArch64ISD::DUPLANE64;
10292 
10293   llvm_unreachable("Invalid vector element type?");
10294 }
10295 
10296 static SDValue constructDup(SDValue V, int Lane, SDLoc dl, EVT VT,
10297                             unsigned Opcode, SelectionDAG &DAG) {
10298   // Try to eliminate a bitcasted extract subvector before a DUPLANE.
10299   auto getScaledOffsetDup = [](SDValue BitCast, int &LaneC, MVT &CastVT) {
10300     // Match: dup (bitcast (extract_subv X, C)), LaneC
10301     if (BitCast.getOpcode() != ISD::BITCAST ||
10302         BitCast.getOperand(0).getOpcode() != ISD::EXTRACT_SUBVECTOR)
10303       return false;
10304 
10305     // The extract index must align in the destination type. That may not
    // happen if the bitcast is from a narrow to a wide type.
10307     SDValue Extract = BitCast.getOperand(0);
10308     unsigned ExtIdx = Extract.getConstantOperandVal(1);
10309     unsigned SrcEltBitWidth = Extract.getScalarValueSizeInBits();
10310     unsigned ExtIdxInBits = ExtIdx * SrcEltBitWidth;
10311     unsigned CastedEltBitWidth = BitCast.getScalarValueSizeInBits();
10312     if (ExtIdxInBits % CastedEltBitWidth != 0)
10313       return false;
10314 
10315     // Can't handle cases where vector size is not 128-bit
10316     if (!Extract.getOperand(0).getValueType().is128BitVector())
10317       return false;
10318 
10319     // Update the lane value by offsetting with the scaled extract index.
10320     LaneC += ExtIdxInBits / CastedEltBitWidth;
10321 
10322     // Determine the casted vector type of the wide vector input.
10323     // dup (bitcast (extract_subv X, C)), LaneC --> dup (bitcast X), LaneC'
10324     // Examples:
10325     // dup (bitcast (extract_subv v2f64 X, 1) to v2f32), 1 --> dup v4f32 X, 3
10326     // dup (bitcast (extract_subv v16i8 X, 8) to v4i16), 1 --> dup v8i16 X, 5
10327     unsigned SrcVecNumElts =
10328         Extract.getOperand(0).getValueSizeInBits() / CastedEltBitWidth;
10329     CastVT = MVT::getVectorVT(BitCast.getSimpleValueType().getScalarType(),
10330                               SrcVecNumElts);
10331     return true;
10332   };
10333   MVT CastVT;
10334   if (getScaledOffsetDup(V, Lane, CastVT)) {
10335     V = DAG.getBitcast(CastVT, V.getOperand(0).getOperand(0));
10336   } else if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
10337              V.getOperand(0).getValueType().is128BitVector()) {
10338     // The lane is incremented by the index of the extract.
10339     // Example: dup v2f32 (extract v4f32 X, 2), 1 --> dup v4f32 X, 3
10340     Lane += V.getConstantOperandVal(1);
10341     V = V.getOperand(0);
10342   } else if (V.getOpcode() == ISD::CONCAT_VECTORS) {
10343     // The lane is decremented if we are splatting from the 2nd operand.
10344     // Example: dup v4i32 (concat v2i32 X, v2i32 Y), 3 --> dup v4i32 Y, 1
10345     unsigned Idx = Lane >= (int)VT.getVectorNumElements() / 2;
10346     Lane -= Idx * VT.getVectorNumElements() / 2;
10347     V = WidenVector(V.getOperand(Idx), DAG);
10348   } else if (VT.getSizeInBits() == 64) {
10349     // Widen the operand to 128-bit register with undef.
10350     V = WidenVector(V, DAG);
10351   }
10352   return DAG.getNode(Opcode, dl, VT, V, DAG.getConstant(Lane, dl, MVT::i64));
10353 }
10354 
// Return true if the parameter mask array M can be halved into a new shuffle
// mask over elements of twice the width: every pair of adjacent mask values
// must select the two halves of a single wide element (undefs are allowed).
// The halved mask is returned in NewMask.
10358 static bool isWideTypeMask(ArrayRef<int> M, EVT VT,
10359                            SmallVectorImpl<int> &NewMask) {
10360   unsigned NumElts = VT.getVectorNumElements();
10361   if (NumElts % 2 != 0)
10362     return false;
10363 
10364   NewMask.clear();
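  // For example, M = <-1,5,2,-1> pairs up as (-1,5) and (2,-1), producing
  // NewMask = <2,1>; a mask like <1,2,...> fails because its first pair does
  // not start at an even index.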
10365   for (unsigned i = 0; i < NumElts; i += 2) {
10366     int M0 = M[i];
10367     int M1 = M[i + 1];
10368 
10369     // If both elements are undef, new mask is undef too.
10370     if (M0 == -1 && M1 == -1) {
10371       NewMask.push_back(-1);
10372       continue;
10373     }
10374 
10375     if (M0 == -1 && M1 != -1 && (M1 % 2) == 1) {
10376       NewMask.push_back(M1 / 2);
10377       continue;
10378     }
10379 
10380     if (M0 != -1 && (M0 % 2) == 0 && ((M0 + 1) == M1 || M1 == -1)) {
10381       NewMask.push_back(M0 / 2);
10382       continue;
10383     }
10384 
10385     NewMask.clear();
10386     return false;
10387   }
10388 
10389   assert(NewMask.size() == NumElts / 2 && "Incorrect size for mask!");
10390   return true;
10391 }
10392 
10393 // Try to widen element type to get a new mask value for a better permutation
10394 // sequence, so that we can use NEON shuffle instructions, such as zip1/2,
10395 // UZP1/2, TRN1/2, REV, INS, etc.
10396 // For example:
10397 //  shufflevector <4 x i32> %a, <4 x i32> %b,
10398 //                <4 x i32> <i32 6, i32 7, i32 2, i32 3>
10399 // is equivalent to:
10400 //  shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1>
10401 // Finally, we can get:
10402 //  mov     v0.d[0], v1.d[1]
10403 static SDValue tryWidenMaskForShuffle(SDValue Op, SelectionDAG &DAG) {
10404   SDLoc DL(Op);
10405   EVT VT = Op.getValueType();
10406   EVT ScalarVT = VT.getVectorElementType();
10407   unsigned ElementSize = ScalarVT.getFixedSizeInBits();
10408   SDValue V0 = Op.getOperand(0);
10409   SDValue V1 = Op.getOperand(1);
10410   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
10411 
10412   // If combining adjacent elements, like two i16's -> i32, two i32's -> i64 ...
10413   // We need to make sure the wider element type is legal. Thus, ElementSize
  // should not be larger than 32 bits, and the i1 type should also be excluded.
10415   if (ElementSize > 32 || ElementSize == 1)
10416     return SDValue();
10417 
10418   SmallVector<int, 8> NewMask;
10419   if (isWideTypeMask(Mask, VT, NewMask)) {
10420     MVT NewEltVT = VT.isFloatingPoint()
10421                        ? MVT::getFloatingPointVT(ElementSize * 2)
10422                        : MVT::getIntegerVT(ElementSize * 2);
10423     MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
10424     if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
10425       V0 = DAG.getBitcast(NewVT, V0);
10426       V1 = DAG.getBitcast(NewVT, V1);
10427       return DAG.getBitcast(VT,
10428                             DAG.getVectorShuffle(NewVT, DL, V0, V1, NewMask));
10429     }
10430   }
10431 
10432   return SDValue();
10433 }
10434 
10435 SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
10436                                                    SelectionDAG &DAG) const {
10437   SDLoc dl(Op);
10438   EVT VT = Op.getValueType();
10439 
10440   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
10441 
10442   if (useSVEForFixedLengthVectorVT(VT))
10443     return LowerFixedLengthVECTOR_SHUFFLEToSVE(Op, DAG);
10444 
10445   // Convert shuffles that are directly supported on NEON to target-specific
10446   // DAG nodes, instead of keeping them as shuffles and matching them again
10447   // during code selection.  This is more efficient and avoids the possibility
10448   // of inconsistencies between legalization and selection.
10449   ArrayRef<int> ShuffleMask = SVN->getMask();
10450 
10451   SDValue V1 = Op.getOperand(0);
10452   SDValue V2 = Op.getOperand(1);
10453 
10454   assert(V1.getValueType() == VT && "Unexpected VECTOR_SHUFFLE type!");
10455   assert(ShuffleMask.size() == VT.getVectorNumElements() &&
10456          "Unexpected VECTOR_SHUFFLE mask size!");
10457 
10458   if (SVN->isSplat()) {
10459     int Lane = SVN->getSplatIndex();
    // If this is an undef splat, generate it via "just" vdup, if possible.
10461     if (Lane == -1)
10462       Lane = 0;
10463 
10464     if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR)
10465       return DAG.getNode(AArch64ISD::DUP, dl, V1.getValueType(),
10466                          V1.getOperand(0));
10467     // Test if V1 is a BUILD_VECTOR and the lane being referenced is a non-
10468     // constant. If so, we can just reference the lane's definition directly.
10469     if (V1.getOpcode() == ISD::BUILD_VECTOR &&
10470         !isa<ConstantSDNode>(V1.getOperand(Lane)))
10471       return DAG.getNode(AArch64ISD::DUP, dl, VT, V1.getOperand(Lane));
10472 
10473     // Otherwise, duplicate from the lane of the input vector.
10474     unsigned Opcode = getDUPLANEOp(V1.getValueType().getVectorElementType());
10475     return constructDup(V1, Lane, dl, VT, Opcode, DAG);
10476   }
10477 
10478   // Check if the mask matches a DUP for a wider element
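  // For instance, a v8i16 mask of <4,5,4,5,4,5,4,5> splats 32-bit lane 2, so
  // it can be lowered as DUPLANE32 on a v4i32 bitcast of V1.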
10479   for (unsigned LaneSize : {64U, 32U, 16U}) {
10480     unsigned Lane = 0;
10481     if (isWideDUPMask(ShuffleMask, VT, LaneSize, Lane)) {
10482       unsigned Opcode = LaneSize == 64 ? AArch64ISD::DUPLANE64
10483                                        : LaneSize == 32 ? AArch64ISD::DUPLANE32
10484                                                         : AArch64ISD::DUPLANE16;
10485       // Cast V1 to an integer vector with required lane size
10486       MVT NewEltTy = MVT::getIntegerVT(LaneSize);
10487       unsigned NewEltCount = VT.getSizeInBits() / LaneSize;
10488       MVT NewVecTy = MVT::getVectorVT(NewEltTy, NewEltCount);
10489       V1 = DAG.getBitcast(NewVecTy, V1);
      // Construct the DUP instruction
10491       V1 = constructDup(V1, Lane, dl, NewVecTy, Opcode, DAG);
10492       // Cast back to the original type
10493       return DAG.getBitcast(VT, V1);
10494     }
10495   }
10496 
10497   if (isREVMask(ShuffleMask, VT, 64))
10498     return DAG.getNode(AArch64ISD::REV64, dl, V1.getValueType(), V1, V2);
10499   if (isREVMask(ShuffleMask, VT, 32))
10500     return DAG.getNode(AArch64ISD::REV32, dl, V1.getValueType(), V1, V2);
10501   if (isREVMask(ShuffleMask, VT, 16))
10502     return DAG.getNode(AArch64ISD::REV16, dl, V1.getValueType(), V1, V2);
10503 
10504   if (((VT.getVectorNumElements() == 8 && VT.getScalarSizeInBits() == 16) ||
10505        (VT.getVectorNumElements() == 16 && VT.getScalarSizeInBits() == 8)) &&
10506       ShuffleVectorInst::isReverseMask(ShuffleMask)) {
10507     SDValue Rev = DAG.getNode(AArch64ISD::REV64, dl, VT, V1);
10508     return DAG.getNode(AArch64ISD::EXT, dl, VT, Rev, Rev,
10509                        DAG.getConstant(8, dl, MVT::i32));
10510   }
10511 
10512   bool ReverseEXT = false;
10513   unsigned Imm;
10514   if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm)) {
10515     if (ReverseEXT)
10516       std::swap(V1, V2);
10517     Imm *= getExtFactor(V1);
10518     return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V2,
10519                        DAG.getConstant(Imm, dl, MVT::i32));
10520   } else if (V2->isUndef() && isSingletonEXTMask(ShuffleMask, VT, Imm)) {
10521     Imm *= getExtFactor(V1);
10522     return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V1,
10523                        DAG.getConstant(Imm, dl, MVT::i32));
10524   }
10525 
10526   unsigned WhichResult;
10527   if (isZIPMask(ShuffleMask, VT, WhichResult)) {
10528     unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2;
10529     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
10530   }
10531   if (isUZPMask(ShuffleMask, VT, WhichResult)) {
10532     unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
10533     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
10534   }
10535   if (isTRNMask(ShuffleMask, VT, WhichResult)) {
10536     unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
10537     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
10538   }
10539 
10540   if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
10541     unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2;
10542     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
10543   }
10544   if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
10545     unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
10546     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
10547   }
10548   if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
10549     unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
10550     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
10551   }
10552 
10553   if (SDValue Concat = tryFormConcatFromShuffle(Op, DAG))
10554     return Concat;
10555 
10556   bool DstIsLeft;
10557   int Anomaly;
10558   int NumInputElements = V1.getValueType().getVectorNumElements();
10559   if (isINSMask(ShuffleMask, NumInputElements, DstIsLeft, Anomaly)) {
10560     SDValue DstVec = DstIsLeft ? V1 : V2;
10561     SDValue DstLaneV = DAG.getConstant(Anomaly, dl, MVT::i64);
10562 
10563     SDValue SrcVec = V1;
10564     int SrcLane = ShuffleMask[Anomaly];
10565     if (SrcLane >= NumInputElements) {
10566       SrcVec = V2;
10567       SrcLane -= VT.getVectorNumElements();
10568     }
10569     SDValue SrcLaneV = DAG.getConstant(SrcLane, dl, MVT::i64);
10570 
10571     EVT ScalarVT = VT.getVectorElementType();
10572 
10573     if (ScalarVT.getFixedSizeInBits() < 32 && ScalarVT.isInteger())
10574       ScalarVT = MVT::i32;
10575 
10576     return DAG.getNode(
10577         ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
10578         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, SrcVec, SrcLaneV),
10579         DstLaneV);
10580   }
10581 
10582   if (SDValue NewSD = tryWidenMaskForShuffle(Op, DAG))
10583     return NewSD;
10584 
10585   // If the shuffle is not directly supported and it has 4 elements, use
10586   // the PerfectShuffle-generated table to synthesize it from other shuffles.
10587   unsigned NumElts = VT.getVectorNumElements();
10588   if (NumElts == 4) {
10589     unsigned PFIndexes[4];
10590     for (unsigned i = 0; i != 4; ++i) {
10591       if (ShuffleMask[i] < 0)
10592         PFIndexes[i] = 8;
10593       else
10594         PFIndexes[i] = ShuffleMask[i];
10595     }
10596 
10597     // Compute the index in the perfect shuffle table.
10598     unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
10599                             PFIndexes[2] * 9 + PFIndexes[3];
10600     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
10601     return GeneratePerfectShuffle(PFTableIndex, V1, V2, PFEntry, V1, V2, DAG,
10602                                   dl);
10603   }
10604 
10605   return GenerateTBL(Op, ShuffleMask, DAG);
10606 }
10607 
10608 SDValue AArch64TargetLowering::LowerSPLAT_VECTOR(SDValue Op,
10609                                                  SelectionDAG &DAG) const {
10610   EVT VT = Op.getValueType();
10611 
10612   if (useSVEForFixedLengthVectorVT(VT))
10613     return LowerToScalableOp(Op, DAG);
10614 
10615   assert(VT.isScalableVector() && VT.getVectorElementType() == MVT::i1 &&
10616          "Unexpected vector type!");
10617 
10618   // We can handle the constant cases during isel.
10619   if (isa<ConstantSDNode>(Op.getOperand(0)))
10620     return Op;
10621 
10622   // There isn't a natural way to handle the general i1 case, so we use some
10623   // trickery with whilelo.
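  // After the sign extension below, the splat value is either 0 or all-ones,
  // so whilelo(0, 0) produces an all-false predicate and whilelo(0, ~0ULL)
  // produces an all-true one.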
10624   SDLoc DL(Op);
10625   SDValue SplatVal = DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, MVT::i64);
10626   SplatVal = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, SplatVal,
10627                          DAG.getValueType(MVT::i1));
10628   SDValue ID =
10629       DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64);
10630   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
10631   if (VT == MVT::nxv1i1)
10632     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::nxv1i1,
10633                        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::nxv2i1, ID,
10634                                    Zero, SplatVal),
10635                        Zero);
10636   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, ID, Zero, SplatVal);
10637 }
10638 
10639 SDValue AArch64TargetLowering::LowerDUPQLane(SDValue Op,
10640                                              SelectionDAG &DAG) const {
10641   SDLoc DL(Op);
10642 
10643   EVT VT = Op.getValueType();
10644   if (!isTypeLegal(VT) || !VT.isScalableVector())
10645     return SDValue();
10646 
10647   // Current lowering only supports the SVE-ACLE types.
10648   if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock)
10649     return SDValue();
10650 
  // The DUPQ operation is independent of element type, so normalise to i64s.
10652   SDValue Idx128 = Op.getOperand(2);
10653 
10654   // DUPQ can be used when idx is in range.
10655   auto *CIdx = dyn_cast<ConstantSDNode>(Idx128);
10656   if (CIdx && (CIdx->getZExtValue() <= 3)) {
10657     SDValue CI = DAG.getTargetConstant(CIdx->getZExtValue(), DL, MVT::i64);
10658     return DAG.getNode(AArch64ISD::DUPLANE128, DL, VT, Op.getOperand(1), CI);
10659   }
10660 
10661   SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::nxv2i64, Op.getOperand(1));
10662 
10663   // The ACLE says this must produce the same result as:
10664   //   svtbl(data, svadd_x(svptrue_b64(),
10665   //                       svand_x(svptrue_b64(), svindex_u64(0, 1), 1),
10666   //                       index * 2))
10667   SDValue One = DAG.getConstant(1, DL, MVT::i64);
10668   SDValue SplatOne = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, One);
10669 
10670   // create the vector 0,1,0,1,...
10671   SDValue SV = DAG.getStepVector(DL, MVT::nxv2i64);
10672   SV = DAG.getNode(ISD::AND, DL, MVT::nxv2i64, SV, SplatOne);
10673 
10674   // create the vector idx64,idx64+1,idx64,idx64+1,...
10675   SDValue Idx64 = DAG.getNode(ISD::ADD, DL, MVT::i64, Idx128, Idx128);
10676   SDValue SplatIdx64 = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, Idx64);
10677   SDValue ShuffleMask = DAG.getNode(ISD::ADD, DL, MVT::nxv2i64, SV, SplatIdx64);
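  // For example, Idx128 == 1 gives the index vector 2,3,2,3,..., so the TBL
  // below replicates 128-bit quadword 1 across the whole result.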
10678 
10679   // create the vector Val[idx64],Val[idx64+1],Val[idx64],Val[idx64+1],...
10680   SDValue TBL = DAG.getNode(AArch64ISD::TBL, DL, MVT::nxv2i64, V, ShuffleMask);
10681   return DAG.getNode(ISD::BITCAST, DL, VT, TBL);
10682 }
10683 
10684 
10685 static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits,
10686                                APInt &UndefBits) {
10687   EVT VT = BVN->getValueType(0);
10688   APInt SplatBits, SplatUndef;
10689   unsigned SplatBitSize;
10690   bool HasAnyUndefs;
10691   if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
10692     unsigned NumSplats = VT.getSizeInBits() / SplatBitSize;
10693 
10694     for (unsigned i = 0; i < NumSplats; ++i) {
10695       CnstBits <<= SplatBitSize;
10696       UndefBits <<= SplatBitSize;
10697       CnstBits |= SplatBits.zextOrTrunc(VT.getSizeInBits());
10698       UndefBits |= (SplatBits ^ SplatUndef).zextOrTrunc(VT.getSizeInBits());
10699     }
10700 
10701     return true;
10702   }
10703 
10704   return false;
10705 }
10706 
10707 // Try 64-bit splatted SIMD immediate.
10708 static SDValue tryAdvSIMDModImm64(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
                                  const APInt &Bits) {
10710   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10711     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10712     EVT VT = Op.getValueType();
10713     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v2i64 : MVT::f64;
10714 
10715     if (AArch64_AM::isAdvSIMDModImmType10(Value)) {
10716       Value = AArch64_AM::encodeAdvSIMDModImmType10(Value);
10717 
10718       SDLoc dl(Op);
10719       SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
10720                                 DAG.getConstant(Value, dl, MVT::i32));
10721       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10722     }
10723   }
10724 
10725   return SDValue();
10726 }
10727 
10728 // Try 32-bit splatted SIMD immediate.
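// For instance, a value splatting 0x0000ab00 into every 32-bit lane matches
// isAdvSIMDModImmType2 and is emitted with Value == 0xab and Shift == 8.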
10729 static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10730                                   const APInt &Bits,
10731                                   const SDValue *LHS = nullptr) {
10732   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10733     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10734     EVT VT = Op.getValueType();
10735     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
10736     bool isAdvSIMDModImm = false;
10737     uint64_t Shift;
10738 
10739     if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType1(Value))) {
10740       Value = AArch64_AM::encodeAdvSIMDModImmType1(Value);
10741       Shift = 0;
10742     }
10743     else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType2(Value))) {
10744       Value = AArch64_AM::encodeAdvSIMDModImmType2(Value);
10745       Shift = 8;
10746     }
10747     else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType3(Value))) {
10748       Value = AArch64_AM::encodeAdvSIMDModImmType3(Value);
10749       Shift = 16;
10750     }
10751     else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType4(Value))) {
10752       Value = AArch64_AM::encodeAdvSIMDModImmType4(Value);
10753       Shift = 24;
10754     }
10755 
10756     if (isAdvSIMDModImm) {
10757       SDLoc dl(Op);
10758       SDValue Mov;
10759 
10760       if (LHS)
10761         Mov = DAG.getNode(NewOp, dl, MovTy, *LHS,
10762                           DAG.getConstant(Value, dl, MVT::i32),
10763                           DAG.getConstant(Shift, dl, MVT::i32));
10764       else
10765         Mov = DAG.getNode(NewOp, dl, MovTy,
10766                           DAG.getConstant(Value, dl, MVT::i32),
10767                           DAG.getConstant(Shift, dl, MVT::i32));
10768 
10769       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10770     }
10771   }
10772 
10773   return SDValue();
10774 }
10775 
10776 // Try 16-bit splatted SIMD immediate.
10777 static SDValue tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10778                                   const APInt &Bits,
10779                                   const SDValue *LHS = nullptr) {
10780   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10781     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10782     EVT VT = Op.getValueType();
10783     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
10784     bool isAdvSIMDModImm = false;
10785     uint64_t Shift;
10786 
10787     if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType5(Value))) {
10788       Value = AArch64_AM::encodeAdvSIMDModImmType5(Value);
10789       Shift = 0;
10790     }
10791     else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType6(Value))) {
10792       Value = AArch64_AM::encodeAdvSIMDModImmType6(Value);
10793       Shift = 8;
10794     }
10795 
10796     if (isAdvSIMDModImm) {
10797       SDLoc dl(Op);
10798       SDValue Mov;
10799 
10800       if (LHS)
10801         Mov = DAG.getNode(NewOp, dl, MovTy, *LHS,
10802                           DAG.getConstant(Value, dl, MVT::i32),
10803                           DAG.getConstant(Shift, dl, MVT::i32));
10804       else
10805         Mov = DAG.getNode(NewOp, dl, MovTy,
10806                           DAG.getConstant(Value, dl, MVT::i32),
10807                           DAG.getConstant(Shift, dl, MVT::i32));
10808 
10809       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10810     }
10811   }
10812 
10813   return SDValue();
10814 }
10815 
10816 // Try 32-bit splatted SIMD immediate with shifted ones.
10817 static SDValue tryAdvSIMDModImm321s(unsigned NewOp, SDValue Op,
10818                                     SelectionDAG &DAG, const APInt &Bits) {
10819   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10820     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10821     EVT VT = Op.getValueType();
10822     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
10823     bool isAdvSIMDModImm = false;
10824     uint64_t Shift;
10825 
10826     if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType7(Value))) {
10827       Value = AArch64_AM::encodeAdvSIMDModImmType7(Value);
10828       Shift = 264;
10829     }
10830     else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType8(Value))) {
10831       Value = AArch64_AM::encodeAdvSIMDModImmType8(Value);
10832       Shift = 272;
10833     }
10834 
10835     if (isAdvSIMDModImm) {
10836       SDLoc dl(Op);
10837       SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
10838                                 DAG.getConstant(Value, dl, MVT::i32),
10839                                 DAG.getConstant(Shift, dl, MVT::i32));
10840       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10841     }
10842   }
10843 
10844   return SDValue();
10845 }
10846 
10847 // Try 8-bit splatted SIMD immediate.
10848 static SDValue tryAdvSIMDModImm8(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10849                                  const APInt &Bits) {
10850   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10851     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10852     EVT VT = Op.getValueType();
10853     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v16i8 : MVT::v8i8;
10854 
10855     if (AArch64_AM::isAdvSIMDModImmType9(Value)) {
10856       Value = AArch64_AM::encodeAdvSIMDModImmType9(Value);
10857 
10858       SDLoc dl(Op);
10859       SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
10860                                 DAG.getConstant(Value, dl, MVT::i32));
10861       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10862     }
10863   }
10864 
10865   return SDValue();
10866 }
10867 
10868 // Try FP splatted SIMD immediate.
10869 static SDValue tryAdvSIMDModImmFP(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10870                                   const APInt &Bits) {
10871   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10872     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10873     EVT VT = Op.getValueType();
10874     bool isWide = (VT.getSizeInBits() == 128);
10875     MVT MovTy;
10876     bool isAdvSIMDModImm = false;
10877 
10878     if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType11(Value))) {
10879       Value = AArch64_AM::encodeAdvSIMDModImmType11(Value);
10880       MovTy = isWide ? MVT::v4f32 : MVT::v2f32;
10881     }
10882     else if (isWide &&
10883              (isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType12(Value))) {
10884       Value = AArch64_AM::encodeAdvSIMDModImmType12(Value);
10885       MovTy = MVT::v2f64;
10886     }
10887 
10888     if (isAdvSIMDModImm) {
10889       SDLoc dl(Op);
10890       SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
10891                                 DAG.getConstant(Value, dl, MVT::i32));
10892       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10893     }
10894   }
10895 
10896   return SDValue();
10897 }
10898 
// Specialized code to quickly find if PotentialBVec is a BuildVector that
// consists of only the same constant int value, returned in the reference
// argument ConstVal.
10902 static bool isAllConstantBuildVector(const SDValue &PotentialBVec,
10903                                      uint64_t &ConstVal) {
10904   BuildVectorSDNode *Bvec = dyn_cast<BuildVectorSDNode>(PotentialBVec);
10905   if (!Bvec)
10906     return false;
10907   ConstantSDNode *FirstElt = dyn_cast<ConstantSDNode>(Bvec->getOperand(0));
10908   if (!FirstElt)
10909     return false;
10910   EVT VT = Bvec->getValueType(0);
10911   unsigned NumElts = VT.getVectorNumElements();
10912   for (unsigned i = 1; i < NumElts; ++i)
10913     if (dyn_cast<ConstantSDNode>(Bvec->getOperand(i)) != FirstElt)
10914       return false;
10915   ConstVal = FirstElt->getZExtValue();
10916   return true;
10917 }
10918 
10919 // Attempt to form a vector S[LR]I from (or (and X, BvecC1), (lsl Y, C2)),
10920 // to (SLI X, Y, C2), where X and Y have matching vector types, BvecC1 is a
// BUILD_VECTOR with constant element C1, C2 is a constant, and:
10922 //   - for the SLI case: C1 == ~(Ones(ElemSizeInBits) << C2)
10923 //   - for the SRI case: C1 == ~(Ones(ElemSizeInBits) >> C2)
10924 // The (or (lsl Y, C2), (and X, BvecC1)) case is also handled.
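// For example, with 32-bit elements and C2 == 8, the SLI form requires
// C1 == 0x000000ff in every lane: the OR keeps only the low 8 bits of X and
// fills the rest from (Y << 8), which is exactly SLI X, Y, #8.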
10925 static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) {
10926   EVT VT = N->getValueType(0);
10927 
10928   if (!VT.isVector())
10929     return SDValue();
10930 
10931   SDLoc DL(N);
10932 
10933   SDValue And;
10934   SDValue Shift;
10935 
10936   SDValue FirstOp = N->getOperand(0);
10937   unsigned FirstOpc = FirstOp.getOpcode();
10938   SDValue SecondOp = N->getOperand(1);
10939   unsigned SecondOpc = SecondOp.getOpcode();
10940 
10941   // Is one of the operands an AND or a BICi? The AND may have been optimised to
10942   // a BICi in order to use an immediate instead of a register.
10943   // Is the other operand an shl or lshr? This will have been turned into:
10944   // AArch64ISD::VSHL vector, #shift or AArch64ISD::VLSHR vector, #shift.
10945   if ((FirstOpc == ISD::AND || FirstOpc == AArch64ISD::BICi) &&
10946       (SecondOpc == AArch64ISD::VSHL || SecondOpc == AArch64ISD::VLSHR)) {
10947     And = FirstOp;
10948     Shift = SecondOp;
10949 
10950   } else if ((SecondOpc == ISD::AND || SecondOpc == AArch64ISD::BICi) &&
10951              (FirstOpc == AArch64ISD::VSHL || FirstOpc == AArch64ISD::VLSHR)) {
10952     And = SecondOp;
10953     Shift = FirstOp;
10954   } else
10955     return SDValue();
10956 
10957   bool IsAnd = And.getOpcode() == ISD::AND;
10958   bool IsShiftRight = Shift.getOpcode() == AArch64ISD::VLSHR;
10959 
10960   // Is the shift amount constant?
10961   ConstantSDNode *C2node = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
10962   if (!C2node)
10963     return SDValue();
10964 
10965   uint64_t C1;
10966   if (IsAnd) {
10967     // Is the and mask vector all constant?
10968     if (!isAllConstantBuildVector(And.getOperand(1), C1))
10969       return SDValue();
10970   } else {
10971     // Reconstruct the corresponding AND immediate from the two BICi immediates.
10972     ConstantSDNode *C1nodeImm = dyn_cast<ConstantSDNode>(And.getOperand(1));
10973     ConstantSDNode *C1nodeShift = dyn_cast<ConstantSDNode>(And.getOperand(2));
10974     assert(C1nodeImm && C1nodeShift);
10975     C1 = ~(C1nodeImm->getZExtValue() << C1nodeShift->getZExtValue());
10976   }
10977 
10978   // Is C1 == ~(Ones(ElemSizeInBits) << C2) or
10979   // C1 == ~(Ones(ElemSizeInBits) >> C2), taking into account
10980   // how much one can shift elements of a particular size?
10981   uint64_t C2 = C2node->getZExtValue();
10982   unsigned ElemSizeInBits = VT.getScalarSizeInBits();
10983   if (C2 > ElemSizeInBits)
10984     return SDValue();
10985 
10986   APInt C1AsAPInt(ElemSizeInBits, C1);
10987   APInt RequiredC1 = IsShiftRight ? APInt::getHighBitsSet(ElemSizeInBits, C2)
10988                                   : APInt::getLowBitsSet(ElemSizeInBits, C2);
10989   if (C1AsAPInt != RequiredC1)
10990     return SDValue();
10991 
10992   SDValue X = And.getOperand(0);
10993   SDValue Y = Shift.getOperand(0);
10994 
10995   unsigned Inst = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI;
10996   SDValue ResultSLI = DAG.getNode(Inst, DL, VT, X, Y, Shift.getOperand(1));
10997 
10998   LLVM_DEBUG(dbgs() << "aarch64-lower: transformed: \n");
10999   LLVM_DEBUG(N->dump(&DAG));
11000   LLVM_DEBUG(dbgs() << "into: \n");
11001   LLVM_DEBUG(ResultSLI->dump(&DAG));
11002 
11003   ++NumShiftInserts;
11004   return ResultSLI;
11005 }
11006 
11007 SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
11008                                              SelectionDAG &DAG) const {
11009   if (useSVEForFixedLengthVectorVT(Op.getValueType()))
11010     return LowerToScalableOp(Op, DAG);
11011 
11012   // Attempt to form a vector S[LR]I from (or (and X, C1), (lsl Y, C2))
11013   if (SDValue Res = tryLowerToSLI(Op.getNode(), DAG))
11014     return Res;
11015 
11016   EVT VT = Op.getValueType();
11017 
11018   SDValue LHS = Op.getOperand(0);
11019   BuildVectorSDNode *BVN =
11020       dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode());
11021   if (!BVN) {
11022     // OR commutes, so try swapping the operands.
11023     LHS = Op.getOperand(1);
11024     BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode());
11025   }
11026   if (!BVN)
11027     return Op;
11028 
11029   APInt DefBits(VT.getSizeInBits(), 0);
11030   APInt UndefBits(VT.getSizeInBits(), 0);
11031   if (resolveBuildVector(BVN, DefBits, UndefBits)) {
11032     SDValue NewOp;
11033 
11034     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG,
11035                                     DefBits, &LHS)) ||
11036         (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG,
11037                                     DefBits, &LHS)))
11038       return NewOp;
11039 
11040     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG,
11041                                     UndefBits, &LHS)) ||
11042         (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG,
11043                                     UndefBits, &LHS)))
11044       return NewOp;
11045   }
11046 
11047   // We can always fall back to a non-immediate OR.
11048   return Op;
11049 }
11050 
11051 // Normalize the operands of BUILD_VECTOR. The value of constant operands will
11052 // be truncated to fit element width.
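// For example, after type legalization a v8i8 build vector carries i32
// operands; an operand of 0xffffffff is truncated to the 8-bit value 0xff
// and rebuilt as the i32 constant 255.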
11053 static SDValue NormalizeBuildVector(SDValue Op,
11054                                     SelectionDAG &DAG) {
11055   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
11056   SDLoc dl(Op);
11057   EVT VT = Op.getValueType();
  EVT EltTy = VT.getVectorElementType();
11059 
11060   if (EltTy.isFloatingPoint() || EltTy.getSizeInBits() > 16)
11061     return Op;
11062 
11063   SmallVector<SDValue, 16> Ops;
11064   for (SDValue Lane : Op->ops()) {
11065     // For integer vectors, type legalization would have promoted the
11066     // operands already. Otherwise, if Op is a floating-point splat
11067     // (with operands cast to integers), then the only possibilities
11068     // are constants and UNDEFs.
11069     if (auto *CstLane = dyn_cast<ConstantSDNode>(Lane)) {
11070       APInt LowBits(EltTy.getSizeInBits(),
11071                     CstLane->getZExtValue());
11072       Lane = DAG.getConstant(LowBits.getZExtValue(), dl, MVT::i32);
11073     } else if (Lane.getNode()->isUndef()) {
11074       Lane = DAG.getUNDEF(MVT::i32);
11075     } else {
11076       assert(Lane.getValueType() == MVT::i32 &&
11077              "Unexpected BUILD_VECTOR operand type");
11078     }
11079     Ops.push_back(Lane);
11080   }
11081   return DAG.getBuildVector(VT, dl, Ops);
11082 }
11083 
11084 static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG) {
11085   EVT VT = Op.getValueType();
11086 
11087   APInt DefBits(VT.getSizeInBits(), 0);
11088   APInt UndefBits(VT.getSizeInBits(), 0);
11089   BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
11090   if (resolveBuildVector(BVN, DefBits, UndefBits)) {
11091     SDValue NewOp;
11092     if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
11093         (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
11094         (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
11095         (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
11096         (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
11097         (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
11098       return NewOp;
11099 
11100     DefBits = ~DefBits;
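    // DefBits now holds the bitwise inverse of the splat value; MVNI
    // materializes the inverse of its encoded immediate, so an encodable
    // inverse still produces the original constant.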
11101     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) ||
11102         (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) ||
11103         (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits)))
11104       return NewOp;
11105 
11106     DefBits = UndefBits;
11107     if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
11108         (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
11109         (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
11110         (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
11111         (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
11112         (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
11113       return NewOp;
11114 
11115     DefBits = ~UndefBits;
11116     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) ||
11117         (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) ||
11118         (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits)))
11119       return NewOp;
11120   }
11121 
11122   return SDValue();
11123 }
11124 
11125 SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
11126                                                  SelectionDAG &DAG) const {
11127   EVT VT = Op.getValueType();
11128 
11129   // Try to build a simple constant vector.
11130   Op = NormalizeBuildVector(Op, DAG);
11131   if (VT.isInteger()) {
11132     // Certain vector constants, used to express things like logical NOT and
11133     // arithmetic NEG, are passed through unmodified.  This allows special
11134     // patterns for these operations to match, which will lower these constants
11135     // to whatever is proven necessary.
11136     BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
11137     if (BVN->isConstant())
11138       if (ConstantSDNode *Const = BVN->getConstantSplatNode()) {
11139         unsigned BitSize = VT.getVectorElementType().getSizeInBits();
11140         APInt Val(BitSize,
11141                   Const->getAPIntValue().zextOrTrunc(BitSize).getZExtValue());
11142         if (Val.isZero() || Val.isAllOnes())
11143           return Op;
11144       }
11145   }
11146 
11147   if (SDValue V = ConstantBuildVector(Op, DAG))
11148     return V;
11149 
11150   // Scan through the operands to find some interesting properties we can
11151   // exploit:
11152   //   1) If only one value is used, we can use a DUP, or
11153   //   2) if only the low element is not undef, we can just insert that, or
11154   //   3) if only one constant value is used (w/ some non-constant lanes),
11155   //      we can splat the constant value into the whole vector then fill
11156   //      in the non-constant lanes.
11157   //   4) FIXME: If different constant values are used, but we can intelligently
11158   //             select the values we'll be overwriting for the non-constant
11159   //             lanes such that we can directly materialize the vector
11160   //             some other way (MOVI, e.g.), we can be sneaky.
11161   //   5) if all operands are EXTRACT_VECTOR_ELT, check for VUZP.
11162   SDLoc dl(Op);
11163   unsigned NumElts = VT.getVectorNumElements();
11164   bool isOnlyLowElement = true;
11165   bool usesOnlyOneValue = true;
11166   bool usesOnlyOneConstantValue = true;
11167   bool isConstant = true;
11168   bool AllLanesExtractElt = true;
11169   unsigned NumConstantLanes = 0;
11170   unsigned NumDifferentLanes = 0;
11171   unsigned NumUndefLanes = 0;
11172   SDValue Value;
11173   SDValue ConstantValue;
11174   for (unsigned i = 0; i < NumElts; ++i) {
11175     SDValue V = Op.getOperand(i);
11176     if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11177       AllLanesExtractElt = false;
11178     if (V.isUndef()) {
11179       ++NumUndefLanes;
11180       continue;
11181     }
11182     if (i > 0)
11183       isOnlyLowElement = false;
11184     if (!isIntOrFPConstant(V))
11185       isConstant = false;
11186 
11187     if (isIntOrFPConstant(V)) {
11188       ++NumConstantLanes;
11189       if (!ConstantValue.getNode())
11190         ConstantValue = V;
11191       else if (ConstantValue != V)
11192         usesOnlyOneConstantValue = false;
11193     }
11194 
11195     if (!Value.getNode())
11196       Value = V;
11197     else if (V != Value) {
11198       usesOnlyOneValue = false;
11199       ++NumDifferentLanes;
11200     }
11201   }
11202 
11203   if (!Value.getNode()) {
11204     LLVM_DEBUG(
11205         dbgs() << "LowerBUILD_VECTOR: value undefined, creating undef node\n");
11206     return DAG.getUNDEF(VT);
11207   }
11208 
11209   // Convert BUILD_VECTOR where all elements but the lowest are undef into
11210   // SCALAR_TO_VECTOR, except for when we have a single-element constant vector
11211   // as SimplifyDemandedBits will just turn that back into BUILD_VECTOR.
11212   if (isOnlyLowElement && !(NumElts == 1 && isIntOrFPConstant(Value))) {
11213     LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: only low element used, creating 1 "
11214                          "SCALAR_TO_VECTOR node\n");
11215     return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
11216   }
11217 
11218   if (AllLanesExtractElt) {
11219     SDNode *Vector = nullptr;
11220     bool Even = false;
11221     bool Odd = false;
11222     // Check whether the extract elements match the Even pattern <0,2,4,...> or
11223     // the Odd pattern <1,3,5,...>.
11224     for (unsigned i = 0; i < NumElts; ++i) {
11225       SDValue V = Op.getOperand(i);
11226       const SDNode *N = V.getNode();
11227       if (!isa<ConstantSDNode>(N->getOperand(1)))
11228         break;
11229       SDValue N0 = N->getOperand(0);
11230 
11231       // All elements are extracted from the same vector.
11232       if (!Vector) {
11233         Vector = N0.getNode();
11234         // Check that the type of EXTRACT_VECTOR_ELT matches the type of
11235         // BUILD_VECTOR.
11236         if (VT.getVectorElementType() !=
11237             N0.getValueType().getVectorElementType())
11238           break;
11239       } else if (Vector != N0.getNode()) {
11240         Odd = false;
11241         Even = false;
11242         break;
11243       }
11244 
11245       // Extracted values are either at Even indices <0,2,4,...> or at Odd
11246       // indices <1,3,5,...>.
11247       uint64_t Val = N->getConstantOperandVal(1);
11248       if (Val == 2 * i) {
11249         Even = true;
11250         continue;
11251       }
11252       if (Val - 1 == 2 * i) {
11253         Odd = true;
11254         continue;
11255       }
11256 
11257       // Something does not match: abort.
11258       Odd = false;
11259       Even = false;
11260       break;
11261     }
11262     if (Even || Odd) {
11263       SDValue LHS =
11264           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0),
11265                       DAG.getConstant(0, dl, MVT::i64));
11266       SDValue RHS =
11267           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0),
11268                       DAG.getConstant(NumElts, dl, MVT::i64));
11269 
11270       if (Even && !Odd)
11271         return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), LHS,
11272                            RHS);
11273       if (Odd && !Even)
11274         return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), LHS,
11275                            RHS);
11276     }
11277   }
11278 
  // Use DUP for non-constant splats. For FP constant splats, bitcast the
  // elements to the equivalent-width integer type and try again.
11281   if (usesOnlyOneValue) {
11282     if (!isConstant) {
11283       if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
11284           Value.getValueType() != VT) {
11285         LLVM_DEBUG(
11286             dbgs() << "LowerBUILD_VECTOR: use DUP for non-constant splats\n");
11287         return DAG.getNode(AArch64ISD::DUP, dl, VT, Value);
11288       }
11289 
11290       // This is actually a DUPLANExx operation, which keeps everything vectory.
11291 
11292       SDValue Lane = Value.getOperand(1);
11293       Value = Value.getOperand(0);
11294       if (Value.getValueSizeInBits() == 64) {
11295         LLVM_DEBUG(
11296             dbgs() << "LowerBUILD_VECTOR: DUPLANE works on 128-bit vectors, "
11297                       "widening it\n");
11298         Value = WidenVector(Value, DAG);
11299       }
11300 
11301       unsigned Opcode = getDUPLANEOp(VT.getVectorElementType());
11302       return DAG.getNode(Opcode, dl, VT, Value, Lane);
11303     }
11304 
11305     if (VT.getVectorElementType().isFloatingPoint()) {
11306       SmallVector<SDValue, 8> Ops;
11307       EVT EltTy = VT.getVectorElementType();
      assert((EltTy == MVT::f16 || EltTy == MVT::bf16 || EltTy == MVT::f32 ||
              EltTy == MVT::f64) && "Unsupported floating-point vector type");
11310       LLVM_DEBUG(
11311           dbgs() << "LowerBUILD_VECTOR: float constant splats, creating int "
11312                     "BITCASTS, and try again\n");
11313       MVT NewType = MVT::getIntegerVT(EltTy.getSizeInBits());
11314       for (unsigned i = 0; i < NumElts; ++i)
11315         Ops.push_back(DAG.getNode(ISD::BITCAST, dl, NewType, Op.getOperand(i)));
11316       EVT VecVT = EVT::getVectorVT(*DAG.getContext(), NewType, NumElts);
11317       SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
11318       LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: trying to lower new vector: ";
11319                  Val.dump(););
11320       Val = LowerBUILD_VECTOR(Val, DAG);
11321       if (Val.getNode())
11322         return DAG.getNode(ISD::BITCAST, dl, VT, Val);
11323     }
11324   }
11325 
11326   // If we need to insert a small number of different non-constant elements and
11327   // the vector width is sufficiently large, prefer using DUP with the common
11328   // value and INSERT_VECTOR_ELT for the different lanes. If DUP is preferred,
11329   // skip the constant lane handling below.
11330   bool PreferDUPAndInsert =
11331       !isConstant && NumDifferentLanes >= 1 &&
11332       NumDifferentLanes < ((NumElts - NumUndefLanes) / 2) &&
11333       NumDifferentLanes >= NumConstantLanes;
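  // For example, a v8i16 with seven identical non-constant lanes and one
  // different lane gives NumDifferentLanes == 1 < (8 - 0) / 2, so a DUP plus
  // a single INSERT_VECTOR_ELT is preferred.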
11334 
  // If only one constant value was used, and it was needed for more than one
  // lane, start by splatting that value, then replace the non-constant lanes.
  // This is better than the default, which will perform a separate
  // initialization for each lane.
11339   if (!PreferDUPAndInsert && NumConstantLanes > 0 && usesOnlyOneConstantValue) {
11340     // Firstly, try to materialize the splat constant.
11341     SDValue Vec = DAG.getSplatBuildVector(VT, dl, ConstantValue),
11342             Val = ConstantBuildVector(Vec, DAG);
11343     if (!Val) {
11344       // Otherwise, materialize the constant and splat it.
11345       Val = DAG.getNode(AArch64ISD::DUP, dl, VT, ConstantValue);
11346       DAG.ReplaceAllUsesWith(Vec.getNode(), &Val);
11347     }
11348 
11349     // Now insert the non-constant lanes.
11350     for (unsigned i = 0; i < NumElts; ++i) {
11351       SDValue V = Op.getOperand(i);
11352       SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
11353       if (!isIntOrFPConstant(V))
11354         // Note that type legalization likely mucked about with the VT of the
11355         // source operand, so we may have to convert it here before inserting.
11356         Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, V, LaneIdx);
11357     }
11358     return Val;
11359   }
11360 
11361   // This will generate a load from the constant pool.
11362   if (isConstant) {
11363     LLVM_DEBUG(
11364         dbgs() << "LowerBUILD_VECTOR: all elements are constant, use default "
11365                   "expansion\n");
11366     return SDValue();
11367   }
11368 
11369   // Detect patterns of a0,a1,a2,a3,b0,b1,b2,b3,c0,c1,c2,c3,d0,d1,d2,d3 from
11370   // v4i32s. This is really a truncate, which we can construct out of (legal)
11371   // concats and truncate nodes.
11372   if (SDValue M = ReconstructTruncateFromBuildVector(Op, DAG))
11373     return M;
11374 
11375   // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
11376   if (NumElts >= 4) {
11377     if (SDValue shuffle = ReconstructShuffle(Op, DAG))
11378       return shuffle;
11379   }
11380 
11381   if (PreferDUPAndInsert) {
11382     // First, build a constant vector with the common element.
11383     SmallVector<SDValue, 8> Ops(NumElts, Value);
11384     SDValue NewVector = LowerBUILD_VECTOR(DAG.getBuildVector(VT, dl, Ops), DAG);
11385     // Next, insert the elements that do not match the common value.
11386     for (unsigned I = 0; I < NumElts; ++I)
11387       if (Op.getOperand(I) != Value)
11388         NewVector =
11389             DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, NewVector,
11390                         Op.getOperand(I), DAG.getConstant(I, dl, MVT::i64));
11391 
11392     return NewVector;
11393   }
11394 
  // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
  // know the default expansion would otherwise fall back on something even
  // worse. For a vector with one or two non-undef values, that's
  // scalar_to_vector for the elements followed by a shuffle (provided the
  // shuffle is valid for the target); for everything else, it's
  // materialization element by element on the stack followed by a load.
11401   if (!isConstant && !usesOnlyOneValue) {
11402     LLVM_DEBUG(
11403         dbgs() << "LowerBUILD_VECTOR: alternatives failed, creating sequence "
11404                   "of INSERT_VECTOR_ELT\n");
11405 
11406     SDValue Vec = DAG.getUNDEF(VT);
11407     SDValue Op0 = Op.getOperand(0);
11408     unsigned i = 0;
11409 
    // Use SCALAR_TO_VECTOR for lane zero to
    // a) Avoid a RMW dependency on the full vector register, and
    // b) Allow the register coalescer to fold away the copy if the value is
    //    already in an S or D register; otherwise we would be forced to emit
    //    an INSERT_SUBREG that we can't fold anywhere.
11415     //
11416     // We also allow types like i8 and i16 which are illegal scalar but legal
11417     // vector element types. After type-legalization the inserted value is
11418     // extended (i32) and it is safe to cast them to the vector type by ignoring
11419     // the upper bits of the lowest lane (e.g. v8i8, v4i16).
11420     if (!Op0.isUndef()) {
11421       LLVM_DEBUG(dbgs() << "Creating node for op0, it is not undefined:\n");
11422       Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op0);
11423       ++i;
11424     }
11425     LLVM_DEBUG(if (i < NumElts) dbgs()
11426                    << "Creating nodes for the other vector elements:\n";);
11427     for (; i < NumElts; ++i) {
11428       SDValue V = Op.getOperand(i);
11429       if (V.isUndef())
11430         continue;
11431       SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
11432       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
11433     }
11434     return Vec;
11435   }
11436 
11437   LLVM_DEBUG(
11438       dbgs() << "LowerBUILD_VECTOR: use default expansion, failed to find "
11439                 "better alternative\n");
11440   return SDValue();
11441 }
11442 
11443 SDValue AArch64TargetLowering::LowerCONCAT_VECTORS(SDValue Op,
11444                                                    SelectionDAG &DAG) const {
11445   if (useSVEForFixedLengthVectorVT(Op.getValueType()))
11446     return LowerFixedLengthConcatVectorsToSVE(Op, DAG);
11447 
11448   assert(Op.getValueType().isScalableVector() &&
11449          isTypeLegal(Op.getValueType()) &&
11450          "Expected legal scalable vector type!");
11451 
11452   if (isTypeLegal(Op.getOperand(0).getValueType())) {
11453     unsigned NumOperands = Op->getNumOperands();
11454     assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
11455            "Unexpected number of operands in CONCAT_VECTORS");
11456 
11457     if (NumOperands == 2)
11458       return Op;
11459 
11460     // Concat each pair of subvectors and pack into the lower half of the array.
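    // For example, with four operands the first pass produces two double-width
    // concatenations and the second pass combines those into the final result.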
11461     SmallVector<SDValue> ConcatOps(Op->op_begin(), Op->op_end());
11462     while (ConcatOps.size() > 1) {
11463       for (unsigned I = 0, E = ConcatOps.size(); I != E; I += 2) {
11464         SDValue V1 = ConcatOps[I];
11465         SDValue V2 = ConcatOps[I + 1];
11466         EVT SubVT = V1.getValueType();
11467         EVT PairVT = SubVT.getDoubleNumVectorElementsVT(*DAG.getContext());
11468         ConcatOps[I / 2] =
11469             DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), PairVT, V1, V2);
11470       }
11471       ConcatOps.resize(ConcatOps.size() / 2);
11472     }
11473     return ConcatOps[0];
11474   }
11475 
11476   return SDValue();
11477 }
11478 
11479 SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
11480                                                       SelectionDAG &DAG) const {
11481   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!");
11482 
11483   if (useSVEForFixedLengthVectorVT(Op.getValueType()))
11484     return LowerFixedLengthInsertVectorElt(Op, DAG);
11485 
11486   // Check for non-constant or out of range lane.
11487   EVT VT = Op.getOperand(0).getValueType();
11488 
11489   if (VT.getScalarType() == MVT::i1) {
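    // Predicate (i1) elements cannot be inserted directly; promote the vector
    // to an integer element type, insert there, and truncate the result back.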
11490     EVT VectorVT = getPromotedVTForPredicate(VT);
11491     SDLoc DL(Op);
11492     SDValue ExtendedVector =
11493         DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, VectorVT);
11494     SDValue ExtendedValue =
11495         DAG.getAnyExtOrTrunc(Op.getOperand(1), DL,
11496                              VectorVT.getScalarType().getSizeInBits() < 32
11497                                  ? MVT::i32
11498                                  : VectorVT.getScalarType());
11499     ExtendedVector =
11500         DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VectorVT, ExtendedVector,
11501                     ExtendedValue, Op.getOperand(2));
11502     return DAG.getAnyExtOrTrunc(ExtendedVector, DL, VT);
11503   }
11504 
11505   ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(2));
11506   if (!CI || CI->getZExtValue() >= VT.getVectorNumElements())
11507     return SDValue();
11508 
11509   // Insertion/extraction are legal for V128 types.
11510   if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
11511       VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
11512       VT == MVT::v8f16 || VT == MVT::v8bf16)
11513     return Op;
11514 
11515   if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 &&
11516       VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16 &&
11517       VT != MVT::v4bf16)
11518     return SDValue();
11519 
  // For V64 types, we perform insertion by expanding the value
  // to a V128 type and performing the insertion on that.
11522   SDLoc DL(Op);
11523   SDValue WideVec = WidenVector(Op.getOperand(0), DAG);
11524   EVT WideTy = WideVec.getValueType();
11525 
11526   SDValue Node = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideTy, WideVec,
11527                              Op.getOperand(1), Op.getOperand(2));
11528   // Re-narrow the resultant vector.
11529   return NarrowVector(Node, DAG);
11530 }
11531 
11532 SDValue
11533 AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
11534                                                SelectionDAG &DAG) const {
11535   assert(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unknown opcode!");
11536   EVT VT = Op.getOperand(0).getValueType();
11537 
11538   if (VT.getScalarType() == MVT::i1) {
11539     // We can't directly extract from an SVE predicate; extend it first.
11540     // (This isn't the only possible lowering, but it's straightforward.)
11541     EVT VectorVT = getPromotedVTForPredicate(VT);
11542     SDLoc DL(Op);
11543     SDValue Extend =
11544         DAG.getNode(ISD::ANY_EXTEND, DL, VectorVT, Op.getOperand(0));
11545     MVT ExtractTy = VectorVT == MVT::nxv2i64 ? MVT::i64 : MVT::i32;
11546     SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractTy,
11547                                   Extend, Op.getOperand(1));
11548     return DAG.getAnyExtOrTrunc(Extract, DL, Op.getValueType());
11549   }
11550 
11551   if (useSVEForFixedLengthVectorVT(VT))
11552     return LowerFixedLengthExtractVectorElt(Op, DAG);
11553 
11554   // Check for non-constant or out of range lane.
11555   ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(1));
11556   if (!CI || CI->getZExtValue() >= VT.getVectorNumElements())
11557     return SDValue();
11558 
11559   // Insertion/extraction are legal for V128 types.
11560   if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
11561       VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
11562       VT == MVT::v8f16 || VT == MVT::v8bf16)
11563     return Op;
11564 
11565   if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 &&
11566       VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16 &&
11567       VT != MVT::v4bf16)
11568     return SDValue();
11569 
  // For V64 types, we perform extraction by expanding the value
  // to a V128 type and performing the extraction on that.
11572   SDLoc DL(Op);
11573   SDValue WideVec = WidenVector(Op.getOperand(0), DAG);
11574   EVT WideTy = WideVec.getValueType();
11575 
11576   EVT ExtrTy = WideTy.getVectorElementType();
11577   if (ExtrTy == MVT::i16 || ExtrTy == MVT::i8)
11578     ExtrTy = MVT::i32;
11579 
11580   // For extractions, we just return the result directly.
11581   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtrTy, WideVec,
11582                      Op.getOperand(1));
11583 }
11584 
11585 SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
11586                                                       SelectionDAG &DAG) const {
11587   assert(Op.getValueType().isFixedLengthVector() &&
11588          "Only cases that extract a fixed length vector are supported!");
11589 
11590   EVT InVT = Op.getOperand(0).getValueType();
11591   unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
11592   unsigned Size = Op.getValueSizeInBits();
11593 
11594   // If we don't have legal types yet, do nothing
11595   if (!DAG.getTargetLoweringInfo().isTypeLegal(InVT))
11596     return SDValue();
11597 
11598   if (InVT.isScalableVector()) {
11599     // This will be matched by custom code during ISelDAGToDAG.
11600     if (Idx == 0 && isPackedVectorType(InVT, DAG))
11601       return Op;
11602 
11603     return SDValue();
11604   }
11605 
11606   // This will get lowered to an appropriate EXTRACT_SUBREG in ISel.
11607   if (Idx == 0 && InVT.getSizeInBits() <= 128)
11608     return Op;
11609 
11610   // If this is extracting the upper 64-bits of a 128-bit vector, we match
11611   // that directly.
11612   if (Size == 64 && Idx * InVT.getScalarSizeInBits() == 64 &&
11613       InVT.getSizeInBits() == 128)
11614     return Op;
11615 
11616   if (useSVEForFixedLengthVectorVT(InVT)) {
11617     SDLoc DL(Op);
11618 
11619     EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
11620     SDValue NewInVec =
11621         convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
11622 
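    // Splicing the vector with itself rotates the wanted elements down to
    // lane 0, from where the fixed length result can be extracted.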
11623     SDValue Splice = DAG.getNode(ISD::VECTOR_SPLICE, DL, ContainerVT, NewInVec,
11624                                  NewInVec, DAG.getConstant(Idx, DL, MVT::i64));
11625     return convertFromScalableVector(DAG, Op.getValueType(), Splice);
11626   }
11627 
11628   return SDValue();
11629 }
11630 
11631 SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
11632                                                      SelectionDAG &DAG) const {
11633   assert(Op.getValueType().isScalableVector() &&
11634          "Only expect to lower inserts into scalable vectors!");
11635 
11636   EVT InVT = Op.getOperand(1).getValueType();
11637   unsigned Idx = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
11638 
11639   SDValue Vec0 = Op.getOperand(0);
11640   SDValue Vec1 = Op.getOperand(1);
11641   SDLoc DL(Op);
11642   EVT VT = Op.getValueType();
11643 
11644   if (InVT.isScalableVector()) {
11645     if (!isTypeLegal(VT))
11646       return SDValue();
11647 
11648     // Break down insert_subvector into simpler parts.
11649     if (VT.getVectorElementType() == MVT::i1) {
11650       unsigned NumElts = VT.getVectorMinNumElements();
11651       EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
11652 
11653       SDValue Lo, Hi;
11654       Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, Vec0,
11655                        DAG.getVectorIdxConstant(0, DL));
11656       Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, Vec0,
11657                        DAG.getVectorIdxConstant(NumElts / 2, DL));
11658       if (Idx < (NumElts / 2)) {
11659         SDValue NewLo = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, HalfVT, Lo, Vec1,
11660                                     DAG.getVectorIdxConstant(Idx, DL));
11661         return DAG.getNode(AArch64ISD::UZP1, DL, VT, NewLo, Hi);
11662       } else {
11663         SDValue NewHi =
11664             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, HalfVT, Hi, Vec1,
11665                         DAG.getVectorIdxConstant(Idx - (NumElts / 2), DL));
11666         return DAG.getNode(AArch64ISD::UZP1, DL, VT, Lo, NewHi);
11667       }
11668     }
11669 
11670     // Ensure the subvector is half the size of the main vector.
11671     if (VT.getVectorElementCount() != (InVT.getVectorElementCount() * 2))
11672       return SDValue();
11673 
    // Here "narrow" and "wide" refer to the vector element types. After
    // "casting", both vectors must have the same bit length, so because the
    // subvector has fewer elements those elements need to be bigger.
11677     EVT NarrowVT = getPackedSVEVectorVT(VT.getVectorElementCount());
11678     EVT WideVT = getPackedSVEVectorVT(InVT.getVectorElementCount());
11679 
11680     // NOP cast operands to the largest legal vector of the same element count.
11681     if (VT.isFloatingPoint()) {
11682       Vec0 = getSVESafeBitCast(NarrowVT, Vec0, DAG);
11683       Vec1 = getSVESafeBitCast(WideVT, Vec1, DAG);
11684     } else {
11685       // Legal integer vectors are already their largest so Vec0 is fine as is.
11686       Vec1 = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Vec1);
11687     }
11688 
11689     // To replace the top/bottom half of vector V with vector SubV we widen the
11690     // preserved half of V, concatenate this to SubV (the order depending on the
11691     // half being replaced) and then narrow the result.
11692     SDValue Narrow;
11693     if (Idx == 0) {
11694       SDValue HiVec0 = DAG.getNode(AArch64ISD::UUNPKHI, DL, WideVT, Vec0);
11695       Narrow = DAG.getNode(AArch64ISD::UZP1, DL, NarrowVT, Vec1, HiVec0);
11696     } else {
11697       assert(Idx == InVT.getVectorMinNumElements() &&
11698              "Invalid subvector index!");
11699       SDValue LoVec0 = DAG.getNode(AArch64ISD::UUNPKLO, DL, WideVT, Vec0);
11700       Narrow = DAG.getNode(AArch64ISD::UZP1, DL, NarrowVT, LoVec0, Vec1);
11701     }
11702 
11703     return getSVESafeBitCast(VT, Narrow, DAG);
11704   }
11705 
11706   if (Idx == 0 && isPackedVectorType(VT, DAG)) {
11707     // This will be matched by custom code during ISelDAGToDAG.
11708     if (Vec0.isUndef())
11709       return Op;
11710 
11711     Optional<unsigned> PredPattern =
11712         getSVEPredPatternFromNumElements(InVT.getVectorNumElements());
11713     auto PredTy = VT.changeVectorElementType(MVT::i1);
11714     SDValue PTrue = getPTrue(DAG, DL, PredTy, *PredPattern);
11715     SDValue ScalableVec1 = convertToScalableVector(DAG, VT, Vec1);
11716     return DAG.getNode(ISD::VSELECT, DL, VT, PTrue, ScalableVec1, Vec0);
11717   }
11718 
11719   return SDValue();
11720 }
11721 
11722 static bool isPow2Splat(SDValue Op, uint64_t &SplatVal, bool &Negated) {
11723   if (Op.getOpcode() != AArch64ISD::DUP &&
11724       Op.getOpcode() != ISD::SPLAT_VECTOR &&
11725       Op.getOpcode() != ISD::BUILD_VECTOR)
11726     return false;
11727 
11728   if (Op.getOpcode() == ISD::BUILD_VECTOR &&
11729       !isAllConstantBuildVector(Op, SplatVal))
11730     return false;
11731 
11732   if (Op.getOpcode() != ISD::BUILD_VECTOR &&
11733       !isa<ConstantSDNode>(Op->getOperand(0)))
11734     return false;
11735 
11736   SplatVal = Op->getConstantOperandVal(0);
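  // For elements narrower than 64 bits, reinterpret the splat as a signed
  // 32-bit value so that negative powers of two are recognised below.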
11737   if (Op.getValueType().getVectorElementType() != MVT::i64)
11738     SplatVal = (int32_t)SplatVal;
11739 
11740   Negated = false;
11741   if (isPowerOf2_64(SplatVal))
11742     return true;
11743 
11744   Negated = true;
11745   if (isPowerOf2_64(-SplatVal)) {
11746     SplatVal = -SplatVal;
11747     return true;
11748   }
11749 
11750   return false;
11751 }
11752 
11753 SDValue AArch64TargetLowering::LowerDIV(SDValue Op, SelectionDAG &DAG) const {
11754   EVT VT = Op.getValueType();
11755   SDLoc dl(Op);
11756 
11757   if (useSVEForFixedLengthVectorVT(VT, /*OverrideNEON=*/true))
11758     return LowerFixedLengthVectorIntDivideToSVE(Op, DAG);
11759 
11760   assert(VT.isScalableVector() && "Expected a scalable vector.");
11761 
11762   bool Signed = Op.getOpcode() == ISD::SDIV;
11763   unsigned PredOpcode = Signed ? AArch64ISD::SDIV_PRED : AArch64ISD::UDIV_PRED;
11764 
11765   bool Negated;
11766   uint64_t SplatVal;
11767   if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated)) {
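    // A signed divide by a power of two maps onto a predicated
    // arithmetic-shift-right-for-divide (e.g. sdiv x, 8 becomes an ASRD by 3),
    // negating the result when the divisor was negative.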
11768     SDValue Pg = getPredicateForScalableVector(DAG, dl, VT);
11769     SDValue Res =
11770         DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, dl, VT, Pg, Op->getOperand(0),
11771                     DAG.getTargetConstant(Log2_64(SplatVal), dl, MVT::i32));
11772     if (Negated)
11773       Res = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), Res);
11774 
11775     return Res;
11776   }
11777 
11778   if (VT == MVT::nxv4i32 || VT == MVT::nxv2i64)
11779     return LowerToPredicatedOp(Op, DAG, PredOpcode);
11780 
11781   // SVE doesn't have i8 and i16 DIV operations; widen them to 32-bit
11782   // operations, and truncate the result.
11783   EVT WidenedVT;
11784   if (VT == MVT::nxv16i8)
11785     WidenedVT = MVT::nxv8i16;
11786   else if (VT == MVT::nxv8i16)
11787     WidenedVT = MVT::nxv4i32;
11788   else
11789     llvm_unreachable("Unexpected Custom DIV operation");
11790 
11791   unsigned UnpkLo = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
11792   unsigned UnpkHi = Signed ? AArch64ISD::SUNPKHI : AArch64ISD::UUNPKHI;
11793   SDValue Op0Lo = DAG.getNode(UnpkLo, dl, WidenedVT, Op.getOperand(0));
11794   SDValue Op1Lo = DAG.getNode(UnpkLo, dl, WidenedVT, Op.getOperand(1));
11795   SDValue Op0Hi = DAG.getNode(UnpkHi, dl, WidenedVT, Op.getOperand(0));
11796   SDValue Op1Hi = DAG.getNode(UnpkHi, dl, WidenedVT, Op.getOperand(1));
11797   SDValue ResultLo = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0Lo, Op1Lo);
11798   SDValue ResultHi = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0Hi, Op1Hi);
11799   return DAG.getNode(AArch64ISD::UZP1, dl, VT, ResultLo, ResultHi);
11800 }
11801 
11802 bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
11803   // Currently no fixed length shuffles that require SVE are legal.
11804   if (useSVEForFixedLengthVectorVT(VT))
11805     return false;
11806 
11807   if (VT.getVectorNumElements() == 4 &&
11808       (VT.is128BitVector() || VT.is64BitVector())) {
11809     unsigned Cost = getPerfectShuffleCost(M);
11810     if (Cost <= 1)
11811       return true;
11812   }
11813 
11814   bool DummyBool;
11815   int DummyInt;
11816   unsigned DummyUnsigned;
11817 
11818   return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) || isREVMask(M, VT, 64) ||
11819           isREVMask(M, VT, 32) || isREVMask(M, VT, 16) ||
11820           isEXTMask(M, VT, DummyBool, DummyUnsigned) ||
11821           // isTBLMask(M, VT) || // FIXME: Port TBL support from ARM.
11822           isTRNMask(M, VT, DummyUnsigned) || isUZPMask(M, VT, DummyUnsigned) ||
11823           isZIPMask(M, VT, DummyUnsigned) ||
11824           isTRN_v_undef_Mask(M, VT, DummyUnsigned) ||
11825           isUZP_v_undef_Mask(M, VT, DummyUnsigned) ||
11826           isZIP_v_undef_Mask(M, VT, DummyUnsigned) ||
11827           isINSMask(M, VT.getVectorNumElements(), DummyBool, DummyInt) ||
11828           isConcatMask(M, VT, VT.getSizeInBits() == 128));
11829 }
11830 
11831 bool AArch64TargetLowering::isVectorClearMaskLegal(ArrayRef<int> M,
11832                                                    EVT VT) const {
  // Just delegate to the generic legality; clear masks aren't special.
11834   return isShuffleMaskLegal(M, VT);
11835 }
11836 
11837 /// getVShiftImm - Check if this is a valid build_vector for the immediate
11838 /// operand of a vector shift operation, where all the elements of the
11839 /// build_vector must have the same constant integer value.
11840 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
11841   // Ignore bit_converts.
11842   while (Op.getOpcode() == ISD::BITCAST)
11843     Op = Op.getOperand(0);
11844   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
11845   APInt SplatBits, SplatUndef;
11846   unsigned SplatBitSize;
11847   bool HasAnyUndefs;
11848   if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
11849                                     HasAnyUndefs, ElementBits) ||
11850       SplatBitSize > ElementBits)
11851     return false;
11852   Cnt = SplatBits.getSExtValue();
11853   return true;
11854 }
11855 
11856 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
11857 /// operand of a vector shift left operation.  That value must be in the range:
11858 ///   0 <= Value < ElementBits for a left shift; or
11859 ///   0 <= Value <= ElementBits for a long left shift.
11860 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
11861   assert(VT.isVector() && "vector shift count is not a vector type");
11862   int64_t ElementBits = VT.getScalarSizeInBits();
11863   if (!getVShiftImm(Op, ElementBits, Cnt))
11864     return false;
11865   return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
11866 }
11867 
11868 /// isVShiftRImm - Check if this is a valid build_vector for the immediate
11869 /// operand of a vector shift right operation. The value must be in the range:
///   1 <= Value <= ElementBits for a right shift; or
///   1 <= Value <= ElementBits/2 for a narrowing right shift.
11871 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt) {
11872   assert(VT.isVector() && "vector shift count is not a vector type");
11873   int64_t ElementBits = VT.getScalarSizeInBits();
11874   if (!getVShiftImm(Op, ElementBits, Cnt))
11875     return false;
11876   return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
11877 }
11878 
11879 SDValue AArch64TargetLowering::LowerTRUNCATE(SDValue Op,
11880                                              SelectionDAG &DAG) const {
11881   EVT VT = Op.getValueType();
11882 
11883   if (VT.getScalarType() == MVT::i1) {
11884     // Lower i1 truncate to `(x & 1) != 0`.
11885     SDLoc dl(Op);
11886     EVT OpVT = Op.getOperand(0).getValueType();
11887     SDValue Zero = DAG.getConstant(0, dl, OpVT);
11888     SDValue One = DAG.getConstant(1, dl, OpVT);
11889     SDValue And = DAG.getNode(ISD::AND, dl, OpVT, Op.getOperand(0), One);
11890     return DAG.getSetCC(dl, VT, And, Zero, ISD::SETNE);
11891   }
11892 
11893   if (!VT.isVector() || VT.isScalableVector())
11894     return SDValue();
11895 
11896   if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType()))
11897     return LowerFixedLengthVectorTruncateToSVE(Op, DAG);
11898 
11899   return SDValue();
11900 }
11901 
11902 SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
11903                                                       SelectionDAG &DAG) const {
11904   EVT VT = Op.getValueType();
11905   SDLoc DL(Op);
11906   int64_t Cnt;
11907 
11908   if (!Op.getOperand(1).getValueType().isVector())
11909     return Op;
11910   unsigned EltSize = VT.getScalarSizeInBits();
11911 
11912   switch (Op.getOpcode()) {
11913   case ISD::SHL:
11914     if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT))
11915       return LowerToPredicatedOp(Op, DAG, AArch64ISD::SHL_PRED);
11916 
11917     if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize)
11918       return DAG.getNode(AArch64ISD::VSHL, DL, VT, Op.getOperand(0),
11919                          DAG.getConstant(Cnt, DL, MVT::i32));
11920     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
11921                        DAG.getConstant(Intrinsic::aarch64_neon_ushl, DL,
11922                                        MVT::i32),
11923                        Op.getOperand(0), Op.getOperand(1));
11924   case ISD::SRA:
11925   case ISD::SRL:
11926     if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT)) {
11927       unsigned Opc = Op.getOpcode() == ISD::SRA ? AArch64ISD::SRA_PRED
11928                                                 : AArch64ISD::SRL_PRED;
11929       return LowerToPredicatedOp(Op, DAG, Opc);
11930     }
11931 
11932     // Right shift immediate
11933     if (isVShiftRImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize) {
11934       unsigned Opc =
11935           (Op.getOpcode() == ISD::SRA) ? AArch64ISD::VASHR : AArch64ISD::VLSHR;
11936       return DAG.getNode(Opc, DL, VT, Op.getOperand(0),
11937                          DAG.getConstant(Cnt, DL, MVT::i32));
11938     }
11939 
    // Right shift register. Note that there is no shift-right-register
    // instruction, but the shift-left-register instruction takes a signed
    // value, where negative numbers specify a right shift.
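    // For example, (srl x, y) is emitted as (ushl x, (sub 0, y)).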
11943     unsigned Opc = (Op.getOpcode() == ISD::SRA) ? Intrinsic::aarch64_neon_sshl
11944                                                 : Intrinsic::aarch64_neon_ushl;
11945     // negate the shift amount
11946     SDValue NegShift = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
11947                                    Op.getOperand(1));
11948     SDValue NegShiftLeft =
11949         DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
11950                     DAG.getConstant(Opc, DL, MVT::i32), Op.getOperand(0),
11951                     NegShift);
11952     return NegShiftLeft;
11953   }
11954 
11955   llvm_unreachable("unexpected shift opcode");
11956 }
11957 
11958 static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
11959                                     AArch64CC::CondCode CC, bool NoNans, EVT VT,
11960                                     const SDLoc &dl, SelectionDAG &DAG) {
11961   EVT SrcVT = LHS.getValueType();
11962   assert(VT.getSizeInBits() == SrcVT.getSizeInBits() &&
11963          "function only supposed to emit natural comparisons");
11964 
11965   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
11966   APInt CnstBits(VT.getSizeInBits(), 0);
11967   APInt UndefBits(VT.getSizeInBits(), 0);
11968   bool IsCnst = BVN && resolveBuildVector(BVN, CnstBits, UndefBits);
11969   bool IsZero = IsCnst && (CnstBits == 0);
11970 
11971   if (SrcVT.getVectorElementType().isFloatingPoint()) {
11972     switch (CC) {
11973     default:
11974       return SDValue();
11975     case AArch64CC::NE: {
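      // There is no floating-point compare-not-equal; compare for equality
      // (against zero where possible) and invert the result.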
11976       SDValue Fcmeq;
11977       if (IsZero)
11978         Fcmeq = DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS);
11979       else
11980         Fcmeq = DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS);
11981       return DAG.getNOT(dl, Fcmeq, VT);
11982     }
11983     case AArch64CC::EQ:
11984       if (IsZero)
11985         return DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS);
11986       return DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS);
11987     case AArch64CC::GE:
11988       if (IsZero)
11989         return DAG.getNode(AArch64ISD::FCMGEz, dl, VT, LHS);
11990       return DAG.getNode(AArch64ISD::FCMGE, dl, VT, LHS, RHS);
11991     case AArch64CC::GT:
11992       if (IsZero)
11993         return DAG.getNode(AArch64ISD::FCMGTz, dl, VT, LHS);
11994       return DAG.getNode(AArch64ISD::FCMGT, dl, VT, LHS, RHS);
11995     case AArch64CC::LE:
11996       if (!NoNans)
11997         return SDValue();
      // If we ignore NaNs then we can use the LS implementation.
11999       LLVM_FALLTHROUGH;
12000     case AArch64CC::LS:
12001       if (IsZero)
12002         return DAG.getNode(AArch64ISD::FCMLEz, dl, VT, LHS);
12003       return DAG.getNode(AArch64ISD::FCMGE, dl, VT, RHS, LHS);
12004     case AArch64CC::LT:
12005       if (!NoNans)
12006         return SDValue();
      // If we ignore NaNs then we can use the MI implementation.
12008       LLVM_FALLTHROUGH;
12009     case AArch64CC::MI:
12010       if (IsZero)
12011         return DAG.getNode(AArch64ISD::FCMLTz, dl, VT, LHS);
12012       return DAG.getNode(AArch64ISD::FCMGT, dl, VT, RHS, LHS);
12013     }
12014   }
12015 
12016   switch (CC) {
12017   default:
12018     return SDValue();
12019   case AArch64CC::NE: {
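    // Likewise for integers: there is no CMNE, so emit CMEQ/CMEQz and invert.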
12020     SDValue Cmeq;
12021     if (IsZero)
12022       Cmeq = DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS);
12023     else
12024       Cmeq = DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS);
12025     return DAG.getNOT(dl, Cmeq, VT);
12026   }
12027   case AArch64CC::EQ:
12028     if (IsZero)
12029       return DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS);
12030     return DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS);
12031   case AArch64CC::GE:
12032     if (IsZero)
12033       return DAG.getNode(AArch64ISD::CMGEz, dl, VT, LHS);
12034     return DAG.getNode(AArch64ISD::CMGE, dl, VT, LHS, RHS);
12035   case AArch64CC::GT:
12036     if (IsZero)
12037       return DAG.getNode(AArch64ISD::CMGTz, dl, VT, LHS);
12038     return DAG.getNode(AArch64ISD::CMGT, dl, VT, LHS, RHS);
12039   case AArch64CC::LE:
12040     if (IsZero)
12041       return DAG.getNode(AArch64ISD::CMLEz, dl, VT, LHS);
12042     return DAG.getNode(AArch64ISD::CMGE, dl, VT, RHS, LHS);
12043   case AArch64CC::LS:
12044     return DAG.getNode(AArch64ISD::CMHS, dl, VT, RHS, LHS);
12045   case AArch64CC::LO:
12046     return DAG.getNode(AArch64ISD::CMHI, dl, VT, RHS, LHS);
12047   case AArch64CC::LT:
12048     if (IsZero)
12049       return DAG.getNode(AArch64ISD::CMLTz, dl, VT, LHS);
12050     return DAG.getNode(AArch64ISD::CMGT, dl, VT, RHS, LHS);
12051   case AArch64CC::HI:
12052     return DAG.getNode(AArch64ISD::CMHI, dl, VT, LHS, RHS);
12053   case AArch64CC::HS:
12054     return DAG.getNode(AArch64ISD::CMHS, dl, VT, LHS, RHS);
12055   }
12056 }
12057 
12058 SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
12059                                            SelectionDAG &DAG) const {
12060   if (Op.getValueType().isScalableVector())
12061     return LowerToPredicatedOp(Op, DAG, AArch64ISD::SETCC_MERGE_ZERO);
12062 
12063   if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType()))
12064     return LowerFixedLengthVectorSetccToSVE(Op, DAG);
12065 
12066   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
12067   SDValue LHS = Op.getOperand(0);
12068   SDValue RHS = Op.getOperand(1);
12069   EVT CmpVT = LHS.getValueType().changeVectorElementTypeToInteger();
12070   SDLoc dl(Op);
12071 
12072   if (LHS.getValueType().getVectorElementType().isInteger()) {
12073     assert(LHS.getValueType() == RHS.getValueType());
12074     AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
12075     SDValue Cmp =
12076         EmitVectorComparison(LHS, RHS, AArch64CC, false, CmpVT, dl, DAG);
12077     return DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
12078   }
12079 
12080   const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
12081 
  // Make v4f16 (only) fcmp operations utilise vector instructions.
  // v8f16 support will be a little more complicated.
12084   if (!FullFP16 && LHS.getValueType().getVectorElementType() == MVT::f16) {
12085     if (LHS.getValueType().getVectorNumElements() == 4) {
12086       LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, LHS);
12087       RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, RHS);
12088       SDValue NewSetcc = DAG.getSetCC(dl, MVT::v4i16, LHS, RHS, CC);
12089       DAG.ReplaceAllUsesWith(Op, NewSetcc);
12090       CmpVT = MVT::v4i32;
12091     } else
12092       return SDValue();
12093   }
12094 
12095   assert((!FullFP16 && LHS.getValueType().getVectorElementType() != MVT::f16) ||
12096           LHS.getValueType().getVectorElementType() != MVT::f128);
12097 
  // Unfortunately, the mapping of LLVM FP CCs onto AArch64 CCs isn't totally
  // clean. Some of them require two branches to implement.
12100   AArch64CC::CondCode CC1, CC2;
12101   bool ShouldInvert;
12102   changeVectorFPCCToAArch64CC(CC, CC1, CC2, ShouldInvert);
12103 
  bool NoNaNs =
      getTargetMachine().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs();
12105   SDValue Cmp =
12106       EmitVectorComparison(LHS, RHS, CC1, NoNaNs, CmpVT, dl, DAG);
12107   if (!Cmp.getNode())
12108     return SDValue();
12109 
12110   if (CC2 != AArch64CC::AL) {
12111     SDValue Cmp2 =
12112         EmitVectorComparison(LHS, RHS, CC2, NoNaNs, CmpVT, dl, DAG);
12113     if (!Cmp2.getNode())
12114       return SDValue();
12115 
12116     Cmp = DAG.getNode(ISD::OR, dl, CmpVT, Cmp, Cmp2);
12117   }
12118 
12119   Cmp = DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
12120 
12121   if (ShouldInvert)
12122     Cmp = DAG.getNOT(dl, Cmp, Cmp.getValueType());
12123 
12124   return Cmp;
12125 }
12126 
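// Emit an across-vector reduction (e.g. AArch64ISD::UADDV) over the operand of
// ScalarOp and extract lane 0 to produce the scalar result.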
12127 static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp,
12128                                   SelectionDAG &DAG) {
12129   SDValue VecOp = ScalarOp.getOperand(0);
12130   auto Rdx = DAG.getNode(Op, DL, VecOp.getSimpleValueType(), VecOp);
12131   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarOp.getValueType(), Rdx,
12132                      DAG.getConstant(0, DL, MVT::i64));
12133 }
12134 
12135 SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op,
12136                                               SelectionDAG &DAG) const {
12137   SDValue Src = Op.getOperand(0);
12138 
12139   // Try to lower fixed length reductions to SVE.
12140   EVT SrcVT = Src.getValueType();
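  // NEON has no across-vector reductions for the logical ops or FADD, nor for
  // 64-bit elements (other than ADD), so prefer the SVE lowering for those.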
12141   bool OverrideNEON = Op.getOpcode() == ISD::VECREDUCE_AND ||
12142                       Op.getOpcode() == ISD::VECREDUCE_OR ||
12143                       Op.getOpcode() == ISD::VECREDUCE_XOR ||
12144                       Op.getOpcode() == ISD::VECREDUCE_FADD ||
12145                       (Op.getOpcode() != ISD::VECREDUCE_ADD &&
12146                        SrcVT.getVectorElementType() == MVT::i64);
12147   if (SrcVT.isScalableVector() ||
12148       useSVEForFixedLengthVectorVT(
12149           SrcVT, OverrideNEON && Subtarget->useSVEForFixedLengthVectors())) {
12150 
12151     if (SrcVT.getVectorElementType() == MVT::i1)
12152       return LowerPredReductionToSVE(Op, DAG);
12153 
12154     switch (Op.getOpcode()) {
12155     case ISD::VECREDUCE_ADD:
12156       return LowerReductionToSVE(AArch64ISD::UADDV_PRED, Op, DAG);
12157     case ISD::VECREDUCE_AND:
12158       return LowerReductionToSVE(AArch64ISD::ANDV_PRED, Op, DAG);
12159     case ISD::VECREDUCE_OR:
12160       return LowerReductionToSVE(AArch64ISD::ORV_PRED, Op, DAG);
12161     case ISD::VECREDUCE_SMAX:
12162       return LowerReductionToSVE(AArch64ISD::SMAXV_PRED, Op, DAG);
12163     case ISD::VECREDUCE_SMIN:
12164       return LowerReductionToSVE(AArch64ISD::SMINV_PRED, Op, DAG);
12165     case ISD::VECREDUCE_UMAX:
12166       return LowerReductionToSVE(AArch64ISD::UMAXV_PRED, Op, DAG);
12167     case ISD::VECREDUCE_UMIN:
12168       return LowerReductionToSVE(AArch64ISD::UMINV_PRED, Op, DAG);
12169     case ISD::VECREDUCE_XOR:
12170       return LowerReductionToSVE(AArch64ISD::EORV_PRED, Op, DAG);
12171     case ISD::VECREDUCE_FADD:
12172       return LowerReductionToSVE(AArch64ISD::FADDV_PRED, Op, DAG);
12173     case ISD::VECREDUCE_FMAX:
12174       return LowerReductionToSVE(AArch64ISD::FMAXNMV_PRED, Op, DAG);
12175     case ISD::VECREDUCE_FMIN:
12176       return LowerReductionToSVE(AArch64ISD::FMINNMV_PRED, Op, DAG);
12177     default:
12178       llvm_unreachable("Unhandled fixed length reduction");
12179     }
12180   }
12181 
12182   // Lower NEON reductions.
12183   SDLoc dl(Op);
12184   switch (Op.getOpcode()) {
12185   case ISD::VECREDUCE_ADD:
12186     return getReductionSDNode(AArch64ISD::UADDV, dl, Op, DAG);
12187   case ISD::VECREDUCE_SMAX:
12188     return getReductionSDNode(AArch64ISD::SMAXV, dl, Op, DAG);
12189   case ISD::VECREDUCE_SMIN:
12190     return getReductionSDNode(AArch64ISD::SMINV, dl, Op, DAG);
12191   case ISD::VECREDUCE_UMAX:
12192     return getReductionSDNode(AArch64ISD::UMAXV, dl, Op, DAG);
12193   case ISD::VECREDUCE_UMIN:
12194     return getReductionSDNode(AArch64ISD::UMINV, dl, Op, DAG);
12195   case ISD::VECREDUCE_FMAX: {
12196     return DAG.getNode(
12197         ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(),
12198         DAG.getConstant(Intrinsic::aarch64_neon_fmaxnmv, dl, MVT::i32),
12199         Src);
12200   }
12201   case ISD::VECREDUCE_FMIN: {
12202     return DAG.getNode(
12203         ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(),
12204         DAG.getConstant(Intrinsic::aarch64_neon_fminnmv, dl, MVT::i32),
12205         Src);
12206   }
12207   default:
12208     llvm_unreachable("Unhandled reduction");
12209   }
12210 }
12211 
12212 SDValue AArch64TargetLowering::LowerATOMIC_LOAD_SUB(SDValue Op,
12213                                                     SelectionDAG &DAG) const {
12214   auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
12215   if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics())
12216     return SDValue();
12217 
12218   // LSE has an atomic load-add instruction, but not a load-sub.
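  // Rewrite the subtraction as an addition of the negated operand so that
  // LDADD can be used: x - y becomes x + (0 - y).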
12219   SDLoc dl(Op);
12220   MVT VT = Op.getSimpleValueType();
12221   SDValue RHS = Op.getOperand(2);
12222   AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode());
12223   RHS = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), RHS);
12224   return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, AN->getMemoryVT(),
12225                        Op.getOperand(0), Op.getOperand(1), RHS,
12226                        AN->getMemOperand());
12227 }
12228 
12229 SDValue AArch64TargetLowering::LowerATOMIC_LOAD_AND(SDValue Op,
12230                                                     SelectionDAG &DAG) const {
12231   auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
12232   if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics())
12233     return SDValue();
12234 
12235   // LSE has an atomic load-clear instruction, but not a load-and.
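  // Rewrite the AND as a bit-clear of the complemented operand so that LDCLR
  // can be used: x & y becomes x & ~(~y).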
12236   SDLoc dl(Op);
12237   MVT VT = Op.getSimpleValueType();
12238   SDValue RHS = Op.getOperand(2);
12239   AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode());
12240   RHS = DAG.getNode(ISD::XOR, dl, VT, DAG.getConstant(-1ULL, dl, VT), RHS);
12241   return DAG.getAtomic(ISD::ATOMIC_LOAD_CLR, dl, AN->getMemoryVT(),
12242                        Op.getOperand(0), Op.getOperand(1), RHS,
12243                        AN->getMemOperand());
12244 }
12245 
12246 SDValue AArch64TargetLowering::LowerWindowsDYNAMIC_STACKALLOC(
12247     SDValue Op, SDValue Chain, SDValue &Size, SelectionDAG &DAG) const {
12248   SDLoc dl(Op);
12249   EVT PtrVT = getPointerTy(DAG.getDataLayout());
12250   SDValue Callee = DAG.getTargetExternalSymbol("__chkstk", PtrVT, 0);
12251 
12252   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
12253   const uint32_t *Mask = TRI->getWindowsStackProbePreservedMask();
12254   if (Subtarget->hasCustomCallingConv())
12255     TRI->UpdateCustomCallPreservedMask(DAG.getMachineFunction(), &Mask);
12256 
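  // __chkstk expects the allocation size in X15 in units of 16 bytes, hence
  // the shift right by 4 here and the shift left by 4 after the call.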
12257   Size = DAG.getNode(ISD::SRL, dl, MVT::i64, Size,
12258                      DAG.getConstant(4, dl, MVT::i64));
12259   Chain = DAG.getCopyToReg(Chain, dl, AArch64::X15, Size, SDValue());
12260   Chain =
12261       DAG.getNode(AArch64ISD::CALL, dl, DAG.getVTList(MVT::Other, MVT::Glue),
12262                   Chain, Callee, DAG.getRegister(AArch64::X15, MVT::i64),
12263                   DAG.getRegisterMask(Mask), Chain.getValue(1));
12264   // To match the actual intent better, we should read the output from X15 here
12265   // again (instead of potentially spilling it to the stack), but rereading Size
12266   // from X15 here doesn't work at -O0, since it thinks that X15 is undefined
12267   // here.
12268 
12269   Size = DAG.getNode(ISD::SHL, dl, MVT::i64, Size,
12270                      DAG.getConstant(4, dl, MVT::i64));
12271   return Chain;
12272 }
12273 
12274 SDValue
12275 AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
12276                                                SelectionDAG &DAG) const {
12277   assert(Subtarget->isTargetWindows() &&
12278          "Only Windows alloca probing supported");
12279   SDLoc dl(Op);
12280   // Get the inputs.
12281   SDNode *Node = Op.getNode();
12282   SDValue Chain = Op.getOperand(0);
12283   SDValue Size = Op.getOperand(1);
12284   MaybeAlign Align =
12285       cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
12286   EVT VT = Node->getValueType(0);
12287 
12288   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
12289           "no-stack-arg-probe")) {
12290     SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64);
12291     Chain = SP.getValue(1);
12292     SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size);
12293     if (Align)
12294       SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
12295                        DAG.getConstant(-(uint64_t)Align->value(), dl, VT));
12296     Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP);
12297     SDValue Ops[2] = {SP, Chain};
12298     return DAG.getMergeValues(Ops, dl);
12299   }
12300 
12301   Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
12302 
12303   Chain = LowerWindowsDYNAMIC_STACKALLOC(Op, Chain, Size, DAG);
12304 
12305   SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64);
12306   Chain = SP.getValue(1);
12307   SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size);
12308   if (Align)
12309     SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
12310                      DAG.getConstant(-(uint64_t)Align->value(), dl, VT));
12311   Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP);
12312 
12313   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
12314                              DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
12315 
12316   SDValue Ops[2] = {SP, Chain};
12317   return DAG.getMergeValues(Ops, dl);
12318 }
12319 
12320 SDValue AArch64TargetLowering::LowerVSCALE(SDValue Op,
12321                                            SelectionDAG &DAG) const {
12322   EVT VT = Op.getValueType();
12323   assert(VT != MVT::i64 && "Expected illegal VSCALE node");
12324 
12325   SDLoc DL(Op);
12326   APInt MulImm = cast<ConstantSDNode>(Op.getOperand(0))->getAPIntValue();
12327   return DAG.getZExtOrTrunc(DAG.getVScale(DL, MVT::i64, MulImm.sext(64)), DL,
12328                             VT);
12329 }
12330 
12331 /// Set the IntrinsicInfo for the `aarch64_sve_st<N>` intrinsics.
12332 template <unsigned NumVecs>
12333 static bool
12334 setInfoSVEStN(const AArch64TargetLowering &TLI, const DataLayout &DL,
12335               AArch64TargetLowering::IntrinsicInfo &Info, const CallInst &CI) {
12336   Info.opc = ISD::INTRINSIC_VOID;
12337   // Retrieve EC from first vector argument.
12338   const EVT VT = TLI.getMemValueType(DL, CI.getArgOperand(0)->getType());
12339   ElementCount EC = VT.getVectorElementCount();
12340 #ifndef NDEBUG
12341   // Check the assumption that all input vectors are the same type.
12342   for (unsigned I = 0; I < NumVecs; ++I)
12343     assert(VT == TLI.getMemValueType(DL, CI.getArgOperand(I)->getType()) &&
12344            "Invalid type.");
12345 #endif
12346   // memVT is `NumVecs * VT`.
12347   Info.memVT = EVT::getVectorVT(CI.getType()->getContext(), VT.getScalarType(),
12348                                 EC * NumVecs);
12349   Info.ptrVal = CI.getArgOperand(CI.arg_size() - 1);
12350   Info.offset = 0;
12351   Info.align.reset();
12352   Info.flags = MachineMemOperand::MOStore;
12353   return true;
12354 }
12355 
12356 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
12357 /// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
12358 /// specified in the intrinsic calls.
12359 bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
12360                                                const CallInst &I,
12361                                                MachineFunction &MF,
12362                                                unsigned Intrinsic) const {
12363   auto &DL = I.getModule()->getDataLayout();
12364   switch (Intrinsic) {
12365   case Intrinsic::aarch64_sve_st2:
12366     return setInfoSVEStN<2>(*this, DL, Info, I);
12367   case Intrinsic::aarch64_sve_st3:
12368     return setInfoSVEStN<3>(*this, DL, Info, I);
12369   case Intrinsic::aarch64_sve_st4:
12370     return setInfoSVEStN<4>(*this, DL, Info, I);
12371   case Intrinsic::aarch64_neon_ld2:
12372   case Intrinsic::aarch64_neon_ld3:
12373   case Intrinsic::aarch64_neon_ld4:
12374   case Intrinsic::aarch64_neon_ld1x2:
12375   case Intrinsic::aarch64_neon_ld1x3:
12376   case Intrinsic::aarch64_neon_ld1x4:
12377   case Intrinsic::aarch64_neon_ld2lane:
12378   case Intrinsic::aarch64_neon_ld3lane:
12379   case Intrinsic::aarch64_neon_ld4lane:
12380   case Intrinsic::aarch64_neon_ld2r:
12381   case Intrinsic::aarch64_neon_ld3r:
12382   case Intrinsic::aarch64_neon_ld4r: {
12383     Info.opc = ISD::INTRINSIC_W_CHAIN;
12384     // Conservatively set memVT to the entire set of vectors loaded.
12385     uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
12386     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
12387     Info.ptrVal = I.getArgOperand(I.arg_size() - 1);
12388     Info.offset = 0;
12389     Info.align.reset();
12390     // volatile loads with NEON intrinsics not supported
12391     Info.flags = MachineMemOperand::MOLoad;
12392     return true;
12393   }
12394   case Intrinsic::aarch64_neon_st2:
12395   case Intrinsic::aarch64_neon_st3:
12396   case Intrinsic::aarch64_neon_st4:
12397   case Intrinsic::aarch64_neon_st1x2:
12398   case Intrinsic::aarch64_neon_st1x3:
12399   case Intrinsic::aarch64_neon_st1x4:
12400   case Intrinsic::aarch64_neon_st2lane:
12401   case Intrinsic::aarch64_neon_st3lane:
12402   case Intrinsic::aarch64_neon_st4lane: {
12403     Info.opc = ISD::INTRINSIC_VOID;
12404     // Conservatively set memVT to the entire set of vectors stored.
12405     unsigned NumElts = 0;
12406     for (const Value *Arg : I.args()) {
12407       Type *ArgTy = Arg->getType();
12408       if (!ArgTy->isVectorTy())
12409         break;
12410       NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
12411     }
12412     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
12413     Info.ptrVal = I.getArgOperand(I.arg_size() - 1);
12414     Info.offset = 0;
12415     Info.align.reset();
12416     // volatile stores with NEON intrinsics not supported
12417     Info.flags = MachineMemOperand::MOStore;
12418     return true;
12419   }
12420   case Intrinsic::aarch64_ldaxr:
12421   case Intrinsic::aarch64_ldxr: {
12422     Type *ValTy = I.getParamElementType(0);
12423     Info.opc = ISD::INTRINSIC_W_CHAIN;
12424     Info.memVT = MVT::getVT(ValTy);
12425     Info.ptrVal = I.getArgOperand(0);
12426     Info.offset = 0;
12427     Info.align = DL.getABITypeAlign(ValTy);
12428     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
12429     return true;
12430   }
12431   case Intrinsic::aarch64_stlxr:
12432   case Intrinsic::aarch64_stxr: {
12433     Type *ValTy = I.getParamElementType(1);
12434     Info.opc = ISD::INTRINSIC_W_CHAIN;
12435     Info.memVT = MVT::getVT(ValTy);
12436     Info.ptrVal = I.getArgOperand(1);
12437     Info.offset = 0;
12438     Info.align = DL.getABITypeAlign(ValTy);
12439     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
12440     return true;
12441   }
12442   case Intrinsic::aarch64_ldaxp:
12443   case Intrinsic::aarch64_ldxp:
12444     Info.opc = ISD::INTRINSIC_W_CHAIN;
12445     Info.memVT = MVT::i128;
12446     Info.ptrVal = I.getArgOperand(0);
12447     Info.offset = 0;
12448     Info.align = Align(16);
12449     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
12450     return true;
12451   case Intrinsic::aarch64_stlxp:
12452   case Intrinsic::aarch64_stxp:
12453     Info.opc = ISD::INTRINSIC_W_CHAIN;
12454     Info.memVT = MVT::i128;
12455     Info.ptrVal = I.getArgOperand(2);
12456     Info.offset = 0;
12457     Info.align = Align(16);
12458     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
12459     return true;
12460   case Intrinsic::aarch64_sve_ldnt1: {
12461     Type *ElTy = cast<VectorType>(I.getType())->getElementType();
12462     Info.opc = ISD::INTRINSIC_W_CHAIN;
12463     Info.memVT = MVT::getVT(I.getType());
12464     Info.ptrVal = I.getArgOperand(1);
12465     Info.offset = 0;
12466     Info.align = DL.getABITypeAlign(ElTy);
12467     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MONonTemporal;
12468     return true;
12469   }
12470   case Intrinsic::aarch64_sve_stnt1: {
12471     Type *ElTy =
12472         cast<VectorType>(I.getArgOperand(0)->getType())->getElementType();
12473     Info.opc = ISD::INTRINSIC_W_CHAIN;
12474     Info.memVT = MVT::getVT(I.getOperand(0)->getType());
12475     Info.ptrVal = I.getArgOperand(2);
12476     Info.offset = 0;
12477     Info.align = DL.getABITypeAlign(ElTy);
12478     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MONonTemporal;
12479     return true;
12480   }
12481   case Intrinsic::aarch64_mops_memset_tag: {
12482     Value *Dst = I.getArgOperand(0);
12483     Value *Val = I.getArgOperand(1);
12484     Info.opc = ISD::INTRINSIC_W_CHAIN;
12485     Info.memVT = MVT::getVT(Val->getType());
12486     Info.ptrVal = Dst;
12487     Info.offset = 0;
12488     Info.align = I.getParamAlign(0).valueOrOne();
12489     Info.flags = MachineMemOperand::MOStore;
12490     // The size of the memory being operated on is unknown at this point
12491     Info.size = MemoryLocation::UnknownSize;
12492     return true;
12493   }
12494   default:
12495     break;
12496   }
12497 
12498   return false;
12499 }
12500 
12501 bool AArch64TargetLowering::shouldReduceLoadWidth(SDNode *Load,
12502                                                   ISD::LoadExtType ExtTy,
12503                                                   EVT NewVT) const {
12504   // TODO: This may be worth removing. Check regression tests for diffs.
12505   if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT))
12506     return false;
12507 
12508   // If we're reducing the load width in order to avoid having to use an extra
12509   // instruction to do extension then it's probably a good idea.
12510   if (ExtTy != ISD::NON_EXTLOAD)
12511     return true;
12512   // Don't reduce load width if it would prevent us from combining a shift into
12513   // the offset.
12514   MemSDNode *Mem = dyn_cast<MemSDNode>(Load);
12515   assert(Mem);
12516   const SDValue &Base = Mem->getBasePtr();
12517   if (Base.getOpcode() == ISD::ADD &&
12518       Base.getOperand(1).getOpcode() == ISD::SHL &&
12519       Base.getOperand(1).hasOneUse() &&
12520       Base.getOperand(1).getOperand(1).getOpcode() == ISD::Constant) {
12521     // It's unknown whether a scalable vector has a power-of-2 bitwidth.
12522     if (Mem->getMemoryVT().isScalableVector())
12523       return false;
12524     // The shift can be combined if it matches the size of the value being
12525     // loaded (and so reducing the width would make it not match).
12526     uint64_t ShiftAmount = Base.getOperand(1).getConstantOperandVal(1);
12527     uint64_t LoadBytes = Mem->getMemoryVT().getSizeInBits()/8;
12528     if (ShiftAmount == Log2_32(LoadBytes))
12529       return false;
12530   }
12531   // We have no reason to disallow reducing the load width, so allow it.
12532   return true;
12533 }
12534 
// Truncation from a 64-bit GPR to a 32-bit GPR is free.
12536 bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
12537   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
12538     return false;
12539   uint64_t NumBits1 = Ty1->getPrimitiveSizeInBits().getFixedSize();
12540   uint64_t NumBits2 = Ty2->getPrimitiveSizeInBits().getFixedSize();
12541   return NumBits1 > NumBits2;
12542 }
12543 bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
12544   if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger())
12545     return false;
12546   uint64_t NumBits1 = VT1.getFixedSizeInBits();
12547   uint64_t NumBits2 = VT2.getFixedSizeInBits();
12548   return NumBits1 > NumBits2;
12549 }
12550 
/// Check if it is profitable to hoist an instruction in then/else into if.
/// It is not profitable if I and its user can form an FMA instruction,
/// because we prefer FMSUB/FMADD.
12554 bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const {
12555   if (I->getOpcode() != Instruction::FMul)
12556     return true;
12557 
12558   if (!I->hasOneUse())
12559     return true;
12560 
12561   Instruction *User = I->user_back();
12562 
12563   if (!(User->getOpcode() == Instruction::FSub ||
12564         User->getOpcode() == Instruction::FAdd))
12565     return true;
12566 
12567   const TargetOptions &Options = getTargetMachine().Options;
12568   const Function *F = I->getFunction();
12569   const DataLayout &DL = F->getParent()->getDataLayout();
12570   Type *Ty = User->getOperand(0)->getType();
12571 
12572   return !(isFMAFasterThanFMulAndFAdd(*F, Ty) &&
12573            isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
12574            (Options.AllowFPOpFusion == FPOpFusion::Fast ||
12575             Options.UnsafeFPMath));
12576 }
12577 
12578 // All 32-bit GPR operations implicitly zero the high-half of the corresponding
12579 // 64-bit GPR.
12580 bool AArch64TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
12581   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
12582     return false;
12583   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
12584   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
12585   return NumBits1 == 32 && NumBits2 == 64;
12586 }
12587 bool AArch64TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
12588   if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger())
12589     return false;
12590   unsigned NumBits1 = VT1.getSizeInBits();
12591   unsigned NumBits2 = VT2.getSizeInBits();
12592   return NumBits1 == 32 && NumBits2 == 64;
12593 }
12594 
12595 bool AArch64TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
12596   EVT VT1 = Val.getValueType();
12597   if (isZExtFree(VT1, VT2)) {
12598     return true;
12599   }
12600 
12601   if (Val.getOpcode() != ISD::LOAD)
12602     return false;
12603 
12604   // 8-, 16-, and 32-bit integer loads all implicitly zero-extend.
12605   return (VT1.isSimple() && !VT1.isVector() && VT1.isInteger() &&
12606           VT2.isSimple() && !VT2.isVector() && VT2.isInteger() &&
12607           VT1.getSizeInBits() <= 32);
12608 }
12609 
12610 bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {
12611   if (isa<FPExtInst>(Ext))
12612     return false;
12613 
12614   // Vector types are not free.
12615   if (Ext->getType()->isVectorTy())
12616     return false;
12617 
12618   for (const Use &U : Ext->uses()) {
12619     // The extension is free if we can fold it with a left shift in an
12620     // addressing mode or an arithmetic operation: add, sub, and cmp.
12621 
12622     // Is there a shift?
12623     const Instruction *Instr = cast<Instruction>(U.getUser());
12624 
12625     // Is this a constant shift?
12626     switch (Instr->getOpcode()) {
12627     case Instruction::Shl:
12628       if (!isa<ConstantInt>(Instr->getOperand(1)))
12629         return false;
12630       break;
12631     case Instruction::GetElementPtr: {
12632       gep_type_iterator GTI = gep_type_begin(Instr);
12633       auto &DL = Ext->getModule()->getDataLayout();
12634       std::advance(GTI, U.getOperandNo()-1);
12635       Type *IdxTy = GTI.getIndexedType();
12636       // This extension will end up with a shift because of the scaling factor.
12637       // 8-bit sized types have a scaling factor of 1, thus a shift amount of 0.
12638       // Get the shift amount based on the scaling factor:
12639       // log2(sizeof(IdxTy)) - log2(8).
12640       uint64_t ShiftAmt =
12641         countTrailingZeros(DL.getTypeStoreSizeInBits(IdxTy).getFixedSize()) - 3;
12642       // Is the constant foldable in the shift of the addressing mode?
12643       // I.e., shift amount is between 1 and 4 inclusive.
12644       if (ShiftAmt == 0 || ShiftAmt > 4)
12645         return false;
12646       break;
12647     }
12648     case Instruction::Trunc:
12649       // Check if this is a noop.
12650       // trunc(sext ty1 to ty2) to ty1.
12651       if (Instr->getType() == Ext->getOperand(0)->getType())
12652         continue;
12653       LLVM_FALLTHROUGH;
12654     default:
12655       return false;
12656     }
12657 
12658     // At this point we can use the bfm family, so this extension is free
12659     // for that use.
12660   }
12661   return true;
12662 }
12663 
12664 /// Check if both Op1 and Op2 are shufflevector extracts of either the lower
12665 /// or upper half of the vector elements.
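/// For example, for an <8 x i16> source both operands must be shuffles with
/// mask <0, 1, 2, 3> (lower half) or both with mask <4, 5, 6, 7> (upper half).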
12666 static bool areExtractShuffleVectors(Value *Op1, Value *Op2) {
12667   auto areTypesHalfed = [](Value *FullV, Value *HalfV) {
12668     auto *FullTy = FullV->getType();
12669     auto *HalfTy = HalfV->getType();
12670     return FullTy->getPrimitiveSizeInBits().getFixedSize() ==
12671            2 * HalfTy->getPrimitiveSizeInBits().getFixedSize();
12672   };
12673 
12674   auto extractHalf = [](Value *FullV, Value *HalfV) {
12675     auto *FullVT = cast<FixedVectorType>(FullV->getType());
12676     auto *HalfVT = cast<FixedVectorType>(HalfV->getType());
12677     return FullVT->getNumElements() == 2 * HalfVT->getNumElements();
12678   };
12679 
12680   ArrayRef<int> M1, M2;
12681   Value *S1Op1, *S2Op1;
12682   if (!match(Op1, m_Shuffle(m_Value(S1Op1), m_Undef(), m_Mask(M1))) ||
12683       !match(Op2, m_Shuffle(m_Value(S2Op1), m_Undef(), m_Mask(M2))))
12684     return false;
12685 
12686   // Check that the operands are half as wide as the result and we extract
12687   // half of the elements of the input vectors.
12688   if (!areTypesHalfed(S1Op1, Op1) || !areTypesHalfed(S2Op1, Op2) ||
12689       !extractHalf(S1Op1, Op1) || !extractHalf(S2Op1, Op2))
12690     return false;
12691 
  // Check that the mask extracts either the lower or the upper half of the
  // vector elements.
12694   int M1Start = -1;
12695   int M2Start = -1;
12696   int NumElements = cast<FixedVectorType>(Op1->getType())->getNumElements() * 2;
12697   if (!ShuffleVectorInst::isExtractSubvectorMask(M1, NumElements, M1Start) ||
12698       !ShuffleVectorInst::isExtractSubvectorMask(M2, NumElements, M2Start) ||
12699       M1Start != M2Start || (M1Start != 0 && M2Start != (NumElements / 2)))
12700     return false;
12701 
12702   return true;
12703 }
12704 
12705 /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth
12706 /// of the vector elements.
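/// For example, each of Ext1 and Ext2 is a sext or zext from <8 x i8> to
/// <8 x i16>.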
12707 static bool areExtractExts(Value *Ext1, Value *Ext2) {
12708   auto areExtDoubled = [](Instruction *Ext) {
12709     return Ext->getType()->getScalarSizeInBits() ==
12710            2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
12711   };
12712 
12713   if (!match(Ext1, m_ZExtOrSExt(m_Value())) ||
12714       !match(Ext2, m_ZExtOrSExt(m_Value())) ||
12715       !areExtDoubled(cast<Instruction>(Ext1)) ||
12716       !areExtDoubled(cast<Instruction>(Ext2)))
12717     return false;
12718 
12719   return true;
12720 }
12721 
12722 /// Check if Op could be used with vmull_high_p64 intrinsic.
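/// That is, Op must extract lane 1 of a two-element vector, i.e. the high
/// half of a 128-bit vector such as <2 x i64>.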
12723 static bool isOperandOfVmullHighP64(Value *Op) {
12724   Value *VectorOperand = nullptr;
12725   ConstantInt *ElementIndex = nullptr;
12726   return match(Op, m_ExtractElt(m_Value(VectorOperand),
12727                                 m_ConstantInt(ElementIndex))) &&
12728          ElementIndex->getValue() == 1 &&
12729          isa<FixedVectorType>(VectorOperand->getType()) &&
12730          cast<FixedVectorType>(VectorOperand->getType())->getNumElements() == 2;
12731 }
12732 
12733 /// Check if Op1 and Op2 could be used with vmull_high_p64 intrinsic.
12734 static bool areOperandsOfVmullHighP64(Value *Op1, Value *Op2) {
12735   return isOperandOfVmullHighP64(Op1) && isOperandOfVmullHighP64(Op2);
12736 }
12737 
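/// Returns true if V is a shufflevector whose mask elements are all the
/// same, i.e. it broadcasts a single lane of its input.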
12738 static bool isSplatShuffle(Value *V) {
12739   if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V))
12740     return is_splat(Shuf->getShuffleMask());
12741   return false;
12742 }
12743 
12744 /// Check if sinking \p I's operands to I's basic block is profitable, because
12745 /// the operands can be folded into a target instruction, e.g.
/// shufflevector extracts and/or sext/zext can be folded into (u,s)subl(2).
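/// For example, sinking
///   %h1 = shuffle %a, undef, <8, 9, 10, 11, 12, 13, 14, 15> ; high half
///   %h2 = shuffle %b, undef, <8, 9, 10, 11, 12, 13, 14, 15> ; high half
///   %e1 = sext <8 x i8> %h1 to <8 x i16>
///   %e2 = sext <8 x i8> %h2 to <8 x i16>
/// next to 'sub <8 x i16> %e1, %e2' typically lets the whole sequence be
/// selected as a single ssubl2 (here %a and %b are <16 x i8> values).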
12747 bool AArch64TargetLowering::shouldSinkOperands(
12748     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
12749   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
12750     switch (II->getIntrinsicID()) {
12751     case Intrinsic::aarch64_neon_smull:
12752     case Intrinsic::aarch64_neon_umull:
12753       if (areExtractShuffleVectors(II->getOperand(0), II->getOperand(1))) {
12754         Ops.push_back(&II->getOperandUse(0));
12755         Ops.push_back(&II->getOperandUse(1));
12756         return true;
12757       }
12758       LLVM_FALLTHROUGH;
12759 
12760     case Intrinsic::fma:
12761       if (isa<VectorType>(I->getType()) &&
12762           cast<VectorType>(I->getType())->getElementType()->isHalfTy() &&
12763           !Subtarget->hasFullFP16())
12764         return false;
12765       LLVM_FALLTHROUGH;
12766     case Intrinsic::aarch64_neon_sqdmull:
12767     case Intrinsic::aarch64_neon_sqdmulh:
12768     case Intrinsic::aarch64_neon_sqrdmulh:
12769       // Sink splats for index lane variants
12770       if (isSplatShuffle(II->getOperand(0)))
12771         Ops.push_back(&II->getOperandUse(0));
12772       if (isSplatShuffle(II->getOperand(1)))
12773         Ops.push_back(&II->getOperandUse(1));
12774       return !Ops.empty();
12775     case Intrinsic::aarch64_sme_write_horiz:
12776     case Intrinsic::aarch64_sme_write_vert:
12777     case Intrinsic::aarch64_sme_writeq_horiz:
12778     case Intrinsic::aarch64_sme_writeq_vert: {
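      // Sink adds that compute the tile slice index so instruction selection
      // can fold the constant offset into the base + offset form of the
      // slice index.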
12779       auto *Idx = dyn_cast<Instruction>(II->getOperand(1));
12780       if (!Idx || Idx->getOpcode() != Instruction::Add)
12781         return false;
12782       Ops.push_back(&II->getOperandUse(1));
12783       return true;
12784     }
12785     case Intrinsic::aarch64_sme_read_horiz:
12786     case Intrinsic::aarch64_sme_read_vert:
12787     case Intrinsic::aarch64_sme_readq_horiz:
12788     case Intrinsic::aarch64_sme_readq_vert:
12789     case Intrinsic::aarch64_sme_ld1b_vert:
12790     case Intrinsic::aarch64_sme_ld1h_vert:
12791     case Intrinsic::aarch64_sme_ld1w_vert:
12792     case Intrinsic::aarch64_sme_ld1d_vert:
12793     case Intrinsic::aarch64_sme_ld1q_vert:
12794     case Intrinsic::aarch64_sme_st1b_vert:
12795     case Intrinsic::aarch64_sme_st1h_vert:
12796     case Intrinsic::aarch64_sme_st1w_vert:
12797     case Intrinsic::aarch64_sme_st1d_vert:
12798     case Intrinsic::aarch64_sme_st1q_vert:
12799     case Intrinsic::aarch64_sme_ld1b_horiz:
12800     case Intrinsic::aarch64_sme_ld1h_horiz:
12801     case Intrinsic::aarch64_sme_ld1w_horiz:
12802     case Intrinsic::aarch64_sme_ld1d_horiz:
12803     case Intrinsic::aarch64_sme_ld1q_horiz:
12804     case Intrinsic::aarch64_sme_st1b_horiz:
12805     case Intrinsic::aarch64_sme_st1h_horiz:
12806     case Intrinsic::aarch64_sme_st1w_horiz:
12807     case Intrinsic::aarch64_sme_st1d_horiz:
12808     case Intrinsic::aarch64_sme_st1q_horiz: {
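      // As above, but for these intrinsics the sunk index is operand 3.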
12809       auto *Idx = dyn_cast<Instruction>(II->getOperand(3));
12810       if (!Idx || Idx->getOpcode() != Instruction::Add)
12811         return false;
12812       Ops.push_back(&II->getOperandUse(3));
12813       return true;
12814     }
12815     case Intrinsic::aarch64_neon_pmull:
12816       if (!areExtractShuffleVectors(II->getOperand(0), II->getOperand(1)))
12817         return false;
12818       Ops.push_back(&II->getOperandUse(0));
12819       Ops.push_back(&II->getOperandUse(1));
12820       return true;
12821     case Intrinsic::aarch64_neon_pmull64:
12822       if (!areOperandsOfVmullHighP64(II->getArgOperand(0),
12823                                      II->getArgOperand(1)))
12824         return false;
12825       Ops.push_back(&II->getArgOperandUse(0));
12826       Ops.push_back(&II->getArgOperandUse(1));
12827       return true;
12828     default:
12829       return false;
12830     }
12831   }
12832 
12833   if (!I->getType()->isVectorTy())
12834     return false;
12835 
12836   switch (I->getOpcode()) {
12837   case Instruction::Sub:
12838   case Instruction::Add: {
12839     if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
12840       return false;
12841 
12842     // If the exts' operands extract either the lower or upper elements, we
12843     // can sink them too.
12844     auto Ext1 = cast<Instruction>(I->getOperand(0));
12845     auto Ext2 = cast<Instruction>(I->getOperand(1));
12846     if (areExtractShuffleVectors(Ext1->getOperand(0), Ext2->getOperand(0))) {
12847       Ops.push_back(&Ext1->getOperandUse(0));
12848       Ops.push_back(&Ext2->getOperandUse(0));
12849     }
12850 
12851     Ops.push_back(&I->getOperandUse(0));
12852     Ops.push_back(&I->getOperandUse(1));
12853 
12854     return true;
12855   }
12856   case Instruction::Mul: {
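    // Look for operands that are splats (a shufflevector of an insertelement
    // into lane 0) of a sign- or zero-extended scalar. Sinking the splat next
    // to the multiply keeps the extend and broadcast visible to instruction
    // selection so the widening-multiply patterns can match.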
12857     bool IsProfitable = false;
12858     for (auto &Op : I->operands()) {
12859       // Make sure we are not already sinking this operand
12860       if (any_of(Ops, [&](Use *U) { return U->get() == Op; }))
12861         continue;
12862 
12863       ShuffleVectorInst *Shuffle = dyn_cast<ShuffleVectorInst>(Op);
12864       if (!Shuffle || !Shuffle->isZeroEltSplat())
12865         continue;
12866 
12867       Value *ShuffleOperand = Shuffle->getOperand(0);
12868       InsertElementInst *Insert = dyn_cast<InsertElementInst>(ShuffleOperand);
12869       if (!Insert)
12870         continue;
12871 
12872       Instruction *OperandInstr = dyn_cast<Instruction>(Insert->getOperand(1));
12873       if (!OperandInstr)
12874         continue;
12875 
12876       ConstantInt *ElementConstant =
12877           dyn_cast<ConstantInt>(Insert->getOperand(2));
12878       // Check that the insertelement is inserting into element 0
12879       if (!ElementConstant || ElementConstant->getZExtValue() != 0)
12880         continue;
12881 
12882       unsigned Opcode = OperandInstr->getOpcode();
12883       if (Opcode != Instruction::SExt && Opcode != Instruction::ZExt)
12884         continue;
12885 
12886       Ops.push_back(&Shuffle->getOperandUse(0));
12887       Ops.push_back(&Op);
12888       IsProfitable = true;
12889     }
12890 
12891     return IsProfitable;
12892   }
12893   default:
12894     return false;
12895   }
12896   return false;
12897 }
12898 
bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
                                          Align &RequiredAlignment) const {
12901   if (!LoadedType.isSimple() ||
12902       (!LoadedType.isInteger() && !LoadedType.isFloatingPoint()))
12903     return false;
12904   // Cyclone supports unaligned accesses.
  RequiredAlignment = Align(1);
12906   unsigned NumBits = LoadedType.getSizeInBits();
12907   return NumBits == 32 || NumBits == 64;
12908 }
12909 
12910 /// A helper function for determining the number of interleaved accesses we
12911 /// will generate when lowering accesses of the given type.
12912 unsigned AArch64TargetLowering::getNumInterleavedAccesses(
12913     VectorType *VecTy, const DataLayout &DL, bool UseScalable) const {
12914   unsigned VecSize = UseScalable ? Subtarget->getMinSVEVectorSizeInBits() : 128;
12915   return std::max<unsigned>(1, (DL.getTypeSizeInBits(VecTy) + 127) / VecSize);
12916 }
12917 
12918 MachineMemOperand::Flags
12919 AArch64TargetLowering::getTargetMMOFlags(const Instruction &I) const {
12920   if (Subtarget->getProcFamily() == AArch64Subtarget::Falkor &&
12921       I.getMetadata(FALKOR_STRIDED_ACCESS_MD) != nullptr)
12922     return MOStridedAccess;
12923   return MachineMemOperand::MONone;
12924 }
12925 
12926 bool AArch64TargetLowering::isLegalInterleavedAccessType(
12927     VectorType *VecTy, const DataLayout &DL, bool &UseScalable) const {
12928 
12929   unsigned VecSize = DL.getTypeSizeInBits(VecTy);
12930   unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());
12931   unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
12932 
12933   UseScalable = false;
12934 
12935   // Ensure the number of vector elements is greater than 1.
12936   if (NumElements < 2)
12937     return false;
12938 
12939   // Ensure the element type is legal.
12940   if (ElSize != 8 && ElSize != 16 && ElSize != 32 && ElSize != 64)
12941     return false;
12942 
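  // Use SVE for fixed-length vectors if the type fills a whole number of SVE
  // registers, or if it is wider than 128 bits, has a power-of-two number of
  // elements, and fits within a single SVE register.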
12943   if (Subtarget->useSVEForFixedLengthVectors() &&
12944       (VecSize % Subtarget->getMinSVEVectorSizeInBits() == 0 ||
12945        (VecSize < Subtarget->getMinSVEVectorSizeInBits() &&
12946         isPowerOf2_32(NumElements) && VecSize > 128))) {
12947     UseScalable = true;
12948     return true;
12949   }
12950 
12951   // Ensure the total vector size is 64 or a multiple of 128. Types larger than
12952   // 128 will be split into multiple interleaved accesses.
12953   return VecSize == 64 || VecSize % 128 == 0;
12954 }
12955 
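// Map a fixed-length vector type to the scalable vector type with the same
// element type whose known minimum size is one 128-bit SVE granule.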
12956 static ScalableVectorType *getSVEContainerIRType(FixedVectorType *VTy) {
12957   if (VTy->getElementType() == Type::getDoubleTy(VTy->getContext()))
12958     return ScalableVectorType::get(VTy->getElementType(), 2);
12959 
12960   if (VTy->getElementType() == Type::getFloatTy(VTy->getContext()))
12961     return ScalableVectorType::get(VTy->getElementType(), 4);
12962 
12963   if (VTy->getElementType() == Type::getBFloatTy(VTy->getContext()))
12964     return ScalableVectorType::get(VTy->getElementType(), 8);
12965 
12966   if (VTy->getElementType() == Type::getHalfTy(VTy->getContext()))
12967     return ScalableVectorType::get(VTy->getElementType(), 8);
12968 
12969   if (VTy->getElementType() == Type::getInt64Ty(VTy->getContext()))
12970     return ScalableVectorType::get(VTy->getElementType(), 2);
12971 
12972   if (VTy->getElementType() == Type::getInt32Ty(VTy->getContext()))
12973     return ScalableVectorType::get(VTy->getElementType(), 4);
12974 
12975   if (VTy->getElementType() == Type::getInt16Ty(VTy->getContext()))
12976     return ScalableVectorType::get(VTy->getElementType(), 8);
12977 
12978   if (VTy->getElementType() == Type::getInt8Ty(VTy->getContext()))
12979     return ScalableVectorType::get(VTy->getElementType(), 16);
12980 
12981   llvm_unreachable("Cannot handle input vector type");
12982 }
12983 
12984 /// Lower an interleaved load into a ldN intrinsic.
12985 ///
12986 /// E.g. Lower an interleaved load (Factor = 2):
12987 ///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr
12988 ///        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
12989 ///        %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
12990 ///
12991 ///      Into:
12992 ///        %ld2 = { <4 x i32>, <4 x i32> } call llvm.aarch64.neon.ld2(%ptr)
12993 ///        %vec0 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 0
12994 ///        %vec1 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 1
12995 bool AArch64TargetLowering::lowerInterleavedLoad(
12996     LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
12997     ArrayRef<unsigned> Indices, unsigned Factor) const {
12998   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
12999          "Invalid interleave factor");
13000   assert(!Shuffles.empty() && "Empty shufflevector input");
13001   assert(Shuffles.size() == Indices.size() &&
13002          "Unmatched number of shufflevectors and indices");
13003 
13004   const DataLayout &DL = LI->getModule()->getDataLayout();
13005 
13006   VectorType *VTy = Shuffles[0]->getType();
13007 
  // Skip if we do not have NEON, and skip illegal vector types. We can
  // "legalize" wide vector types into multiple interleaved accesses as long
  // as the vector type's size is a multiple of 128 bits.
13011   bool UseScalable;
13012   if (!Subtarget->hasNEON() ||
13013       !isLegalInterleavedAccessType(VTy, DL, UseScalable))
13014     return false;
13015 
13016   unsigned NumLoads = getNumInterleavedAccesses(VTy, DL, UseScalable);
13017 
13018   auto *FVTy = cast<FixedVectorType>(VTy);
13019 
  // A pointer vector cannot be the return type of the ldN intrinsics. We need
  // to load integer vectors first and then convert them to pointer vectors.
13022   Type *EltTy = FVTy->getElementType();
13023   if (EltTy->isPointerTy())
13024     FVTy =
13025         FixedVectorType::get(DL.getIntPtrType(EltTy), FVTy->getNumElements());
13026 
13027   // If we're going to generate more than one load, reset the sub-vector type
13028   // to something legal.
13029   FVTy = FixedVectorType::get(FVTy->getElementType(),
13030                               FVTy->getNumElements() / NumLoads);
13031 
13032   auto *LDVTy =
13033       UseScalable ? cast<VectorType>(getSVEContainerIRType(FVTy)) : FVTy;
13034 
13035   IRBuilder<> Builder(LI);
13036 
13037   // The base address of the load.
13038   Value *BaseAddr = LI->getPointerOperand();
13039 
13040   if (NumLoads > 1) {
13041     // We will compute the pointer operand of each load from the original base
13042     // address using GEPs. Cast the base address to a pointer to the scalar
13043     // element type.
13044     BaseAddr = Builder.CreateBitCast(
13045         BaseAddr,
13046         LDVTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()));
13047   }
13048 
13049   Type *PtrTy =
13050       UseScalable
13051           ? LDVTy->getElementType()->getPointerTo(LI->getPointerAddressSpace())
13052           : LDVTy->getPointerTo(LI->getPointerAddressSpace());
13053   Type *PredTy = VectorType::get(Type::getInt1Ty(LDVTy->getContext()),
13054                                  LDVTy->getElementCount());
13055 
13056   static const Intrinsic::ID SVELoadIntrs[3] = {
13057       Intrinsic::aarch64_sve_ld2_sret, Intrinsic::aarch64_sve_ld3_sret,
13058       Intrinsic::aarch64_sve_ld4_sret};
13059   static const Intrinsic::ID NEONLoadIntrs[3] = {Intrinsic::aarch64_neon_ld2,
13060                                                  Intrinsic::aarch64_neon_ld3,
13061                                                  Intrinsic::aarch64_neon_ld4};
13062   Function *LdNFunc;
13063   if (UseScalable)
13064     LdNFunc = Intrinsic::getDeclaration(LI->getModule(),
13065                                         SVELoadIntrs[Factor - 2], {LDVTy});
13066   else
13067     LdNFunc = Intrinsic::getDeclaration(
13068         LI->getModule(), NEONLoadIntrs[Factor - 2], {LDVTy, PtrTy});
13069 
13070   // Holds sub-vectors extracted from the load intrinsic return values. The
13071   // sub-vectors are associated with the shufflevector instructions they will
13072   // replace.
13073   DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;
13074 
13075   Value *PTrue = nullptr;
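  // For SVE, build a governing predicate that covers exactly the number of
  // fixed-length elements loaded by each ldN; use the 'all' pattern when the
  // fixed-length type exactly fills the SVE register.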
13076   if (UseScalable) {
13077     Optional<unsigned> PgPattern =
13078         getSVEPredPatternFromNumElements(FVTy->getNumElements());
13079     if (Subtarget->getMinSVEVectorSizeInBits() ==
13080             Subtarget->getMaxSVEVectorSizeInBits() &&
13081         Subtarget->getMinSVEVectorSizeInBits() == DL.getTypeSizeInBits(FVTy))
13082       PgPattern = AArch64SVEPredPattern::all;
13083 
13084     auto *PTruePat =
13085         ConstantInt::get(Type::getInt32Ty(LDVTy->getContext()), *PgPattern);
13086     PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {PredTy},
13087                                     {PTruePat});
13088   }
13089 
13090   for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
13091 
13092     // If we're generating more than one load, compute the base address of
13093     // subsequent loads as an offset from the previous.
13094     if (LoadCount > 0)
13095       BaseAddr = Builder.CreateConstGEP1_32(LDVTy->getElementType(), BaseAddr,
13096                                             FVTy->getNumElements() * Factor);
13097 
13098     CallInst *LdN;
13099     if (UseScalable)
13100       LdN = Builder.CreateCall(
13101           LdNFunc, {PTrue, Builder.CreateBitCast(BaseAddr, PtrTy)}, "ldN");
13102     else
13103       LdN = Builder.CreateCall(LdNFunc, Builder.CreateBitCast(BaseAddr, PtrTy),
13104                                "ldN");
13105 
13106     // Extract and store the sub-vectors returned by the load intrinsic.
13107     for (unsigned i = 0; i < Shuffles.size(); i++) {
13108       ShuffleVectorInst *SVI = Shuffles[i];
13109       unsigned Index = Indices[i];
13110 
13111       Value *SubVec = Builder.CreateExtractValue(LdN, Index);
13112 
13113       if (UseScalable)
13114         SubVec = Builder.CreateExtractVector(
13115             FVTy, SubVec,
13116             ConstantInt::get(Type::getInt64Ty(VTy->getContext()), 0));
13117 
      // Convert the integer vector back to a pointer vector if the element
      // type is a pointer.
13119       if (EltTy->isPointerTy())
13120         SubVec = Builder.CreateIntToPtr(
13121             SubVec, FixedVectorType::get(SVI->getType()->getElementType(),
13122                                          FVTy->getNumElements()));
13123 
13124       SubVecs[SVI].push_back(SubVec);
13125     }
13126   }
13127 
13128   // Replace uses of the shufflevector instructions with the sub-vectors
13129   // returned by the load intrinsic. If a shufflevector instruction is
13130   // associated with more than one sub-vector, those sub-vectors will be
13131   // concatenated into a single wide vector.
13132   for (ShuffleVectorInst *SVI : Shuffles) {
13133     auto &SubVec = SubVecs[SVI];
13134     auto *WideVec =
13135         SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
13136     SVI->replaceAllUsesWith(WideVec);
13137   }
13138 
13139   return true;
13140 }
13141 
13142 /// Lower an interleaved store into a stN intrinsic.
13143 ///
13144 /// E.g. Lower an interleaved store (Factor = 3):
13145 ///        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
13146 ///                 <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
13147 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr
13148 ///
13149 ///      Into:
13150 ///        %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
13151 ///        %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
13152 ///        %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
13153 ///        call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr)
13154 ///
13155 /// Note that the new shufflevectors will be removed and we'll only generate one
13156 /// st3 instruction in CodeGen.
13157 ///
13158 /// Example for a more general valid mask (Factor 3). Lower:
13159 ///        %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
13160 ///                 <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
13161 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr
13162 ///
13163 ///      Into:
13164 ///        %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7>
13165 ///        %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
13166 ///        %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
13167 ///        call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr)
13168 bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
13169                                                   ShuffleVectorInst *SVI,
13170                                                   unsigned Factor) const {
13171   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
13172          "Invalid interleave factor");
13173 
13174   auto *VecTy = cast<FixedVectorType>(SVI->getType());
13175   assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
13176 
13177   unsigned LaneLen = VecTy->getNumElements() / Factor;
13178   Type *EltTy = VecTy->getElementType();
13179   auto *SubVecTy = FixedVectorType::get(EltTy, LaneLen);
13180 
13181   const DataLayout &DL = SI->getModule()->getDataLayout();
13182   bool UseScalable;
13183 
  // Skip if we do not have NEON, and skip illegal vector types. We can
  // "legalize" wide vector types into multiple interleaved accesses as long
  // as the vector type's size is a multiple of 128 bits.
13187   if (!Subtarget->hasNEON() ||
13188       !isLegalInterleavedAccessType(SubVecTy, DL, UseScalable))
13189     return false;
13190 
13191   unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL, UseScalable);
13192 
13193   Value *Op0 = SVI->getOperand(0);
13194   Value *Op1 = SVI->getOperand(1);
13195   IRBuilder<> Builder(SI);
13196 
13197   // StN intrinsics don't support pointer vectors as arguments. Convert pointer
13198   // vectors to integer vectors.
13199   if (EltTy->isPointerTy()) {
13200     Type *IntTy = DL.getIntPtrType(EltTy);
13201     unsigned NumOpElts =
13202         cast<FixedVectorType>(Op0->getType())->getNumElements();
13203 
13204     // Convert to the corresponding integer vector.
13205     auto *IntVecTy = FixedVectorType::get(IntTy, NumOpElts);
13206     Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
13207     Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);
13208 
13209     SubVecTy = FixedVectorType::get(IntTy, LaneLen);
13210   }
13211 
13212   // If we're going to generate more than one store, reset the lane length
13213   // and sub-vector type to something legal.
13214   LaneLen /= NumStores;
13215   SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen);
13216 
13217   auto *STVTy = UseScalable ? cast<VectorType>(getSVEContainerIRType(SubVecTy))
13218                             : SubVecTy;
13219 
13220   // The base address of the store.
13221   Value *BaseAddr = SI->getPointerOperand();
13222 
13223   if (NumStores > 1) {
13224     // We will compute the pointer operand of each store from the original base
13225     // address using GEPs. Cast the base address to a pointer to the scalar
13226     // element type.
13227     BaseAddr = Builder.CreateBitCast(
13228         BaseAddr,
13229         SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace()));
13230   }
13231 
13232   auto Mask = SVI->getShuffleMask();
13233 
13234   Type *PtrTy =
13235       UseScalable
13236           ? STVTy->getElementType()->getPointerTo(SI->getPointerAddressSpace())
13237           : STVTy->getPointerTo(SI->getPointerAddressSpace());
13238   Type *PredTy = VectorType::get(Type::getInt1Ty(STVTy->getContext()),
13239                                  STVTy->getElementCount());
13240 
13241   static const Intrinsic::ID SVEStoreIntrs[3] = {Intrinsic::aarch64_sve_st2,
13242                                                  Intrinsic::aarch64_sve_st3,
13243                                                  Intrinsic::aarch64_sve_st4};
13244   static const Intrinsic::ID NEONStoreIntrs[3] = {Intrinsic::aarch64_neon_st2,
13245                                                   Intrinsic::aarch64_neon_st3,
13246                                                   Intrinsic::aarch64_neon_st4};
13247   Function *StNFunc;
13248   if (UseScalable)
13249     StNFunc = Intrinsic::getDeclaration(SI->getModule(),
13250                                         SVEStoreIntrs[Factor - 2], {STVTy});
13251   else
13252     StNFunc = Intrinsic::getDeclaration(
13253         SI->getModule(), NEONStoreIntrs[Factor - 2], {STVTy, PtrTy});
13254 
13255   Value *PTrue = nullptr;
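  // As in lowerInterleavedLoad, build a predicate covering the fixed-length
  // sub-vector, using the 'all' pattern when it exactly fills the SVE
  // register.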
13256   if (UseScalable) {
13257     Optional<unsigned> PgPattern =
13258         getSVEPredPatternFromNumElements(SubVecTy->getNumElements());
13259     if (Subtarget->getMinSVEVectorSizeInBits() ==
13260             Subtarget->getMaxSVEVectorSizeInBits() &&
13261         Subtarget->getMinSVEVectorSizeInBits() ==
13262             DL.getTypeSizeInBits(SubVecTy))
13263       PgPattern = AArch64SVEPredPattern::all;
13264 
13265     auto *PTruePat =
13266         ConstantInt::get(Type::getInt32Ty(STVTy->getContext()), *PgPattern);
13267     PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {PredTy},
13268                                     {PTruePat});
13269   }
13270 
13271   for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
13272 
13273     SmallVector<Value *, 5> Ops;
13274 
13275     // Split the shufflevector operands into sub vectors for the new stN call.
13276     for (unsigned i = 0; i < Factor; i++) {
13277       Value *Shuffle;
13278       unsigned IdxI = StoreCount * LaneLen * Factor + i;
13279       if (Mask[IdxI] >= 0) {
13280         Shuffle = Builder.CreateShuffleVector(
13281             Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0));
13282       } else {
13283         unsigned StartMask = 0;
13284         for (unsigned j = 1; j < LaneLen; j++) {
13285           unsigned IdxJ = StoreCount * LaneLen * Factor + j;
13286           if (Mask[IdxJ * Factor + IdxI] >= 0) {
13287             StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
13288             break;
13289           }
13290         }
        // Note: Filling undef gaps with random elements is OK, since those
        // elements were being written anyway (with undefs).
        // In the case of all undefs we default to using elements from 0.
        // Note: StartMask cannot be negative; that is checked in
        // isReInterleaveMask.
13296         Shuffle = Builder.CreateShuffleVector(
13297             Op0, Op1, createSequentialMask(StartMask, LaneLen, 0));
13298       }
13299 
13300       if (UseScalable)
13301         Shuffle = Builder.CreateInsertVector(
13302             STVTy, UndefValue::get(STVTy), Shuffle,
13303             ConstantInt::get(Type::getInt64Ty(STVTy->getContext()), 0));
13304 
13305       Ops.push_back(Shuffle);
13306     }
13307 
13308     if (UseScalable)
13309       Ops.push_back(PTrue);
13310 
    // If we're generating more than one store, compute the base address of
    // subsequent stores as an offset from the previous one.
13313     if (StoreCount > 0)
13314       BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(),
13315                                             BaseAddr, LaneLen * Factor);
13316 
13317     Ops.push_back(Builder.CreateBitCast(BaseAddr, PtrTy));
13318     Builder.CreateCall(StNFunc, Ops);
13319   }
13320   return true;
13321 }
13322 
13323 // Lower an SVE structured load intrinsic returning a tuple type to target
13324 // specific intrinsic taking the same input but returning a multi-result value
13325 // of the split tuple type.
13326 //
13327 // E.g. Lowering an LD3:
13328 //
13329 //  call <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32(
13330 //                                                    <vscale x 4 x i1> %pred,
13331 //                                                    <vscale x 4 x i32>* %addr)
13332 //
13333 //  Output DAG:
13334 //
13335 //    t0: ch = EntryToken
13336 //        t2: nxv4i1,ch = CopyFromReg t0, Register:nxv4i1 %0
13337 //        t4: i64,ch = CopyFromReg t0, Register:i64 %1
13338 //    t5: nxv4i32,nxv4i32,nxv4i32,ch = AArch64ISD::SVE_LD3 t0, t2, t4
13339 //    t6: nxv12i32 = concat_vectors t5, t5:1, t5:2
13340 //
13341 // This is called pre-legalization to avoid widening/splitting issues with
13342 // non-power-of-2 tuple types used for LD3, such as nxv12i32.
13343 SDValue AArch64TargetLowering::LowerSVEStructLoad(unsigned Intrinsic,
13344                                                   ArrayRef<SDValue> LoadOps,
13345                                                   EVT VT, SelectionDAG &DAG,
13346                                                   const SDLoc &DL) const {
13347   assert(VT.isScalableVector() && "Can only lower scalable vectors");
13348 
13349   unsigned N, Opcode;
13350   static const std::pair<unsigned, std::pair<unsigned, unsigned>>
13351       IntrinsicMap[] = {
13352           {Intrinsic::aarch64_sve_ld2, {2, AArch64ISD::SVE_LD2_MERGE_ZERO}},
13353           {Intrinsic::aarch64_sve_ld3, {3, AArch64ISD::SVE_LD3_MERGE_ZERO}},
13354           {Intrinsic::aarch64_sve_ld4, {4, AArch64ISD::SVE_LD4_MERGE_ZERO}}};
13355 
13356   std::tie(N, Opcode) = llvm::find_if(IntrinsicMap, [&](auto P) {
13357                           return P.first == Intrinsic;
13358                         })->second;
13359   assert(VT.getVectorElementCount().getKnownMinValue() % N == 0 &&
13360          "invalid tuple vector type!");
13361 
13362   EVT SplitVT =
13363       EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
13364                        VT.getVectorElementCount().divideCoefficientBy(N));
13365   assert(isTypeLegal(SplitVT));
13366 
13367   SmallVector<EVT, 5> VTs(N, SplitVT);
13368   VTs.push_back(MVT::Other); // Chain
13369   SDVTList NodeTys = DAG.getVTList(VTs);
13370 
13371   SDValue PseudoLoad = DAG.getNode(Opcode, DL, NodeTys, LoadOps);
13372   SmallVector<SDValue, 4> PseudoLoadOps;
13373   for (unsigned I = 0; I < N; ++I)
13374     PseudoLoadOps.push_back(SDValue(PseudoLoad.getNode(), I));
13375   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, PseudoLoadOps);
13376 }
13377 
13378 EVT AArch64TargetLowering::getOptimalMemOpType(
13379     const MemOp &Op, const AttributeList &FuncAttributes) const {
13380   bool CanImplicitFloat = !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
13381   bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat;
13382   bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat;
  // Only use AdvSIMD to implement memsets of 32 bytes and above. Below that,
  // it would take one extra instruction to materialize the v2i64 zero and one
  // store (with a restrictive addressing mode); just do i64 stores instead.
13386   bool IsSmallMemset = Op.isMemset() && Op.size() < 32;
13387   auto AlignmentIsAcceptable = [&](EVT VT, Align AlignCheck) {
13388     if (Op.isAligned(AlignCheck))
13389       return true;
13390     bool Fast;
13391     return allowsMisalignedMemoryAccesses(VT, 0, Align(1),
13392                                           MachineMemOperand::MONone, &Fast) &&
13393            Fast;
13394   };
13395 
13396   if (CanUseNEON && Op.isMemset() && !IsSmallMemset &&
13397       AlignmentIsAcceptable(MVT::v16i8, Align(16)))
13398     return MVT::v16i8;
13399   if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16)))
13400     return MVT::f128;
13401   if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8)))
13402     return MVT::i64;
13403   if (Op.size() >= 4 && AlignmentIsAcceptable(MVT::i32, Align(4)))
13404     return MVT::i32;
13405   return MVT::Other;
13406 }
13407 
13408 LLT AArch64TargetLowering::getOptimalMemOpLLT(
13409     const MemOp &Op, const AttributeList &FuncAttributes) const {
13410   bool CanImplicitFloat = !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
13411   bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat;
13412   bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat;
  // Only use AdvSIMD to implement memsets of 32 bytes and above. Below that,
  // it would take one extra instruction to materialize the v2i64 zero and one
  // store (with a restrictive addressing mode); just do i64 stores instead.
13416   bool IsSmallMemset = Op.isMemset() && Op.size() < 32;
13417   auto AlignmentIsAcceptable = [&](EVT VT, Align AlignCheck) {
13418     if (Op.isAligned(AlignCheck))
13419       return true;
13420     bool Fast;
13421     return allowsMisalignedMemoryAccesses(VT, 0, Align(1),
13422                                           MachineMemOperand::MONone, &Fast) &&
13423            Fast;
13424   };
13425 
13426   if (CanUseNEON && Op.isMemset() && !IsSmallMemset &&
13427       AlignmentIsAcceptable(MVT::v2i64, Align(16)))
13428     return LLT::fixed_vector(2, 64);
13429   if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16)))
13430     return LLT::scalar(128);
13431   if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8)))
13432     return LLT::scalar(64);
13433   if (Op.size() >= 4 && AlignmentIsAcceptable(MVT::i32, Align(4)))
13434     return LLT::scalar(32);
13435   return LLT();
13436 }
13437 
13438 // 12-bit optionally shifted immediates are legal for adds.
13439 bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const {
13440   if (Immed == std::numeric_limits<int64_t>::min()) {
13441     LLVM_DEBUG(dbgs() << "Illegal add imm " << Immed
13442                       << ": avoid UB for INT64_MIN\n");
13443     return false;
13444   }
13445   // Same encoding for add/sub, just flip the sign.
13446   Immed = std::abs(Immed);
13447   bool IsLegal = ((Immed >> 12) == 0 ||
13448                   ((Immed & 0xfff) == 0 && Immed >> 24 == 0));
13449   LLVM_DEBUG(dbgs() << "Is " << Immed
13450                     << " legal add imm: " << (IsLegal ? "yes" : "no") << "\n");
13451   return IsLegal;
13452 }
13453 
13454 // Return false to prevent folding
13455 // (mul (add x, c1), c2) -> (add (mul x, c2), c2*c1) in DAGCombine,
13456 // if the folding leads to worse code.
13457 bool AArch64TargetLowering::isMulAddWithConstProfitable(
13458     SDValue AddNode, SDValue ConstNode) const {
13459   // Let the DAGCombiner decide for vector types and large types.
13460   const EVT VT = AddNode.getValueType();
13461   if (VT.isVector() || VT.getScalarSizeInBits() > 64)
13462     return true;
13463 
  // The fold is worse if c1 is a legal add immediate while c1*c2 is not, and
  // c1*c2 has to be materialized with at least two instructions.
13466   const ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
13467   const ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
13468   const int64_t C1 = C1Node->getSExtValue();
13469   const APInt C1C2 = C1Node->getAPIntValue() * C2Node->getAPIntValue();
13470   if (!isLegalAddImmediate(C1) || isLegalAddImmediate(C1C2.getSExtValue()))
13471     return true;
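  // If materializing c1*c2 takes more than one MOV-immediate instruction,
  // the folded form is more expensive than keeping (mul (add x, c1), c2), so
  // reject the fold.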
13472   SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
13473   AArch64_IMM::expandMOVImm(C1C2.getZExtValue(), VT.getSizeInBits(), Insn);
13474   if (Insn.size() > 1)
13475     return false;
13476 
13477   // Default to true and let the DAGCombiner decide.
13478   return true;
13479 }
13480 
13481 // Integer comparisons are implemented with ADDS/SUBS, so the range of valid
13482 // immediates is the same as for an add or a sub.
13483 bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Immed) const {
13484   return isLegalAddImmediate(Immed);
13485 }
13486 
13487 /// isLegalAddressingMode - Return true if the addressing mode represented
13488 /// by AM is legal for this target, for a load/store of the specified type.
bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                  const AddrMode &AM, Type *Ty,
                                                  unsigned AS,
                                                  Instruction *I) const {
13492   // AArch64 has five basic addressing modes:
13493   //  reg
13494   //  reg + 9-bit signed offset
13495   //  reg + SIZE_IN_BYTES * 12-bit unsigned offset
13496   //  reg1 + reg2
13497   //  reg + SIZE_IN_BYTES * reg
13498 
13499   // No global is ever allowed as a base.
13500   if (AM.BaseGV)
13501     return false;
13502 
13503   // No reg+reg+imm addressing.
13504   if (AM.HasBaseReg && AM.BaseOffs && AM.Scale)
13505     return false;
13506 
13507   // FIXME: Update this method to support scalable addressing modes.
13508   if (isa<ScalableVectorType>(Ty)) {
13509     uint64_t VecElemNumBytes =
13510         DL.getTypeSizeInBits(cast<VectorType>(Ty)->getElementType()) / 8;
13511     return AM.HasBaseReg && !AM.BaseOffs &&
13512            (AM.Scale == 0 || (uint64_t)AM.Scale == VecElemNumBytes);
13513   }
13514 
13515   // check reg + imm case:
13516   // i.e., reg + 0, reg + imm9, reg + SIZE_IN_BYTES * uimm12
13517   uint64_t NumBytes = 0;
13518   if (Ty->isSized()) {
13519     uint64_t NumBits = DL.getTypeSizeInBits(Ty);
13520     NumBytes = NumBits / 8;
13521     if (!isPowerOf2_64(NumBits))
13522       NumBytes = 0;
13523   }
13524 
13525   if (!AM.Scale) {
13526     int64_t Offset = AM.BaseOffs;
13527 
13528     // 9-bit signed offset
13529     if (isInt<9>(Offset))
13530       return true;
13531 
13532     // 12-bit unsigned offset
13533     unsigned shift = Log2_64(NumBytes);
13534     if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 &&
13535         // Must be a multiple of NumBytes (NumBytes is a power of 2)
13536         (Offset >> shift) << shift == Offset)
13537       return true;
13538     return false;
13539   }
13540 
13541   // Check reg1 + SIZE_IN_BYTES * reg2 and reg1 + reg2
13542 
13543   return AM.Scale == 1 || (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes);
13544 }
13545 
13546 bool AArch64TargetLowering::shouldConsiderGEPOffsetSplit() const {
  // Consider splitting a large offset of a struct or array.
13548   return true;
13549 }
13550 
13551 InstructionCost AArch64TargetLowering::getScalingFactorCost(
13552     const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const {
13553   // Scaling factors are not free at all.
13554   // Operands                     | Rt Latency
13555   // -------------------------------------------
13556   // Rt, [Xn, Xm]                 | 4
13557   // -------------------------------------------
13558   // Rt, [Xn, Xm, lsl #imm]       | Rn: 4 Rm: 5
13559   // Rt, [Xn, Wm, <extend> #imm]  |
13560   if (isLegalAddressingMode(DL, AM, Ty, AS))
13561     // Scale represents reg2 * scale, thus account for 1 if
13562     // it is not equal to 0 or 1.
13563     return AM.Scale != 0 && AM.Scale != 1;
13564   return -1;
13565 }
13566 
13567 bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(
13568     const MachineFunction &MF, EVT VT) const {
13569   VT = VT.getScalarType();
13570 
13571   if (!VT.isSimple())
13572     return false;
13573 
13574   switch (VT.getSimpleVT().SimpleTy) {
13575   case MVT::f16:
13576     return Subtarget->hasFullFP16();
13577   case MVT::f32:
13578   case MVT::f64:
13579     return true;
13580   default:
13581     break;
13582   }
13583 
13584   return false;
13585 }
13586 
13587 bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
13588                                                        Type *Ty) const {
13589   switch (Ty->getScalarType()->getTypeID()) {
13590   case Type::FloatTyID:
13591   case Type::DoubleTyID:
13592     return true;
13593   default:
13594     return false;
13595   }
13596 }
13597 
13598 bool AArch64TargetLowering::generateFMAsInMachineCombiner(
13599     EVT VT, CodeGenOpt::Level OptLevel) const {
13600   return (OptLevel >= CodeGenOpt::Aggressive) && !VT.isScalableVector() &&
13601          !useSVEForFixedLengthVectorVT(VT);
13602 }
13603 
13604 const MCPhysReg *
13605 AArch64TargetLowering::getScratchRegisters(CallingConv::ID) const {
13606   // LR is a callee-save register, but we must treat it as clobbered by any call
13607   // site. Hence we include LR in the scratch registers, which are in turn added
13608   // as implicit-defs for stackmaps and patchpoints.
13609   static const MCPhysReg ScratchRegs[] = {
13610     AArch64::X16, AArch64::X17, AArch64::LR, 0
13611   };
13612   return ScratchRegs;
13613 }
13614 
13615 bool
13616 AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
13617                                                      CombineLevel Level) const {
13618   assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
13619           N->getOpcode() == ISD::SRL) &&
13620          "Expected shift op");
13621 
13622   SDValue ShiftLHS = N->getOperand(0);
13623   EVT VT = N->getValueType(0);
13624 
  // If ShiftLHS is an unsigned bit extraction ((x >> C) & mask), do not
  // combine it with shift 'N' so that it can still be lowered to UBFX.
13627   if (ShiftLHS.getOpcode() == ISD::AND && (VT == MVT::i32 || VT == MVT::i64) &&
13628       isa<ConstantSDNode>(ShiftLHS.getOperand(1))) {
13629     uint64_t TruncMask = ShiftLHS.getConstantOperandVal(1);
13630     if (isMask_64(TruncMask) &&
13631         ShiftLHS.getOperand(0).getOpcode() == ISD::SRL &&
13632         isa<ConstantSDNode>(ShiftLHS.getOperand(0).getOperand(1)))
13633       return false;
13634   }
13635   return true;
13636 }
13637 
13638 bool AArch64TargetLowering::isDesirableToCommuteXorWithShift(
13639     const SDNode *N) const {
13640   assert(N->getOpcode() == ISD::XOR &&
13641          (N->getOperand(0).getOpcode() == ISD::SHL ||
13642           N->getOperand(0).getOpcode() == ISD::SRL) &&
13643          "Expected XOR(SHIFT) pattern");
13644 
13645   // Only commute if the entire NOT mask is a hidden shifted mask.
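  // For shl the mask must cover exactly the bits at and above the shift
  // amount; for srl it must cover exactly the low BitWidth - ShiftAmt bits.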
13646   auto *XorC = dyn_cast<ConstantSDNode>(N->getOperand(1));
13647   auto *ShiftC = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1));
13648   if (XorC && ShiftC) {
13649     unsigned MaskIdx, MaskLen;
13650     if (XorC->getAPIntValue().isShiftedMask(MaskIdx, MaskLen)) {
13651       unsigned ShiftAmt = ShiftC->getZExtValue();
13652       unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
13653       if (N->getOperand(0).getOpcode() == ISD::SHL)
13654         return MaskIdx == ShiftAmt && MaskLen == (BitWidth - ShiftAmt);
13655       return MaskIdx == 0 && MaskLen == (BitWidth - ShiftAmt);
13656     }
13657   }
13658 
13659   return false;
13660 }
13661 
13662 bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask(
13663     const SDNode *N, CombineLevel Level) const {
13664   assert(((N->getOpcode() == ISD::SHL &&
13665            N->getOperand(0).getOpcode() == ISD::SRL) ||
13666           (N->getOpcode() == ISD::SRL &&
13667            N->getOperand(0).getOpcode() == ISD::SHL)) &&
13668          "Expected shift-shift mask");
13669   // Don't allow multiuse shift folding with the same shift amount.
13670   if (!N->getOperand(0)->hasOneUse())
13671     return false;
13672 
13673   // Only fold srl(shl(x,c1),c2) iff C1 >= C2 to prevent loss of UBFX patterns.
13674   EVT VT = N->getValueType(0);
13675   if (N->getOpcode() == ISD::SRL && (VT == MVT::i32 || VT == MVT::i64)) {
13676     auto *C1 = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1));
13677     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
13678     return (!C1 || !C2 || C1->getZExtValue() >= C2->getZExtValue());
13679   }
13680 
13681   return true;
13682 }
13683 
13684 bool AArch64TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
13685                                                               Type *Ty) const {
13686   assert(Ty->isIntegerTy());
13687 
13688   unsigned BitSize = Ty->getPrimitiveSizeInBits();
13689   if (BitSize == 0)
13690     return false;
13691 
13692   int64_t Val = Imm.getSExtValue();
13693   if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, BitSize))
13694     return true;
13695 
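  // Negative values are typically materialized with MOVN, so count the MOVK
  // chunks of the bitwise-NOT of the value instead.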
13696   if ((int64_t)Val < 0)
13697     Val = ~Val;
13698   if (BitSize == 32)
13699     Val &= (1LL << 32) - 1;
13700 
13701   unsigned LZ = countLeadingZeros((uint64_t)Val);
13702   unsigned Shift = (63 - LZ) / 16;
13703   // MOVZ is free so return true for one or fewer MOVK.
13704   return Shift < 3;
13705 }
13706 
13707 bool AArch64TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
13708                                                     unsigned Index) const {
13709   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
13710     return false;
13711 
13712   return (Index == 0 || Index == ResVT.getVectorMinNumElements());
13713 }
13714 
13715 /// Turn vector tests of the signbit in the form of:
13716 ///   xor (sra X, elt_size(X)-1), -1
13717 /// into:
13718 ///   cmge X, X, #0
13719 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
13720                                          const AArch64Subtarget *Subtarget) {
13721   EVT VT = N->getValueType(0);
13722   if (!Subtarget->hasNEON() || !VT.isVector())
13723     return SDValue();
13724 
13725   // There must be a shift right algebraic before the xor, and the xor must be a
13726   // 'not' operation.
13727   SDValue Shift = N->getOperand(0);
13728   SDValue Ones = N->getOperand(1);
13729   if (Shift.getOpcode() != AArch64ISD::VASHR || !Shift.hasOneUse() ||
13730       !ISD::isBuildVectorAllOnes(Ones.getNode()))
13731     return SDValue();
13732 
13733   // The shift should be smearing the sign bit across each vector element.
13734   auto *ShiftAmt = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
13735   EVT ShiftEltTy = Shift.getValueType().getVectorElementType();
13736   if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1)
13737     return SDValue();
13738 
13739   return DAG.getNode(AArch64ISD::CMGEz, SDLoc(N), VT, Shift.getOperand(0));
13740 }
13741 
// Given a vecreduce_add node, detect the pattern below and convert it to a
// node sequence using UABDL, [S|U]ABD and UADDLP.
13744 //
13745 // i32 vecreduce_add(
13746 //  v16i32 abs(
13747 //    v16i32 sub(
13748 //     v16i32 [sign|zero]_extend(v16i8 a), v16i32 [sign|zero]_extend(v16i8 b))))
13749 // =================>
13750 // i32 vecreduce_add(
13751 //   v4i32 UADDLP(
13752 //     v8i16 add(
13753 //       v8i16 zext(
13754 //         v8i8 [S|U]ABD low8:v16i8 a, low8:v16i8 b
13755 //       v8i16 zext(
13756 //         v8i8 [S|U]ABD high8:v16i8 a, high8:v16i8 b
13757 static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N,
13758                                                     SelectionDAG &DAG) {
13759   // Assumed i32 vecreduce_add
13760   if (N->getValueType(0) != MVT::i32)
13761     return SDValue();
13762 
13763   SDValue VecReduceOp0 = N->getOperand(0);
13764   unsigned Opcode = VecReduceOp0.getOpcode();
13765   // Assumed v16i32 abs
13766   if (Opcode != ISD::ABS || VecReduceOp0->getValueType(0) != MVT::v16i32)
13767     return SDValue();
13768 
13769   SDValue ABS = VecReduceOp0;
13770   // Assumed v16i32 sub
13771   if (ABS->getOperand(0)->getOpcode() != ISD::SUB ||
13772       ABS->getOperand(0)->getValueType(0) != MVT::v16i32)
13773     return SDValue();
13774 
13775   SDValue SUB = ABS->getOperand(0);
13776   unsigned Opcode0 = SUB->getOperand(0).getOpcode();
13777   unsigned Opcode1 = SUB->getOperand(1).getOpcode();
13778   // Assumed v16i32 type
13779   if (SUB->getOperand(0)->getValueType(0) != MVT::v16i32 ||
13780       SUB->getOperand(1)->getValueType(0) != MVT::v16i32)
13781     return SDValue();
13782 
13783   // Assumed zext or sext
13784   bool IsZExt = false;
13785   if (Opcode0 == ISD::ZERO_EXTEND && Opcode1 == ISD::ZERO_EXTEND) {
13786     IsZExt = true;
13787   } else if (Opcode0 == ISD::SIGN_EXTEND && Opcode1 == ISD::SIGN_EXTEND) {
13788     IsZExt = false;
13789   } else
13790     return SDValue();
13791 
13792   SDValue EXT0 = SUB->getOperand(0);
13793   SDValue EXT1 = SUB->getOperand(1);
13794   // Assumed zext's operand has v16i8 type
13795   if (EXT0->getOperand(0)->getValueType(0) != MVT::v16i8 ||
13796       EXT1->getOperand(0)->getValueType(0) != MVT::v16i8)
13797     return SDValue();
13798 
  // The pattern is detected. Convert it to a sequence of nodes.
13800   SDLoc DL(N);
13801 
13802   // First, create the node pattern of UABD/SABD.
13803   SDValue UABDHigh8Op0 =
13804       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT0->getOperand(0),
13805                   DAG.getConstant(8, DL, MVT::i64));
13806   SDValue UABDHigh8Op1 =
13807       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT1->getOperand(0),
13808                   DAG.getConstant(8, DL, MVT::i64));
13809   SDValue UABDHigh8 = DAG.getNode(IsZExt ? ISD::ABDU : ISD::ABDS, DL, MVT::v8i8,
13810                                   UABDHigh8Op0, UABDHigh8Op1);
13811   SDValue UABDL = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, UABDHigh8);
13812 
13813   // Second, create the node pattern of UABAL.
13814   SDValue UABDLo8Op0 =
13815       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT0->getOperand(0),
13816                   DAG.getConstant(0, DL, MVT::i64));
13817   SDValue UABDLo8Op1 =
13818       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT1->getOperand(0),
13819                   DAG.getConstant(0, DL, MVT::i64));
13820   SDValue UABDLo8 = DAG.getNode(IsZExt ? ISD::ABDU : ISD::ABDS, DL, MVT::v8i8,
13821                                 UABDLo8Op0, UABDLo8Op1);
13822   SDValue ZExtUABD = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, UABDLo8);
13823   SDValue UABAL = DAG.getNode(ISD::ADD, DL, MVT::v8i16, UABDL, ZExtUABD);
13824 
13825   // Third, create the node of UADDLP.
13826   SDValue UADDLP = DAG.getNode(AArch64ISD::UADDLP, DL, MVT::v4i32, UABAL);
13827 
13828   // Fourth, create the node of VECREDUCE_ADD.
13829   return DAG.getNode(ISD::VECREDUCE_ADD, DL, MVT::i32, UADDLP);
13830 }
13831 
13832 // Turn a v8i8/v16i8 extended vecreduce into a udot/sdot and vecreduce
13833 //   vecreduce.add(ext(A)) to vecreduce.add(DOT(zero, A, one))
13834 //   vecreduce.add(mul(ext(A), ext(B))) to vecreduce.add(DOT(zero, A, B))
13835 static SDValue performVecReduceAddCombine(SDNode *N, SelectionDAG &DAG,
13836                                           const AArch64Subtarget *ST) {
13837   if (!ST->hasDotProd())
13838     return performVecReduceAddCombineWithUADDLP(N, DAG);
13839 
13840   SDValue Op0 = N->getOperand(0);
13841   if (N->getValueType(0) != MVT::i32 ||
13842       Op0.getValueType().getVectorElementType() != MVT::i32)
13843     return SDValue();
13844 
13845   unsigned ExtOpcode = Op0.getOpcode();
13846   SDValue A = Op0;
13847   SDValue B;
13848   if (ExtOpcode == ISD::MUL) {
13849     A = Op0.getOperand(0);
13850     B = Op0.getOperand(1);
13851     if (A.getOpcode() != B.getOpcode() ||
13852         A.getOperand(0).getValueType() != B.getOperand(0).getValueType())
13853       return SDValue();
13854     ExtOpcode = A.getOpcode();
13855   }
13856   if (ExtOpcode != ISD::ZERO_EXTEND && ExtOpcode != ISD::SIGN_EXTEND)
13857     return SDValue();
13858 
13859   EVT Op0VT = A.getOperand(0).getValueType();
13860   if (Op0VT != MVT::v8i8 && Op0VT != MVT::v16i8)
13861     return SDValue();
13862 
13863   SDLoc DL(Op0);
  // For non-MLA reductions B can be set to 1. For MLA we take the operand of
  // the extend B.
13866   if (!B)
13867     B = DAG.getConstant(1, DL, Op0VT);
13868   else
13869     B = B.getOperand(0);
13870 
13871   SDValue Zeros =
13872       DAG.getConstant(0, DL, Op0VT == MVT::v8i8 ? MVT::v2i32 : MVT::v4i32);
13873   auto DotOpcode =
13874       (ExtOpcode == ISD::ZERO_EXTEND) ? AArch64ISD::UDOT : AArch64ISD::SDOT;
13875   SDValue Dot = DAG.getNode(DotOpcode, DL, Zeros.getValueType(), Zeros,
13876                             A.getOperand(0), B);
13877   return DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0), Dot);
13878 }
13879 
13880 // Given an (integer) vecreduce, we know the order of the inputs does not
13881 // matter. We can convert UADDV(add(zext(extract_lo(x)), zext(extract_hi(x))))
13882 // into UADDV(UADDLP(x)). This can also happen through an extra add, where we
13883 // transform UADDV(add(y, add(zext(extract_lo(x)), zext(extract_hi(x))))).
13884 static SDValue performUADDVCombine(SDNode *N, SelectionDAG &DAG) {
13885   auto DetectAddExtract = [&](SDValue A) {
13886     // Look for add(zext(extract_lo(x)), zext(extract_hi(x))), returning
13887     // UADDLP(x) if found.
13888     if (A.getOpcode() != ISD::ADD)
13889       return SDValue();
13890     EVT VT = A.getValueType();
13891     SDValue Op0 = A.getOperand(0);
13892     SDValue Op1 = A.getOperand(1);
    if (Op0.getOpcode() != Op1.getOpcode() ||
13894         (Op0.getOpcode() != ISD::ZERO_EXTEND &&
13895          Op0.getOpcode() != ISD::SIGN_EXTEND))
13896       return SDValue();
13897     SDValue Ext0 = Op0.getOperand(0);
13898     SDValue Ext1 = Op1.getOperand(0);
13899     if (Ext0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
13900         Ext1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
13901         Ext0.getOperand(0) != Ext1.getOperand(0))
13902       return SDValue();
    // Check that the source vector has twice as many elements as the add's
    // type, and that the extracts are from its upper/lower halves.
13905     if (Ext0.getOperand(0).getValueType().getVectorNumElements() !=
13906         VT.getVectorNumElements() * 2)
13907       return SDValue();
13908     if ((Ext0.getConstantOperandVal(1) != 0 &&
13909          Ext1.getConstantOperandVal(1) != VT.getVectorNumElements()) &&
13910         (Ext1.getConstantOperandVal(1) != 0 &&
13911          Ext0.getConstantOperandVal(1) != VT.getVectorNumElements()))
13912       return SDValue();
13913     unsigned Opcode = Op0.getOpcode() == ISD::ZERO_EXTEND ? AArch64ISD::UADDLP
13914                                                           : AArch64ISD::SADDLP;
13915     return DAG.getNode(Opcode, SDLoc(A), VT, Ext0.getOperand(0));
13916   };
13917 
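  // Try the vecreduce operand directly, then look through one extra ADD to
  // catch the UADDV(add(y, add(zext(lo), zext(hi)))) form described above.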
13918   SDValue A = N->getOperand(0);
13919   if (SDValue R = DetectAddExtract(A))
13920     return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), R);
13921   if (A.getOpcode() == ISD::ADD) {
13922     if (SDValue R = DetectAddExtract(A.getOperand(0)))
13923       return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
13924                          DAG.getNode(ISD::ADD, SDLoc(A), A.getValueType(), R,
13925                                      A.getOperand(1)));
13926     if (SDValue R = DetectAddExtract(A.getOperand(1)))
13927       return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
13928                          DAG.getNode(ISD::ADD, SDLoc(A), A.getValueType(), R,
13929                                      A.getOperand(0)));
13930   }
13931   return SDValue();
13932 }
13933 
13935 static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG,
13936                                  TargetLowering::DAGCombinerInfo &DCI,
13937                                  const AArch64Subtarget *Subtarget) {
13938   if (DCI.isBeforeLegalizeOps())
13939     return SDValue();
13940 
13941   return foldVectorXorShiftIntoCmp(N, DAG, Subtarget);
13942 }
13943 
13944 SDValue
13945 AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
13946                                      SelectionDAG &DAG,
13947                                      SmallVectorImpl<SDNode *> &Created) const {
13948   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
13949   if (isIntDivCheap(N->getValueType(0), Attr))
    return SDValue(N, 0); // Lower SDIV as SDIV
13951 
13952   EVT VT = N->getValueType(0);
13953 
  // For scalable vectors, or when SVE is used for fixed-length vectors, mark
  // the operation as cheap so it is handled much later. This allows us to
  // handle larger-than-legal types.
13956   if (VT.isScalableVector() || Subtarget->useSVEForFixedLengthVectors())
13957     return SDValue(N, 0);
13958 
13959   // fold (sdiv X, pow2)
13960   if ((VT != MVT::i32 && VT != MVT::i64) ||
13961       !(Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()))
13962     return SDValue();
13963 
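  // Lower the division as: add Pow2 - 1 when N0 is negative (selected via
  // CSEL), arithmetic shift right by Lg2, and finally negate the result if
  // the divisor itself is negative.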
13964   SDLoc DL(N);
13965   SDValue N0 = N->getOperand(0);
13966   unsigned Lg2 = Divisor.countTrailingZeros();
13967   SDValue Zero = DAG.getConstant(0, DL, VT);
13968   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
13969 
13970   // Add (N0 < 0) ? Pow2 - 1 : 0;
13971   SDValue CCVal;
13972   SDValue Cmp = getAArch64Cmp(N0, Zero, ISD::SETLT, CCVal, DAG, DL);
13973   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
13974   SDValue CSel = DAG.getNode(AArch64ISD::CSEL, DL, VT, Add, N0, CCVal, Cmp);
13975 
13976   Created.push_back(Cmp.getNode());
13977   Created.push_back(Add.getNode());
13978   Created.push_back(CSel.getNode());
13979 
13980   // Divide by pow2.
13981   SDValue SRA =
13982       DAG.getNode(ISD::SRA, DL, VT, CSel, DAG.getConstant(Lg2, DL, MVT::i64));
13983 
13984   // If we're dividing by a positive value, we're done.  Otherwise, we must
13985   // negate the result.
13986   if (Divisor.isNonNegative())
13987     return SRA;
13988 
13989   Created.push_back(SRA.getNode());
13990   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
13991 }
13992 
13993 SDValue
13994 AArch64TargetLowering::BuildSREMPow2(SDNode *N, const APInt &Divisor,
13995                                      SelectionDAG &DAG,
13996                                      SmallVectorImpl<SDNode *> &Created) const {
13997   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
13998   if (isIntDivCheap(N->getValueType(0), Attr))
13999     return SDValue(N, 0); // Lower SREM as SREM
14000 
14001   EVT VT = N->getValueType(0);
14002 
  // For scalable vectors, or when SVE is used for fixed-length vectors, mark
  // the operation as cheap so it is handled much later. This allows us to
  // handle larger-than-legal types.
14005   if (VT.isScalableVector() || Subtarget->useSVEForFixedLengthVectors())
14006     return SDValue(N, 0);
14007 
14008   // fold (srem X, pow2)
14009   if ((VT != MVT::i32 && VT != MVT::i64) ||
14010       !(Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()))
14011     return SDValue();
14012 
14013   unsigned Lg2 = Divisor.countTrailingZeros();
14014   if (Lg2 == 0)
14015     return SDValue();
14016 
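  // For Lg2 == 1 a single CSNEG of (N0 & 1), keyed on the sign of N0, is
  // enough. Otherwise compute both N0 & (Pow2 - 1) and (0 - N0) & (Pow2 - 1)
  // and use a CSNEG keyed on the flags of the SUBS to select the right one,
  // negating the latter.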
14017   SDLoc DL(N);
14018   SDValue N0 = N->getOperand(0);
14019   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
14020   SDValue Zero = DAG.getConstant(0, DL, VT);
14021   SDValue CCVal, CSNeg;
14022   if (Lg2 == 1) {
14023     SDValue Cmp = getAArch64Cmp(N0, Zero, ISD::SETGE, CCVal, DAG, DL);
14024     SDValue And = DAG.getNode(ISD::AND, DL, VT, N0, Pow2MinusOne);
14025     CSNeg = DAG.getNode(AArch64ISD::CSNEG, DL, VT, And, And, CCVal, Cmp);
14026 
14027     Created.push_back(Cmp.getNode());
14028     Created.push_back(And.getNode());
14029   } else {
14030     SDValue CCVal = DAG.getConstant(AArch64CC::MI, DL, MVT_CC);
14031     SDVTList VTs = DAG.getVTList(VT, MVT::i32);
14032 
14033     SDValue Negs = DAG.getNode(AArch64ISD::SUBS, DL, VTs, Zero, N0);
14034     SDValue AndPos = DAG.getNode(ISD::AND, DL, VT, N0, Pow2MinusOne);
14035     SDValue AndNeg = DAG.getNode(ISD::AND, DL, VT, Negs, Pow2MinusOne);
14036     CSNeg = DAG.getNode(AArch64ISD::CSNEG, DL, VT, AndPos, AndNeg, CCVal,
14037                         Negs.getValue(1));
14038 
14039     Created.push_back(Negs.getNode());
14040     Created.push_back(AndPos.getNode());
14041     Created.push_back(AndNeg.getNode());
14042   }
14043 
14044   return CSNeg;
14045 }
14046 
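/// Returns true if \p S is one of the SVE element count intrinsics
/// (cntb, cnth, cntw or cntd).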
14047 static bool IsSVECntIntrinsic(SDValue S) {
14048   switch(getIntrinsicID(S.getNode())) {
14049   default:
14050     break;
14051   case Intrinsic::aarch64_sve_cntb:
14052   case Intrinsic::aarch64_sve_cnth:
14053   case Intrinsic::aarch64_sve_cntw:
14054   case Intrinsic::aarch64_sve_cntd:
14055     return true;
14056   }
14057   return false;
14058 }
14059 
14060 /// Calculates what the pre-extend type is, based on the extension
14061 /// operation node provided by \p Extend.
14062 ///
14063 /// In the case that \p Extend is a SIGN_EXTEND or a ZERO_EXTEND, the
14064 /// pre-extend type is pulled directly from the operand, while other extend
14065 /// operations need a bit more inspection to get this information.
14066 ///
14067 /// \param Extend The SDNode from the DAG that represents the extend operation
14068 ///
14069 /// \returns The type representing the \p Extend source type, or \p MVT::Other
14070 /// if no valid type can be determined
14071 static EVT calculatePreExtendType(SDValue Extend) {
14072   switch (Extend.getOpcode()) {
14073   case ISD::SIGN_EXTEND:
14074   case ISD::ZERO_EXTEND:
14075     return Extend.getOperand(0).getValueType();
14076   case ISD::AssertSext:
14077   case ISD::AssertZext:
14078   case ISD::SIGN_EXTEND_INREG: {
14079     VTSDNode *TypeNode = dyn_cast<VTSDNode>(Extend.getOperand(1));
14080     if (!TypeNode)
14081       return MVT::Other;
14082     return TypeNode->getVT();
14083   }
14084   case ISD::AND: {
14085     ConstantSDNode *Constant =
14086         dyn_cast<ConstantSDNode>(Extend.getOperand(1).getNode());
14087     if (!Constant)
14088       return MVT::Other;
14089 
14090     uint32_t Mask = Constant->getZExtValue();
14091 
14092     if (Mask == UCHAR_MAX)
14093       return MVT::i8;
14094     else if (Mask == USHRT_MAX)
14095       return MVT::i16;
14096     else if (Mask == UINT_MAX)
14097       return MVT::i32;
14098 
14099     return MVT::Other;
14100   }
14101   default:
14102     return MVT::Other;
14103   }
14104 }
14105 
14106 /// Combines a buildvector(sext/zext) or shuffle(sext/zext, undef) node pattern
14107 /// into sext/zext(buildvector) or sext/zext(shuffle) making use of the vector
14108 /// SExt/ZExt rather than the scalar SExt/ZExt
14109 static SDValue performBuildShuffleExtendCombine(SDValue BV, SelectionDAG &DAG) {
14110   EVT VT = BV.getValueType();
14111   if (BV.getOpcode() != ISD::BUILD_VECTOR &&
14112       BV.getOpcode() != ISD::VECTOR_SHUFFLE)
14113     return SDValue();
14114 
14115   // Use the first item in the buildvector/shuffle to get the size of the
14116   // extend, and make sure it looks valid.
14117   SDValue Extend = BV->getOperand(0);
14118   unsigned ExtendOpcode = Extend.getOpcode();
14119   bool IsSExt = ExtendOpcode == ISD::SIGN_EXTEND ||
14120                 ExtendOpcode == ISD::SIGN_EXTEND_INREG ||
14121                 ExtendOpcode == ISD::AssertSext;
14122   if (!IsSExt && ExtendOpcode != ISD::ZERO_EXTEND &&
14123       ExtendOpcode != ISD::AssertZext && ExtendOpcode != ISD::AND)
14124     return SDValue();
  // Shuffle inputs are vectors; limit to SIGN_EXTEND and ZERO_EXTEND to ensure
  // calculatePreExtendType will work without issue.
14127   if (BV.getOpcode() == ISD::VECTOR_SHUFFLE &&
14128       ExtendOpcode != ISD::SIGN_EXTEND && ExtendOpcode != ISD::ZERO_EXTEND)
14129     return SDValue();
14130 
14131   // Restrict valid pre-extend data type
14132   EVT PreExtendType = calculatePreExtendType(Extend);
14133   if (PreExtendType == MVT::Other ||
14134       PreExtendType.getScalarSizeInBits() != VT.getScalarSizeInBits() / 2)
14135     return SDValue();
14136 
14137   // Make sure all other operands are equally extended
14138   for (SDValue Op : drop_begin(BV->ops())) {
14139     if (Op.isUndef())
14140       continue;
14141     unsigned Opc = Op.getOpcode();
14142     bool OpcIsSExt = Opc == ISD::SIGN_EXTEND || Opc == ISD::SIGN_EXTEND_INREG ||
14143                      Opc == ISD::AssertSext;
14144     if (OpcIsSExt != IsSExt || calculatePreExtendType(Op) != PreExtendType)
14145       return SDValue();
14146   }
14147 
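  // Rebuild the build_vector/shuffle on the narrower pre-extend element type,
  // then re-apply a single vector-wide sign/zero extend to the original type.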
14148   SDValue NBV;
14149   SDLoc DL(BV);
14150   if (BV.getOpcode() == ISD::BUILD_VECTOR) {
14151     EVT PreExtendVT = VT.changeVectorElementType(PreExtendType);
14152     EVT PreExtendLegalType =
14153         PreExtendType.getScalarSizeInBits() < 32 ? MVT::i32 : PreExtendType;
14154     SmallVector<SDValue, 8> NewOps;
14155     for (SDValue Op : BV->ops())
14156       NewOps.push_back(Op.isUndef() ? DAG.getUNDEF(PreExtendLegalType)
14157                                     : DAG.getAnyExtOrTrunc(Op.getOperand(0), DL,
14158                                                            PreExtendLegalType));
14159     NBV = DAG.getNode(ISD::BUILD_VECTOR, DL, PreExtendVT, NewOps);
14160   } else { // BV.getOpcode() == ISD::VECTOR_SHUFFLE
14161     EVT PreExtendVT = VT.changeVectorElementType(PreExtendType.getScalarType());
14162     NBV = DAG.getVectorShuffle(PreExtendVT, DL, BV.getOperand(0).getOperand(0),
14163                                BV.getOperand(1).isUndef()
14164                                    ? DAG.getUNDEF(PreExtendVT)
14165                                    : BV.getOperand(1).getOperand(0),
14166                                cast<ShuffleVectorSDNode>(BV)->getMask());
14167   }
14168   return DAG.getNode(IsSExt ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, VT, NBV);
14169 }
14170 
14171 /// Combines a mul(dup(sext/zext)) node pattern into mul(sext/zext(dup))
14172 /// making use of the vector SExt/ZExt rather than the scalar SExt/ZExt
14173 static SDValue performMulVectorExtendCombine(SDNode *Mul, SelectionDAG &DAG) {
14174   // If the value type isn't a vector, none of the operands are going to be dups
14175   EVT VT = Mul->getValueType(0);
14176   if (VT != MVT::v8i16 && VT != MVT::v4i32 && VT != MVT::v2i64)
14177     return SDValue();
14178 
14179   SDValue Op0 = performBuildShuffleExtendCombine(Mul->getOperand(0), DAG);
14180   SDValue Op1 = performBuildShuffleExtendCombine(Mul->getOperand(1), DAG);
14181 
  // Neither operand has been changed; don't make any further changes.
14183   if (!Op0 && !Op1)
14184     return SDValue();
14185 
14186   SDLoc DL(Mul);
14187   return DAG.getNode(Mul->getOpcode(), DL, VT, Op0 ? Op0 : Mul->getOperand(0),
14188                      Op1 ? Op1 : Mul->getOperand(1));
14189 }
14190 
14191 static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
14192                                  TargetLowering::DAGCombinerInfo &DCI,
14193                                  const AArch64Subtarget *Subtarget) {
14194 
14195   if (SDValue Ext = performMulVectorExtendCombine(N, DAG))
14196     return Ext;
14197 
14198   if (DCI.isBeforeLegalizeOps())
14199     return SDValue();
14200 
14201   // Canonicalize X*(Y+1) -> X*Y+X and (X+1)*Y -> X*Y+Y,
14202   // and in MachineCombiner pass, add+mul will be combined into madd.
14203   // Similarly, X*(1-Y) -> X - X*Y and (1-Y)*X -> X - Y*X.
14204   SDLoc DL(N);
14205   EVT VT = N->getValueType(0);
14206   SDValue N0 = N->getOperand(0);
14207   SDValue N1 = N->getOperand(1);
14208   SDValue MulOper;
14209   unsigned AddSubOpc;
14210 
14211   auto IsAddSubWith1 = [&](SDValue V) -> bool {
14212     AddSubOpc = V->getOpcode();
14213     if ((AddSubOpc == ISD::ADD || AddSubOpc == ISD::SUB) && V->hasOneUse()) {
14214       SDValue Opnd = V->getOperand(1);
14215       MulOper = V->getOperand(0);
14216       if (AddSubOpc == ISD::SUB)
14217         std::swap(Opnd, MulOper);
14218       if (auto C = dyn_cast<ConstantSDNode>(Opnd))
14219         return C->isOne();
14220     }
14221     return false;
14222   };
14223 
14224   if (IsAddSubWith1(N0)) {
14225     SDValue MulVal = DAG.getNode(ISD::MUL, DL, VT, N1, MulOper);
14226     return DAG.getNode(AddSubOpc, DL, VT, N1, MulVal);
14227   }
14228 
14229   if (IsAddSubWith1(N1)) {
14230     SDValue MulVal = DAG.getNode(ISD::MUL, DL, VT, N0, MulOper);
14231     return DAG.getNode(AddSubOpc, DL, VT, N0, MulVal);
14232   }
14233 
14234   // The below optimizations require a constant RHS.
14235   if (!isa<ConstantSDNode>(N1))
14236     return SDValue();
14237 
14238   ConstantSDNode *C = cast<ConstantSDNode>(N1);
14239   const APInt &ConstValue = C->getAPIntValue();
14240 
14241   // Allow the scaling to be folded into the `cnt` instruction by preventing
14242   // the scaling to be obscured here. This makes it easier to pattern match.
14243   if (IsSVECntIntrinsic(N0) ||
      (N0->getOpcode() == ISD::TRUNCATE &&
       IsSVECntIntrinsic(N0->getOperand(0))))
    if (ConstValue.sge(1) && ConstValue.sle(16))
      return SDValue();
14248 
14249   // Multiplication of a power of two plus/minus one can be done more
  // cheaply as a shift+add/sub. For now, this is done unconditionally. If
14251   // future CPUs have a cheaper MADD instruction, this may need to be
14252   // gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and
14253   // 64-bit is 5 cycles, so this is always a win.
14254   // More aggressively, some multiplications N0 * C can be lowered to
14255   // shift+add+shift if the constant C = A * B where A = 2^N + 1 and B = 2^M,
14256   // e.g. 6=3*2=(2+1)*2.
14257   // TODO: consider lowering more cases, e.g. C = 14, -6, -14 or even 45
14258   // which equals to (1+2)*16-(1+2).
14259 
14260   // TrailingZeroes is used to test if the mul can be lowered to
14261   // shift+add+shift.
14262   unsigned TrailingZeroes = ConstValue.countTrailingZeros();
14263   if (TrailingZeroes) {
14264     // Conservatively do not lower to shift+add+shift if the mul might be
14265     // folded into smul or umul.
14266     if (N0->hasOneUse() && (isSignExtended(N0.getNode(), DAG) ||
14267                             isZeroExtended(N0.getNode(), DAG)))
14268       return SDValue();
14269     // Conservatively do not lower to shift+add+shift if the mul might be
14270     // folded into madd or msub.
14271     if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ADD ||
14272                            N->use_begin()->getOpcode() == ISD::SUB))
14273       return SDValue();
14274   }
14275   // Use ShiftedConstValue instead of ConstValue to support both shift+add/sub
14276   // and shift+add+shift.
14277   APInt ShiftedConstValue = ConstValue.ashr(TrailingZeroes);
14278 
14279   unsigned ShiftAmt;
14280   // Is the shifted value the LHS operand of the add/sub?
14281   bool ShiftValUseIsN0 = true;
14282   // Do we need to negate the result?
14283   bool NegateResult = false;
14284 
14285   if (ConstValue.isNonNegative()) {
14286     // (mul x, 2^N + 1) => (add (shl x, N), x)
14287     // (mul x, 2^N - 1) => (sub (shl x, N), x)
14288     // (mul x, (2^N + 1) * 2^M) => (shl (add (shl x, N), x), M)
14289     APInt SCVMinus1 = ShiftedConstValue - 1;
14290     APInt CVPlus1 = ConstValue + 1;
14291     if (SCVMinus1.isPowerOf2()) {
14292       ShiftAmt = SCVMinus1.logBase2();
14293       AddSubOpc = ISD::ADD;
14294     } else if (CVPlus1.isPowerOf2()) {
14295       ShiftAmt = CVPlus1.logBase2();
14296       AddSubOpc = ISD::SUB;
14297     } else
14298       return SDValue();
14299   } else {
14300     // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
14301     // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
14302     APInt CVNegPlus1 = -ConstValue + 1;
14303     APInt CVNegMinus1 = -ConstValue - 1;
14304     if (CVNegPlus1.isPowerOf2()) {
14305       ShiftAmt = CVNegPlus1.logBase2();
14306       AddSubOpc = ISD::SUB;
14307       ShiftValUseIsN0 = false;
14308     } else if (CVNegMinus1.isPowerOf2()) {
14309       ShiftAmt = CVNegMinus1.logBase2();
14310       AddSubOpc = ISD::ADD;
14311       NegateResult = true;
14312     } else
14313       return SDValue();
14314   }
14315 
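  // Materialise the shifted value and fold it into an add/sub; any deferred
  // negation or shift by the trailing zeroes is applied to the result below.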
14316   SDValue ShiftedVal = DAG.getNode(ISD::SHL, DL, VT, N0,
14317                                    DAG.getConstant(ShiftAmt, DL, MVT::i64));
14318 
14319   SDValue AddSubN0 = ShiftValUseIsN0 ? ShiftedVal : N0;
14320   SDValue AddSubN1 = ShiftValUseIsN0 ? N0 : ShiftedVal;
14321   SDValue Res = DAG.getNode(AddSubOpc, DL, VT, AddSubN0, AddSubN1);
14322   assert(!(NegateResult && TrailingZeroes) &&
14323          "NegateResult and TrailingZeroes cannot both be true for now.");
14324   // Negate the result.
14325   if (NegateResult)
14326     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
14327   // Shift the result.
14328   if (TrailingZeroes)
14329     return DAG.getNode(ISD::SHL, DL, VT, Res,
14330                        DAG.getConstant(TrailingZeroes, DL, MVT::i64));
14331   return Res;
14332 }
14333 
14334 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
14335                                                          SelectionDAG &DAG) {
14336   // Take advantage of vector comparisons producing 0 or -1 in each lane to
14337   // optimize away operation when it's from a constant.
14338   //
14339   // The general transformation is:
14340   //    UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
14341   //       AND(VECTOR_CMP(x,y), constant2)
14342   //    constant2 = UNARYOP(constant)
14343 
14344   // Early exit if this isn't a vector operation, the operand of the
14345   // unary operation isn't a bitwise AND, or if the sizes of the operations
14346   // aren't the same.
14347   EVT VT = N->getValueType(0);
14348   if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
14349       N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
14350       VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
14351     return SDValue();
14352 
14353   // Now check that the other operand of the AND is a constant. We could
14354   // make the transformation for non-constant splats as well, but it's unclear
14355   // that would be a benefit as it would not eliminate any operations, just
14356   // perform one more step in scalar code before moving to the vector unit.
14357   if (BuildVectorSDNode *BV =
14358           dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
14359     // Bail out if the vector isn't a constant.
14360     if (!BV->isConstant())
14361       return SDValue();
14362 
14363     // Everything checks out. Build up the new and improved node.
14364     SDLoc DL(N);
14365     EVT IntVT = BV->getValueType(0);
14366     // Create a new constant of the appropriate type for the transformed
14367     // DAG.
14368     SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
14369     // The AND node needs bitcasts to/from an integer vector type around it.
14370     SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
14371     SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
14372                                  N->getOperand(0)->getOperand(0), MaskConst);
14373     SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
14374     return Res;
14375   }
14376 
14377   return SDValue();
14378 }
14379 
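/// Combine [SU]INT_TO_FP nodes: first try to fold away a conversion that is
/// fed by a masked vector compare, then try to replace an integer load that
/// only feeds the conversion with an FP load and an AdvSIMD scalar {S|U}CVTF.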
14380 static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG,
14381                                      const AArch64Subtarget *Subtarget) {
14382   // First try to optimize away the conversion when it's conditionally from
14383   // a constant. Vectors only.
14384   if (SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG))
14385     return Res;
14386 
14387   EVT VT = N->getValueType(0);
14388   if (VT != MVT::f32 && VT != MVT::f64)
14389     return SDValue();
14390 
14391   // Only optimize when the source and destination types have the same width.
14392   if (VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits())
14393     return SDValue();
14394 
14395   // If the result of an integer load is only used by an integer-to-float
  // conversion, use an FP load and an AdvSIMD scalar {S|U}CVTF instead.
14397   // This eliminates an "integer-to-vector-move" UOP and improves throughput.
14398   SDValue N0 = N->getOperand(0);
  if (Subtarget->hasNEON() && ISD::isNormalLoad(N0.getNode()) &&
      N0.hasOneUse() &&
      // Do not change the width of a volatile load.
      !cast<LoadSDNode>(N0)->isVolatile()) {
14402     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
14403     SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
14404                                LN0->getPointerInfo(), LN0->getAlign(),
14405                                LN0->getMemOperand()->getFlags());
14406 
14407     // Make sure successors of the original load stay after it by updating them
14408     // to use the new Chain.
14409     DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), Load.getValue(1));
14410 
    unsigned Opcode = (N->getOpcode() == ISD::SINT_TO_FP) ? AArch64ISD::SITOF
                                                          : AArch64ISD::UITOF;
14413     return DAG.getNode(Opcode, SDLoc(N), VT, Load);
14414   }
14415 
14416   return SDValue();
14417 }
14418 
14419 /// Fold a floating-point multiply by power of two into floating-point to
14420 /// fixed-point conversion.
14421 static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG,
14422                                      TargetLowering::DAGCombinerInfo &DCI,
14423                                      const AArch64Subtarget *Subtarget) {
14424   if (!Subtarget->hasNEON())
14425     return SDValue();
14426 
14427   if (!N->getValueType(0).isSimple())
14428     return SDValue();
14429 
14430   SDValue Op = N->getOperand(0);
14431   if (!Op.getValueType().isSimple() || Op.getOpcode() != ISD::FMUL)
14432     return SDValue();
14433 
14434   if (!Op.getValueType().is64BitVector() && !Op.getValueType().is128BitVector())
14435     return SDValue();
14436 
14437   SDValue ConstVec = Op->getOperand(1);
14438   if (!isa<BuildVectorSDNode>(ConstVec))
14439     return SDValue();
14440 
14441   MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
14442   uint32_t FloatBits = FloatTy.getSizeInBits();
14443   if (FloatBits != 32 && FloatBits != 64 &&
14444       (FloatBits != 16 || !Subtarget->hasFullFP16()))
14445     return SDValue();
14446 
14447   MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
14448   uint32_t IntBits = IntTy.getSizeInBits();
14449   if (IntBits != 16 && IntBits != 32 && IntBits != 64)
14450     return SDValue();
14451 
14452   // Avoid conversions where iN is larger than the float (e.g., float -> i64).
14453   if (IntBits > FloatBits)
14454     return SDValue();
14455 
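  // The multiplier must be a splat of an exact power of two, 2^C with
  // 0 < C <= Bits; C is then used as the fixed-point scale (number of
  // fractional bits) of the conversion intrinsic below.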
14456   BitVector UndefElements;
14457   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
14458   int32_t Bits = IntBits == 64 ? 64 : 32;
14459   int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, Bits + 1);
14460   if (C == -1 || C == 0 || C > Bits)
14461     return SDValue();
14462 
14463   EVT ResTy = Op.getValueType().changeVectorElementTypeToInteger();
14464   if (!DAG.getTargetLoweringInfo().isTypeLegal(ResTy))
14465     return SDValue();
14466 
14467   if (N->getOpcode() == ISD::FP_TO_SINT_SAT ||
14468       N->getOpcode() == ISD::FP_TO_UINT_SAT) {
14469     EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
14470     if (SatVT.getScalarSizeInBits() != IntBits || IntBits != FloatBits)
14471       return SDValue();
14472   }
14473 
14474   SDLoc DL(N);
14475   bool IsSigned = (N->getOpcode() == ISD::FP_TO_SINT ||
14476                    N->getOpcode() == ISD::FP_TO_SINT_SAT);
14477   unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfp2fxs
14478                                       : Intrinsic::aarch64_neon_vcvtfp2fxu;
14479   SDValue FixConv =
14480       DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, ResTy,
14481                   DAG.getConstant(IntrinsicOpcode, DL, MVT::i32),
14482                   Op->getOperand(0), DAG.getConstant(C, DL, MVT::i32));
14483   // We can handle smaller integers by generating an extra trunc.
14484   if (IntBits < FloatBits)
14485     FixConv = DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), FixConv);
14486 
14487   return FixConv;
14488 }
14489 
14490 /// Fold a floating-point divide by power of two into fixed-point to
14491 /// floating-point conversion.
14492 static SDValue performFDivCombine(SDNode *N, SelectionDAG &DAG,
14493                                   TargetLowering::DAGCombinerInfo &DCI,
14494                                   const AArch64Subtarget *Subtarget) {
14495   if (!Subtarget->hasNEON())
14496     return SDValue();
14497 
14498   SDValue Op = N->getOperand(0);
14499   unsigned Opc = Op->getOpcode();
14500   if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
14501       !Op.getOperand(0).getValueType().isSimple() ||
14502       (Opc != ISD::SINT_TO_FP && Opc != ISD::UINT_TO_FP))
14503     return SDValue();
14504 
14505   SDValue ConstVec = N->getOperand(1);
14506   if (!isa<BuildVectorSDNode>(ConstVec))
14507     return SDValue();
14508 
14509   MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
14510   int32_t IntBits = IntTy.getSizeInBits();
14511   if (IntBits != 16 && IntBits != 32 && IntBits != 64)
14512     return SDValue();
14513 
14514   MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
14515   int32_t FloatBits = FloatTy.getSizeInBits();
14516   if (FloatBits != 32 && FloatBits != 64)
14517     return SDValue();
14518 
14519   // Avoid conversions where iN is larger than the float (e.g., i64 -> float).
14520   if (IntBits > FloatBits)
14521     return SDValue();
14522 
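  // The divisor must be a splat of an exact power of two, 2^C with
  // 0 < C <= FloatBits; C becomes the fixed-point scale of the fixed-to-FP
  // conversion built below.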
14523   BitVector UndefElements;
14524   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
  int32_t C =
      BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, FloatBits + 1);
14526   if (C == -1 || C == 0 || C > FloatBits)
14527     return SDValue();
14528 
14529   MVT ResTy;
14530   unsigned NumLanes = Op.getValueType().getVectorNumElements();
14531   switch (NumLanes) {
14532   default:
14533     return SDValue();
14534   case 2:
14535     ResTy = FloatBits == 32 ? MVT::v2i32 : MVT::v2i64;
14536     break;
14537   case 4:
14538     ResTy = FloatBits == 32 ? MVT::v4i32 : MVT::v4i64;
14539     break;
14540   }
14541 
14542   if (ResTy == MVT::v4i64 && DCI.isBeforeLegalizeOps())
14543     return SDValue();
14544 
14545   SDLoc DL(N);
14546   SDValue ConvInput = Op.getOperand(0);
14547   bool IsSigned = Opc == ISD::SINT_TO_FP;
14548   if (IntBits < FloatBits)
14549     ConvInput = DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
14550                             ResTy, ConvInput);
14551 
14552   unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfxs2fp
14553                                       : Intrinsic::aarch64_neon_vcvtfxu2fp;
14554   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
14555                      DAG.getConstant(IntrinsicOpcode, DL, MVT::i32), ConvInput,
14556                      DAG.getConstant(C, DL, MVT::i32));
14557 }
14558 
14559 /// An EXTR instruction is made up of two shifts, ORed together. This helper
14560 /// searches for and classifies those shifts.
14561 static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
14562                          bool &FromHi) {
14563   if (N.getOpcode() == ISD::SHL)
14564     FromHi = false;
14565   else if (N.getOpcode() == ISD::SRL)
14566     FromHi = true;
14567   else
14568     return false;
14569 
14570   if (!isa<ConstantSDNode>(N.getOperand(1)))
14571     return false;
14572 
14573   ShiftAmount = N->getConstantOperandVal(1);
14574   Src = N->getOperand(0);
14575   return true;
14576 }
14577 
14578 /// EXTR instruction extracts a contiguous chunk of bits from two existing
14579 /// registers viewed as a high/low pair. This function looks for the pattern:
14580 /// <tt>(or (shl VAL1, \#N), (srl VAL2, \#RegWidth-N))</tt> and replaces it
14581 /// with an EXTR. Can't quite be done in TableGen because the two immediates
14582 /// aren't independent.
14583 static SDValue tryCombineToEXTR(SDNode *N,
14584                                 TargetLowering::DAGCombinerInfo &DCI) {
14585   SelectionDAG &DAG = DCI.DAG;
14586   SDLoc DL(N);
14587   EVT VT = N->getValueType(0);
14588 
14589   assert(N->getOpcode() == ISD::OR && "Unexpected root");
14590 
14591   if (VT != MVT::i32 && VT != MVT::i64)
14592     return SDValue();
14593 
14594   SDValue LHS;
14595   uint32_t ShiftLHS = 0;
14596   bool LHSFromHi = false;
14597   if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi))
14598     return SDValue();
14599 
14600   SDValue RHS;
14601   uint32_t ShiftRHS = 0;
14602   bool RHSFromHi = false;
14603   if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi))
14604     return SDValue();
14605 
14606   // If they're both trying to come from the high part of the register, they're
14607   // not really an EXTR.
14608   if (LHSFromHi == RHSFromHi)
14609     return SDValue();
14610 
14611   if (ShiftLHS + ShiftRHS != VT.getSizeInBits())
14612     return SDValue();
14613 
14614   if (LHSFromHi) {
14615     std::swap(LHS, RHS);
14616     std::swap(ShiftLHS, ShiftRHS);
14617   }
14618 
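  // LHS is now the SHL source and RHS the SRL source; the SRL amount becomes
  // the EXTR immediate.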
14619   return DAG.getNode(AArch64ISD::EXTR, DL, VT, LHS, RHS,
14620                      DAG.getConstant(ShiftRHS, DL, MVT::i64));
14621 }
14622 
14623 static SDValue tryCombineToBSL(SDNode *N,
                               TargetLowering::DAGCombinerInfo &DCI) {
14625   EVT VT = N->getValueType(0);
14626   SelectionDAG &DAG = DCI.DAG;
14627   SDLoc DL(N);
14628 
14629   if (!VT.isVector())
14630     return SDValue();
14631 
14632   // The combining code currently only works for NEON vectors. In particular,
14633   // it does not work for SVE when dealing with vectors wider than 128 bits.
14634   if (!VT.is64BitVector() && !VT.is128BitVector())
14635     return SDValue();
14636 
14637   SDValue N0 = N->getOperand(0);
14638   if (N0.getOpcode() != ISD::AND)
14639     return SDValue();
14640 
14641   SDValue N1 = N->getOperand(1);
14642   if (N1.getOpcode() != ISD::AND)
14643     return SDValue();
14644 
14645   // InstCombine does (not (neg a)) => (add a -1).
14646   // Try: (or (and (neg a) b) (and (add a -1) c)) => (bsl (neg a) b c)
14647   // Loop over all combinations of AND operands.
14648   for (int i = 1; i >= 0; --i) {
14649     for (int j = 1; j >= 0; --j) {
14650       SDValue O0 = N0->getOperand(i);
14651       SDValue O1 = N1->getOperand(j);
14652       SDValue Sub, Add, SubSibling, AddSibling;
14653 
14654       // Find a SUB and an ADD operand, one from each AND.
14655       if (O0.getOpcode() == ISD::SUB && O1.getOpcode() == ISD::ADD) {
14656         Sub = O0;
14657         Add = O1;
14658         SubSibling = N0->getOperand(1 - i);
14659         AddSibling = N1->getOperand(1 - j);
14660       } else if (O0.getOpcode() == ISD::ADD && O1.getOpcode() == ISD::SUB) {
14661         Add = O0;
14662         Sub = O1;
14663         AddSibling = N0->getOperand(1 - i);
14664         SubSibling = N1->getOperand(1 - j);
14665       } else
14666         continue;
14667 
14668       if (!ISD::isBuildVectorAllZeros(Sub.getOperand(0).getNode()))
14669         continue;
14670 
      // The constant-ones vector is always the right-hand operand of the Add.
14672       if (!ISD::isBuildVectorAllOnes(Add.getOperand(1).getNode()))
14673         continue;
14674 
14675       if (Sub.getOperand(1) != Add.getOperand(0))
14676         continue;
14677 
14678       return DAG.getNode(AArch64ISD::BSP, DL, VT, Sub, SubSibling, AddSibling);
14679     }
14680   }
14681 
14682   // (or (and a b) (and (not a) c)) => (bsl a b c)
14683   // We only have to look for constant vectors here since the general, variable
14684   // case can be handled in TableGen.
14685   unsigned Bits = VT.getScalarSizeInBits();
14686   uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1);
14687   for (int i = 1; i >= 0; --i)
14688     for (int j = 1; j >= 0; --j) {
14689       BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(i));
14690       BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(j));
14691       if (!BVN0 || !BVN1)
14692         continue;
14693 
14694       bool FoundMatch = true;
14695       for (unsigned k = 0; k < VT.getVectorNumElements(); ++k) {
14696         ConstantSDNode *CN0 = dyn_cast<ConstantSDNode>(BVN0->getOperand(k));
14697         ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(BVN1->getOperand(k));
14698         if (!CN0 || !CN1 ||
14699             CN0->getZExtValue() != (BitMask & ~CN1->getZExtValue())) {
14700           FoundMatch = false;
14701           break;
14702         }
14703       }
14704 
14705       if (FoundMatch)
14706         return DAG.getNode(AArch64ISD::BSP, DL, VT, SDValue(BVN0, 0),
14707                            N0->getOperand(1 - i), N1->getOperand(1 - j));
14708     }
14709 
14710   return SDValue();
14711 }
14712 
14713 // Given a tree of and/or(csel(0, 1, cc0), csel(0, 1, cc1)), we may be able to
14714 // convert to csel(ccmp(.., cc0)), depending on cc1:
14715 
14716 // (AND (CSET cc0 cmp0) (CSET cc1 (CMP x1 y1)))
14717 // =>
14718 // (CSET cc1 (CCMP x1 y1 !cc1 cc0 cmp0))
14719 //
14720 // (OR (CSET cc0 cmp0) (CSET cc1 (CMP x1 y1)))
14721 // =>
14722 // (CSET cc1 (CCMP x1 y1 cc1 !cc0 cmp0))
14723 static SDValue performANDORCSELCombine(SDNode *N, SelectionDAG &DAG) {
14724   EVT VT = N->getValueType(0);
14725   SDValue CSel0 = N->getOperand(0);
14726   SDValue CSel1 = N->getOperand(1);
14727 
14728   if (CSel0.getOpcode() != AArch64ISD::CSEL ||
14729       CSel1.getOpcode() != AArch64ISD::CSEL)
14730     return SDValue();
14731 
14732   if (!CSel0->hasOneUse() || !CSel1->hasOneUse())
14733     return SDValue();
14734 
14735   if (!isNullConstant(CSel0.getOperand(0)) ||
14736       !isOneConstant(CSel0.getOperand(1)) ||
14737       !isNullConstant(CSel1.getOperand(0)) ||
14738       !isOneConstant(CSel1.getOperand(1)))
14739     return SDValue();
14740 
14741   SDValue Cmp0 = CSel0.getOperand(3);
14742   SDValue Cmp1 = CSel1.getOperand(3);
14743   AArch64CC::CondCode CC0 = (AArch64CC::CondCode)CSel0.getConstantOperandVal(2);
14744   AArch64CC::CondCode CC1 = (AArch64CC::CondCode)CSel1.getConstantOperandVal(2);
14745   if (!Cmp0->hasOneUse() || !Cmp1->hasOneUse())
14746     return SDValue();
14747   if (Cmp1.getOpcode() != AArch64ISD::SUBS &&
14748       Cmp0.getOpcode() == AArch64ISD::SUBS) {
14749     std::swap(Cmp0, Cmp1);
14750     std::swap(CC0, CC1);
14751   }
14752 
14753   if (Cmp1.getOpcode() != AArch64ISD::SUBS)
14754     return SDValue();
14755 
14756   SDLoc DL(N);
14757   SDValue CCmp;
14758 
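  // Emit the conditional compare. The AND and OR forms differ only in which
  // condition guards the second compare and in the NZCV immediate used when
  // that compare is not performed.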
14759   if (N->getOpcode() == ISD::AND) {
14760     AArch64CC::CondCode InvCC0 = AArch64CC::getInvertedCondCode(CC0);
14761     SDValue Condition = DAG.getConstant(InvCC0, DL, MVT_CC);
14762     unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(CC1);
14763     SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
14764     CCmp = DAG.getNode(AArch64ISD::CCMP, DL, MVT_CC, Cmp1.getOperand(0),
14765                        Cmp1.getOperand(1), NZCVOp, Condition, Cmp0);
14766   } else {
14768     AArch64CC::CondCode InvCC1 = AArch64CC::getInvertedCondCode(CC1);
14769     SDValue Condition = DAG.getConstant(CC0, DL, MVT_CC);
14770     unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvCC1);
14771     SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
14772     CCmp = DAG.getNode(AArch64ISD::CCMP, DL, MVT_CC, Cmp1.getOperand(0),
14773                        Cmp1.getOperand(1), NZCVOp, Condition, Cmp0);
14774   }
14775   return DAG.getNode(AArch64ISD::CSEL, DL, VT, CSel0.getOperand(0),
14776                      CSel0.getOperand(1), DAG.getConstant(CC1, DL, MVT::i32),
14777                      CCmp);
14778 }
14779 
14780 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
14781                                 const AArch64Subtarget *Subtarget) {
14782   SelectionDAG &DAG = DCI.DAG;
14783   EVT VT = N->getValueType(0);
14784 
14785   if (SDValue R = performANDORCSELCombine(N, DAG))
14786     return R;
14787 
14788   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
14789     return SDValue();
14790 
14791   // Attempt to form an EXTR from (or (shl VAL1, #N), (srl VAL2, #RegWidth-N))
14792   if (SDValue Res = tryCombineToEXTR(N, DCI))
14793     return Res;
14794 
14795   if (SDValue Res = tryCombineToBSL(N, DCI))
14796     return Res;
14797 
14798   return SDValue();
14799 }
14800 
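/// Returns true if \p N is a splat of a constant that is exactly the all-ones
/// mask for the element type of \p MemVT (e.g. 0xff for i8 elements).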
14801 static bool isConstantSplatVectorMaskForType(SDNode *N, EVT MemVT) {
14802   if (!MemVT.getVectorElementType().isSimple())
14803     return false;
14804 
14805   uint64_t MaskForTy = 0ull;
14806   switch (MemVT.getVectorElementType().getSimpleVT().SimpleTy) {
14807   case MVT::i8:
14808     MaskForTy = 0xffull;
14809     break;
14810   case MVT::i16:
14811     MaskForTy = 0xffffull;
14812     break;
14813   case MVT::i32:
14814     MaskForTy = 0xffffffffull;
14815     break;
14816   default:
14817     return false;
14819   }
14820 
14821   if (N->getOpcode() == AArch64ISD::DUP || N->getOpcode() == ISD::SPLAT_VECTOR)
14822     if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0)))
14823       return Op0->getAPIntValue().getLimitedValue() == MaskForTy;
14824 
14825   return false;
14826 }
14827 
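/// Combine an AND on an SVE value where the mask only re-applies zeroing that
/// is already implied: push the mask through an unsigned unpack, or drop the
/// AND entirely when the mask matches the implicit zero-extension performed by
/// an SVE (gather) load.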
14828 static SDValue performSVEAndCombine(SDNode *N,
14829                                     TargetLowering::DAGCombinerInfo &DCI) {
14830   if (DCI.isBeforeLegalizeOps())
14831     return SDValue();
14832 
14833   SelectionDAG &DAG = DCI.DAG;
14834   SDValue Src = N->getOperand(0);
14835   unsigned Opc = Src->getOpcode();
14836 
14837   // Zero/any extend of an unsigned unpack
14838   if (Opc == AArch64ISD::UUNPKHI || Opc == AArch64ISD::UUNPKLO) {
14839     SDValue UnpkOp = Src->getOperand(0);
14840     SDValue Dup = N->getOperand(1);
14841 
14842     if (Dup.getOpcode() != ISD::SPLAT_VECTOR)
14843       return SDValue();
14844 
14845     SDLoc DL(N);
14846     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Dup->getOperand(0));
14847     if (!C)
14848       return SDValue();
14849 
14850     uint64_t ExtVal = C->getZExtValue();
14851 
14852     // If the mask is fully covered by the unpack, we don't need to push
14853     // a new AND onto the operand
14854     EVT EltTy = UnpkOp->getValueType(0).getVectorElementType();
14855     if ((ExtVal == 0xFF && EltTy == MVT::i8) ||
14856         (ExtVal == 0xFFFF && EltTy == MVT::i16) ||
14857         (ExtVal == 0xFFFFFFFF && EltTy == MVT::i32))
14858       return Src;
14859 
    // Truncate to prevent a DUP with an over-wide constant.
14861     APInt Mask = C->getAPIntValue().trunc(EltTy.getSizeInBits());
14862 
14863     // Otherwise, make sure we propagate the AND to the operand
14864     // of the unpack
14865     Dup = DAG.getNode(ISD::SPLAT_VECTOR, DL, UnpkOp->getValueType(0),
14866                       DAG.getConstant(Mask.zextOrTrunc(32), DL, MVT::i32));
14867 
14868     SDValue And = DAG.getNode(ISD::AND, DL,
14869                               UnpkOp->getValueType(0), UnpkOp, Dup);
14870 
14871     return DAG.getNode(Opc, DL, N->getValueType(0), And);
14872   }
14873 
14874   if (!EnableCombineMGatherIntrinsics)
14875     return SDValue();
14876 
14877   SDValue Mask = N->getOperand(1);
14878 
14879   if (!Src.hasOneUse())
14880     return SDValue();
14881 
14882   EVT MemVT;
14883 
14884   // SVE load instructions perform an implicit zero-extend, which makes them
14885   // perfect candidates for combining.
14886   switch (Opc) {
14887   case AArch64ISD::LD1_MERGE_ZERO:
14888   case AArch64ISD::LDNF1_MERGE_ZERO:
14889   case AArch64ISD::LDFF1_MERGE_ZERO:
14890     MemVT = cast<VTSDNode>(Src->getOperand(3))->getVT();
14891     break;
14892   case AArch64ISD::GLD1_MERGE_ZERO:
14893   case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
14894   case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
14895   case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
14896   case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
14897   case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
14898   case AArch64ISD::GLD1_IMM_MERGE_ZERO:
14899   case AArch64ISD::GLDFF1_MERGE_ZERO:
14900   case AArch64ISD::GLDFF1_SCALED_MERGE_ZERO:
14901   case AArch64ISD::GLDFF1_SXTW_MERGE_ZERO:
14902   case AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO:
14903   case AArch64ISD::GLDFF1_UXTW_MERGE_ZERO:
14904   case AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO:
14905   case AArch64ISD::GLDFF1_IMM_MERGE_ZERO:
14906   case AArch64ISD::GLDNT1_MERGE_ZERO:
14907     MemVT = cast<VTSDNode>(Src->getOperand(4))->getVT();
14908     break;
14909   default:
14910     return SDValue();
14911   }
14912 
14913   if (isConstantSplatVectorMaskForType(Mask.getNode(), MemVT))
14914     return Src;
14915 
14916   return SDValue();
14917 }
14918 
14919 static SDValue performANDCombine(SDNode *N,
14920                                  TargetLowering::DAGCombinerInfo &DCI) {
14921   SelectionDAG &DAG = DCI.DAG;
14922   SDValue LHS = N->getOperand(0);
14923   SDValue RHS = N->getOperand(1);
14924   EVT VT = N->getValueType(0);
14925 
14926   if (SDValue R = performANDORCSELCombine(N, DAG))
14927     return R;
14928 
14929   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
14930     return SDValue();
14931 
14932   if (VT.isScalableVector())
14933     return performSVEAndCombine(N, DCI);
14934 
14935   // The combining code below works only for NEON vectors. In particular, it
14936   // does not work for SVE when dealing with vectors wider than 128 bits.
14937   if (!VT.is64BitVector() && !VT.is128BitVector())
14938     return SDValue();
14939 
14940   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
14941   if (!BVN)
14942     return SDValue();
14943 
14944   // AND does not accept an immediate, so check if we can use a BIC immediate
14945   // instruction instead. We do this here instead of using a (and x, (mvni imm))
14946   // pattern in isel, because some immediates may be lowered to the preferred
14947   // (and x, (movi imm)) form, even though an mvni representation also exists.
14948   APInt DefBits(VT.getSizeInBits(), 0);
14949   APInt UndefBits(VT.getSizeInBits(), 0);
14950   if (resolveBuildVector(BVN, DefBits, UndefBits)) {
14951     SDValue NewOp;
14952 
14953     DefBits = ~DefBits;
14954     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG,
14955                                     DefBits, &LHS)) ||
14956         (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG,
14957                                     DefBits, &LHS)))
14958       return NewOp;
14959 
14960     UndefBits = ~UndefBits;
14961     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG,
14962                                     UndefBits, &LHS)) ||
14963         (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG,
14964                                     UndefBits, &LHS)))
14965       return NewOp;
14966   }
14967 
14968   return SDValue();
14969 }
14970 
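/// Returns true if a scalar node with opcode \p Opcode and value type \p VT is
/// a candidate for the pairwise-add rewrite in performExtractVectorEltCombine.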
14971 static bool hasPairwiseAdd(unsigned Opcode, EVT VT, bool FullFP16) {
14972   switch (Opcode) {
14973   case ISD::STRICT_FADD:
14974   case ISD::FADD:
14975     return (FullFP16 && VT == MVT::f16) || VT == MVT::f32 || VT == MVT::f64;
14976   case ISD::ADD:
14977     return VT == MVT::i64;
14978   default:
14979     return false;
14980   }
14981 }
14982 
14983 static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
14984                         AArch64CC::CondCode Cond);
14985 
14986 static bool isPredicateCCSettingOp(SDValue N) {
14987   if ((N.getOpcode() == ISD::SETCC) ||
14988       (N.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14989        (N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilege ||
14990         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilegt ||
14991         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilehi ||
14992         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilehs ||
14993         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilele ||
14994         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelo ||
14995         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilels ||
14996         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelt ||
14997         // get_active_lane_mask is lowered to a whilelo instruction.
14998         N.getConstantOperandVal(0) == Intrinsic::get_active_lane_mask)))
14999     return true;
15000 
15001   return false;
15002 }
15003 
15004 // Materialize : i1 = extract_vector_elt t37, Constant:i64<0>
15005 // ... into: "ptrue p, all" + PTEST
15006 static SDValue
15007 performFirstTrueTestVectorCombine(SDNode *N,
15008                                   TargetLowering::DAGCombinerInfo &DCI,
15009                                   const AArch64Subtarget *Subtarget) {
15010   assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
15011   // Make sure PTEST can be legalised with illegal types.
15012   if (!Subtarget->hasSVE() || DCI.isBeforeLegalize())
15013     return SDValue();
15014 
15015   SDValue N0 = N->getOperand(0);
15016   EVT VT = N0.getValueType();
15017 
15018   if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1 ||
15019       !isNullConstant(N->getOperand(1)))
15020     return SDValue();
15021 
  // Restrict the DAG combine to only cases where we're extracting from a
  // flag-setting operation.
15024   if (!isPredicateCCSettingOp(N0))
15025     return SDValue();
15026 
15027   // Extracts of lane 0 for SVE can be expressed as PTEST(Op, FIRST) ? 1 : 0
15028   SelectionDAG &DAG = DCI.DAG;
15029   SDValue Pg = getPTrue(DAG, SDLoc(N), VT, AArch64SVEPredPattern::all);
15030   return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::FIRST_ACTIVE);
15031 }
15032 
15033 // Materialize : Idx = (add (mul vscale, NumEls), -1)
15034 //               i1 = extract_vector_elt t37, Constant:i64<Idx>
15035 //     ... into: "ptrue p, all" + PTEST
15036 static SDValue
15037 performLastTrueTestVectorCombine(SDNode *N,
15038                                  TargetLowering::DAGCombinerInfo &DCI,
15039                                  const AArch64Subtarget *Subtarget) {
15040   assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
  // Make sure PTEST can be legalised with illegal types.
15042   if (!Subtarget->hasSVE() || DCI.isBeforeLegalize())
15043     return SDValue();
15044 
15045   SDValue N0 = N->getOperand(0);
15046   EVT OpVT = N0.getValueType();
15047 
15048   if (!OpVT.isScalableVector() || OpVT.getVectorElementType() != MVT::i1)
15049     return SDValue();
15050 
15051   // Idx == (add (mul vscale, NumEls), -1)
15052   SDValue Idx = N->getOperand(1);
15053   if (Idx.getOpcode() != ISD::ADD || !isAllOnesConstant(Idx.getOperand(1)))
15054     return SDValue();
15055 
15056   SDValue VS = Idx.getOperand(0);
15057   if (VS.getOpcode() != ISD::VSCALE)
15058     return SDValue();
15059 
15060   unsigned NumEls = OpVT.getVectorElementCount().getKnownMinValue();
15061   if (VS.getConstantOperandVal(0) != NumEls)
15062     return SDValue();
15063 
15064   // Extracts of lane EC-1 for SVE can be expressed as PTEST(Op, LAST) ? 1 : 0
15065   SelectionDAG &DAG = DCI.DAG;
15066   SDValue Pg = getPTrue(DAG, SDLoc(N), OpVT, AArch64SVEPredPattern::all);
15067   return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::LAST_ACTIVE);
15068 }
15069 
15070 static SDValue
15071 performExtractVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
15072                                const AArch64Subtarget *Subtarget) {
15073   assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
15074   if (SDValue Res = performFirstTrueTestVectorCombine(N, DCI, Subtarget))
15075     return Res;
15076   if (SDValue Res = performLastTrueTestVectorCombine(N, DCI, Subtarget))
15077     return Res;
15078 
15079   SelectionDAG &DAG = DCI.DAG;
15080   SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
15081   ConstantSDNode *ConstantN1 = dyn_cast<ConstantSDNode>(N1);
15082 
15083   EVT VT = N->getValueType(0);
15084   const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
15085   bool IsStrict = N0->isStrictFPOpcode();
15086 
15087   // extract(dup x) -> x
15088   if (N0.getOpcode() == AArch64ISD::DUP)
15089     return DAG.getZExtOrTrunc(N0.getOperand(0), SDLoc(N), VT);
15090 
15091   // Rewrite for pairwise fadd pattern
15092   //   (f32 (extract_vector_elt
15093   //           (fadd (vXf32 Other)
15094   //                 (vector_shuffle (vXf32 Other) undef <1,X,...> )) 0))
15095   // ->
15096   //   (f32 (fadd (extract_vector_elt (vXf32 Other) 0)
15097   //              (extract_vector_elt (vXf32 Other) 1))
15098   // For strict_fadd we need to make sure the old strict_fadd can be deleted, so
15099   // we can only do this when it's used only by the extract_vector_elt.
15100   if (ConstantN1 && ConstantN1->getZExtValue() == 0 &&
15101       hasPairwiseAdd(N0->getOpcode(), VT, FullFP16) &&
15102       (!IsStrict || N0.hasOneUse())) {
15103     SDLoc DL(N0);
15104     SDValue N00 = N0->getOperand(IsStrict ? 1 : 0);
15105     SDValue N01 = N0->getOperand(IsStrict ? 2 : 1);
15106 
15107     ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(N01);
15108     SDValue Other = N00;
15109 
15110     // And handle the commutative case.
15111     if (!Shuffle) {
15112       Shuffle = dyn_cast<ShuffleVectorSDNode>(N00);
15113       Other = N01;
15114     }
15115 
15116     if (Shuffle && Shuffle->getMaskElt(0) == 1 &&
15117         Other == Shuffle->getOperand(0)) {
15118       SDValue Extract1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Other,
15119                                      DAG.getConstant(0, DL, MVT::i64));
15120       SDValue Extract2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Other,
15121                                      DAG.getConstant(1, DL, MVT::i64));
15122       if (!IsStrict)
15123         return DAG.getNode(N0->getOpcode(), DL, VT, Extract1, Extract2);
15124 
15125       // For strict_fadd we need uses of the final extract_vector to be replaced
15126       // with the strict_fadd, but we also need uses of the chain output of the
15127       // original strict_fadd to use the chain output of the new strict_fadd as
15128       // otherwise it may not be deleted.
15129       SDValue Ret = DAG.getNode(N0->getOpcode(), DL,
15130                                 {VT, MVT::Other},
15131                                 {N0->getOperand(0), Extract1, Extract2});
15132       DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Ret);
15133       DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Ret.getValue(1));
15134       return SDValue(N, 0);
15135     }
15136   }
15137 
15138   return SDValue();
15139 }
15140 
15141 static SDValue performConcatVectorsCombine(SDNode *N,
15142                                            TargetLowering::DAGCombinerInfo &DCI,
15143                                            SelectionDAG &DAG) {
15144   SDLoc dl(N);
15145   EVT VT = N->getValueType(0);
15146   SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
15147   unsigned N0Opc = N0->getOpcode(), N1Opc = N1->getOpcode();
15148 
15149   if (VT.isScalableVector())
15150     return SDValue();
15151 
15152   // Optimize concat_vectors of truncated vectors, where the intermediate
  // type is illegal, to avoid said illegality, e.g.,
15154   //   (v4i16 (concat_vectors (v2i16 (truncate (v2i64))),
15155   //                          (v2i16 (truncate (v2i64)))))
15156   // ->
15157   //   (v4i16 (truncate (vector_shuffle (v4i32 (bitcast (v2i64))),
15158   //                                    (v4i32 (bitcast (v2i64))),
15159   //                                    <0, 2, 4, 6>)))
15160   // This isn't really target-specific, but ISD::TRUNCATE legality isn't keyed
15161   // on both input and result type, so we might generate worse code.
15162   // On AArch64 we know it's fine for v2i64->v4i16 and v4i32->v8i8.
15163   if (N->getNumOperands() == 2 && N0Opc == ISD::TRUNCATE &&
15164       N1Opc == ISD::TRUNCATE) {
15165     SDValue N00 = N0->getOperand(0);
15166     SDValue N10 = N1->getOperand(0);
15167     EVT N00VT = N00.getValueType();
15168 
15169     if (N00VT == N10.getValueType() &&
15170         (N00VT == MVT::v2i64 || N00VT == MVT::v4i32) &&
15171         N00VT.getScalarSizeInBits() == 4 * VT.getScalarSizeInBits()) {
15172       MVT MidVT = (N00VT == MVT::v2i64 ? MVT::v4i32 : MVT::v8i16);
15173       SmallVector<int, 8> Mask(MidVT.getVectorNumElements());
15174       for (size_t i = 0; i < Mask.size(); ++i)
15175         Mask[i] = i * 2;
15176       return DAG.getNode(ISD::TRUNCATE, dl, VT,
15177                          DAG.getVectorShuffle(
15178                              MidVT, dl,
15179                              DAG.getNode(ISD::BITCAST, dl, MidVT, N00),
15180                              DAG.getNode(ISD::BITCAST, dl, MidVT, N10), Mask));
15181     }
15182   }
15183 
15184   if (N->getOperand(0).getValueType() == MVT::v4i8) {
15185     // If we have a concat of v4i8 loads, convert them to a buildvector of f32
15186     // loads to prevent having to go through the v4i8 load legalization that
15187     // needs to extend each element into a larger type.
15188     if (N->getNumOperands() % 2 == 0 && all_of(N->op_values(), [](SDValue V) {
15189           if (V.getValueType() != MVT::v4i8)
15190             return false;
15191           if (V.isUndef())
15192             return true;
15193           LoadSDNode *LD = dyn_cast<LoadSDNode>(V);
15194           return LD && V.hasOneUse() && LD->isSimple() && !LD->isIndexed() &&
15195                  LD->getExtensionType() == ISD::NON_EXTLOAD;
15196         })) {
15197       EVT NVT =
15198           EVT::getVectorVT(*DAG.getContext(), MVT::f32, N->getNumOperands());
15199       SmallVector<SDValue> Ops;
15200 
15201       for (unsigned i = 0; i < N->getNumOperands(); i++) {
15202         SDValue V = N->getOperand(i);
15203         if (V.isUndef())
15204           Ops.push_back(DAG.getUNDEF(MVT::f32));
15205         else {
15206           LoadSDNode *LD = cast<LoadSDNode>(V);
15207           SDValue NewLoad =
15208               DAG.getLoad(MVT::f32, dl, LD->getChain(), LD->getBasePtr(),
15209                           LD->getMemOperand());
15210           DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLoad.getValue(1));
15211           Ops.push_back(NewLoad);
15212         }
15213       }
15214       return DAG.getBitcast(N->getValueType(0),
15215                             DAG.getBuildVector(NVT, dl, Ops));
15216     }
15217   }
15218 
15220   // Wait 'til after everything is legalized to try this. That way we have
15221   // legal vector types and such.
15222   if (DCI.isBeforeLegalizeOps())
15223     return SDValue();
15224 
15225   // Optimise concat_vectors of two [us]avgceils or [us]avgfloors that use
15226   // extracted subvectors from the same original vectors. Combine these into a
15227   // single avg that operates on the two original vectors.
  // avgceil is the target-independent name for rhadd, avgfloor is a hadd.
15229   // Example:
15230   //  (concat_vectors (v8i8 (avgceils (extract_subvector (v16i8 OpA, <0>),
15231   //                                   extract_subvector (v16i8 OpB, <0>))),
15232   //                  (v8i8 (avgceils (extract_subvector (v16i8 OpA, <8>),
15233   //                                   extract_subvector (v16i8 OpB, <8>)))))
15234   // ->
15235   //  (v16i8(avgceils(v16i8 OpA, v16i8 OpB)))
15236   if (N->getNumOperands() == 2 && N0Opc == N1Opc &&
15237       (N0Opc == ISD::AVGCEILU || N0Opc == ISD::AVGCEILS ||
15238        N0Opc == ISD::AVGFLOORU || N0Opc == ISD::AVGFLOORS)) {
15239     SDValue N00 = N0->getOperand(0);
15240     SDValue N01 = N0->getOperand(1);
15241     SDValue N10 = N1->getOperand(0);
15242     SDValue N11 = N1->getOperand(1);
15243 
15244     EVT N00VT = N00.getValueType();
15245     EVT N10VT = N10.getValueType();
15246 
15247     if (N00->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15248         N01->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15249         N10->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15250         N11->getOpcode() == ISD::EXTRACT_SUBVECTOR && N00VT == N10VT) {
15251       SDValue N00Source = N00->getOperand(0);
15252       SDValue N01Source = N01->getOperand(0);
15253       SDValue N10Source = N10->getOperand(0);
15254       SDValue N11Source = N11->getOperand(0);
15255 
15256       if (N00Source == N10Source && N01Source == N11Source &&
15257           N00Source.getValueType() == VT && N01Source.getValueType() == VT) {
15258         assert(N0.getValueType() == N1.getValueType());
15259 
15260         uint64_t N00Index = N00.getConstantOperandVal(1);
15261         uint64_t N01Index = N01.getConstantOperandVal(1);
15262         uint64_t N10Index = N10.getConstantOperandVal(1);
15263         uint64_t N11Index = N11.getConstantOperandVal(1);
15264 
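              // N0's extracts must take the low halves (index 0) and N1's the
              // adjacent high halves, so together they cover the whole of the
              // original source vectors.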
15265         if (N00Index == N01Index && N10Index == N11Index && N00Index == 0 &&
15266             N10Index == N00VT.getVectorNumElements())
15267           return DAG.getNode(N0Opc, dl, VT, N00Source, N01Source);
15268       }
15269     }
15270   }
15271 
15272   // If we see a (concat_vectors (v1x64 A), (v1x64 A)) it's really a vector
15273   // splat. The indexed instructions are going to be expecting a DUPLANE64, so
15274   // canonicalise to that.
15275   if (N->getNumOperands() == 2 && N0 == N1 && VT.getVectorNumElements() == 2) {
15276     assert(VT.getScalarSizeInBits() == 64);
15277     return DAG.getNode(AArch64ISD::DUPLANE64, dl, VT, WidenVector(N0, DAG),
15278                        DAG.getConstant(0, dl, MVT::i64));
15279   }
15280 
15281   // Canonicalise concat_vectors so that the right-hand vector has as few
15282   // bit-casts as possible before its real operation. The primary matching
15283   // destination for these operations will be the narrowing "2" instructions,
15284   // which depend on the operation being performed on this right-hand vector.
15285   // For example,
15286   //    (concat_vectors LHS,  (v1i64 (bitconvert (v4i16 RHS))))
15287   // becomes
15288   //    (bitconvert (concat_vectors (v4i16 (bitconvert LHS)), RHS))
15289 
15290   if (N->getNumOperands() != 2 || N1Opc != ISD::BITCAST)
15291     return SDValue();
15292   SDValue RHS = N1->getOperand(0);
15293   MVT RHSTy = RHS.getValueType().getSimpleVT();
15294   // If the RHS is not a vector, this is not the pattern we're looking for.
15295   if (!RHSTy.isVector())
15296     return SDValue();
15297 
15298   LLVM_DEBUG(
15299       dbgs() << "aarch64-lower: concat_vectors bitcast simplification\n");
15300 
15301   MVT ConcatTy = MVT::getVectorVT(RHSTy.getVectorElementType(),
15302                                   RHSTy.getVectorNumElements() * 2);
15303   return DAG.getNode(ISD::BITCAST, dl, VT,
15304                      DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatTy,
15305                                  DAG.getNode(ISD::BITCAST, dl, RHSTy, N0),
15306                                  RHS));
15307 }
15308 
15309 static SDValue
15310 performExtractSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
15311                                SelectionDAG &DAG) {
15312   if (DCI.isBeforeLegalizeOps())
15313     return SDValue();
15314 
15315   EVT VT = N->getValueType(0);
15316   if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1)
15317     return SDValue();
15318 
15319   SDValue V = N->getOperand(0);
15320 
15321   // NOTE: This combine exists in DAGCombiner, but that version's legality check
15322   // blocks this combine because the non-const case requires custom lowering.
15323   //
15324   // ty1 extract_vector(ty2 splat(const))) -> ty1 splat(const)
15325   if (V.getOpcode() == ISD::SPLAT_VECTOR)
15326     if (isa<ConstantSDNode>(V.getOperand(0)))
15327       return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), VT, V.getOperand(0));
15328 
15329   return SDValue();
15330 }
15331 
15332 static SDValue
15333 performInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
15334                               SelectionDAG &DAG) {
15335   SDLoc DL(N);
15336   SDValue Vec = N->getOperand(0);
15337   SDValue SubVec = N->getOperand(1);
15338   uint64_t IdxVal = N->getConstantOperandVal(2);
15339   EVT VecVT = Vec.getValueType();
15340   EVT SubVT = SubVec.getValueType();
15341 
15342   // Only do this for legal fixed vector types.
15343   if (!VecVT.isFixedLengthVector() ||
15344       !DAG.getTargetLoweringInfo().isTypeLegal(VecVT) ||
15345       !DAG.getTargetLoweringInfo().isTypeLegal(SubVT))
15346     return SDValue();
15347 
15348   // Ignore widening patterns.
15349   if (IdxVal == 0 && Vec.isUndef())
15350     return SDValue();
15351 
15352   // Subvector must be half the width and an "aligned" insertion.
15353   unsigned NumSubElts = SubVT.getVectorNumElements();
15354   if ((SubVT.getSizeInBits() * 2) != VecVT.getSizeInBits() ||
15355       (IdxVal != 0 && IdxVal != NumSubElts))
15356     return SDValue();
15357 
15358   // Fold insert_subvector -> concat_vectors
15359   // insert_subvector(Vec,Sub,lo) -> concat_vectors(Sub,extract(Vec,hi))
15360   // insert_subvector(Vec,Sub,hi) -> concat_vectors(extract(Vec,lo),Sub)
15361   SDValue Lo, Hi;
15362   if (IdxVal == 0) {
15363     Lo = SubVec;
15364     Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec,
15365                      DAG.getVectorIdxConstant(NumSubElts, DL));
15366   } else {
15367     Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec,
15368                      DAG.getVectorIdxConstant(0, DL));
15369     Hi = SubVec;
15370   }
15371   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT, Lo, Hi);
15372 }
15373 
15374 static SDValue tryCombineFixedPointConvert(SDNode *N,
15375                                            TargetLowering::DAGCombinerInfo &DCI,
15376                                            SelectionDAG &DAG) {
15377   // Wait until after everything is legalized to try this. That way we have
15378   // legal vector types and such.
15379   if (DCI.isBeforeLegalizeOps())
15380     return SDValue();
15381   // Transform a scalar conversion of a value from a lane extract into a
15382   // lane extract of a vector conversion. E.g., from foo1 to foo2:
15383   // double foo1(int64x2_t a) { return vcvtd_n_f64_s64(a[1], 9); }
15384   // double foo2(int64x2_t a) { return vcvtq_n_f64_s64(a, 9)[1]; }
15385   //
15386   // The second form interacts better with instruction selection and the
15387   // register allocator to avoid cross-class register copies that aren't
15388   // coalescable due to a lane reference.
15389 
15390   // Check the operand and see if it originates from a lane extract.
15391   SDValue Op1 = N->getOperand(1);
15392   if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
15393     return SDValue();
15394 
15395   // Yep, no additional predication needed. Perform the transform.
15396   SDValue IID = N->getOperand(0);
15397   SDValue Shift = N->getOperand(2);
15398   SDValue Vec = Op1.getOperand(0);
15399   SDValue Lane = Op1.getOperand(1);
15400   EVT ResTy = N->getValueType(0);
15401   EVT VecResTy;
15402   SDLoc DL(N);
15403 
15404   // The vector width should be 128 bits by the time we get here, even
15405   // if it started as 64 bits (the extract_vector_elt handling will have
15406   // widened it to 128 bits). Bail if it is not.
15407   if (Vec.getValueSizeInBits() != 128)
15408     return SDValue();
15409 
15410   if (Vec.getValueType() == MVT::v4i32)
15411     VecResTy = MVT::v4f32;
15412   else if (Vec.getValueType() == MVT::v2i64)
15413     VecResTy = MVT::v2f64;
15414   else
15415     return SDValue();
15416 
15417   SDValue Convert =
15418       DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VecResTy, IID, Vec, Shift);
15419   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResTy, Convert, Lane);
15420 }
15421 
15422 // AArch64 high-vector "long" operations are formed by performing the non-high
15423 // version on an extract_subvector of each operand which gets the high half:
15424 //
15425 //  (longop2 LHS, RHS) == (longop (extract_high LHS), (extract_high RHS))
15426 //
15427 // However, there are cases which don't have an extract_high explicitly, but
15428 // have another operation that can be made compatible with one for free. For
15429 // example:
15430 //
15431 //  (dupv64 scalar) --> (extract_high (dup128 scalar))
15432 //
15433 // This routine does the actual conversion of such DUPs, once outer routines
15434 // have determined that everything else is in order.
15435 // It also supports immediate DUP-like nodes (MOVI/MVNi), which we can fold
15436 // similarly here.
15437 static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) {
15438   MVT VT = N.getSimpleValueType();
15439   if (N.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15440       N.getConstantOperandVal(1) == 0)
15441     N = N.getOperand(0);
15442 
15443   switch (N.getOpcode()) {
15444   case AArch64ISD::DUP:
15445   case AArch64ISD::DUPLANE8:
15446   case AArch64ISD::DUPLANE16:
15447   case AArch64ISD::DUPLANE32:
15448   case AArch64ISD::DUPLANE64:
15449   case AArch64ISD::MOVI:
15450   case AArch64ISD::MOVIshift:
15451   case AArch64ISD::MOVIedit:
15452   case AArch64ISD::MOVImsl:
15453   case AArch64ISD::MVNIshift:
15454   case AArch64ISD::MVNImsl:
15455     break;
15456   default:
15457     // FMOV could be supported, but isn't very useful, as it would only occur
15458     // if you passed a bitcast'd floating point immediate to an eligible long
15459     // integer op (addl, smull, ...).
15460     return SDValue();
15461   }
15462 
15463   if (!VT.is64BitVector())
15464     return SDValue();
15465 
15466   SDLoc DL(N);
15467   unsigned NumElems = VT.getVectorNumElements();
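        // If the DUP/MOVI-like node is still a 64-bit vector, rebuild it at
        // twice the width so its high half can be extracted below.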
15468   if (N.getValueType().is64BitVector()) {
15469     MVT ElementTy = VT.getVectorElementType();
15470     MVT NewVT = MVT::getVectorVT(ElementTy, NumElems * 2);
15471     N = DAG.getNode(N->getOpcode(), DL, NewVT, N->ops());
15472   }
15473 
15474   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, N,
15475                      DAG.getConstant(NumElems, DL, MVT::i64));
15476 }
15477 
15478 static bool isEssentiallyExtractHighSubvector(SDValue N) {
15479   if (N.getOpcode() == ISD::BITCAST)
15480     N = N.getOperand(0);
15481   if (N.getOpcode() != ISD::EXTRACT_SUBVECTOR)
15482     return false;
15483   if (N.getOperand(0).getValueType().isScalableVector())
15484     return false;
15485   return cast<ConstantSDNode>(N.getOperand(1))->getAPIntValue() ==
15486          N.getOperand(0).getValueType().getVectorNumElements() / 2;
15487 }
15488 
15489 /// Helper structure to keep track of ISD::SETCC operands.
15490 struct GenericSetCCInfo {
15491   const SDValue *Opnd0;
15492   const SDValue *Opnd1;
15493   ISD::CondCode CC;
15494 };
15495 
15496 /// Helper structure to keep track of a SET_CC lowered into AArch64 code.
15497 struct AArch64SetCCInfo {
15498   const SDValue *Cmp;
15499   AArch64CC::CondCode CC;
15500 };
15501 
15502 /// Helper structure to keep track of SetCC information.
15503 union SetCCInfo {
15504   GenericSetCCInfo Generic;
15505   AArch64SetCCInfo AArch64;
15506 };
15507 
15508 /// Helper structure to be able to read SetCC information.  If the IsAArch64
15509 /// field is set to true, Info is an AArch64SetCCInfo, otherwise Info is a
15510 /// GenericSetCCInfo.
15511 struct SetCCInfoAndKind {
15512   SetCCInfo Info;
15513   bool IsAArch64;
15514 };
15515 
15516 /// Check whether or not \p Op is a SET_CC operation, either a generic or an
15517 /// AArch64 lowered one.
15518 /// \p SetCCInfo is filled accordingly.
15519 /// \post SetCCInfo is meaningful only when this function returns true.
15520 /// \return True when Op is a kind of SET_CC operation.
15522 static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo) {
15523   // If this is a setcc, this is straightforward.
15524   if (Op.getOpcode() == ISD::SETCC) {
15525     SetCCInfo.Info.Generic.Opnd0 = &Op.getOperand(0);
15526     SetCCInfo.Info.Generic.Opnd1 = &Op.getOperand(1);
15527     SetCCInfo.Info.Generic.CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
15528     SetCCInfo.IsAArch64 = false;
15529     return true;
15530   }
15531   // Otherwise, check if this is a matching csel instruction.
15532   // In other words:
15533   // - csel 1, 0, cc
15534   // - csel 0, 1, !cc
15535   if (Op.getOpcode() != AArch64ISD::CSEL)
15536     return false;
15537   // Set the information about the operands.
15538   // TODO: we want the operands of the Cmp not the csel
15539   SetCCInfo.Info.AArch64.Cmp = &Op.getOperand(3);
15540   SetCCInfo.IsAArch64 = true;
15541   SetCCInfo.Info.AArch64.CC = static_cast<AArch64CC::CondCode>(
15542       cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
15543 
15544   // Check that the operands match the constraints:
15545   // (1) Both operands must be constants.
15546   // (2) One must be 1 and the other must be 0.
15547   ConstantSDNode *TValue = dyn_cast<ConstantSDNode>(Op.getOperand(0));
15548   ConstantSDNode *FValue = dyn_cast<ConstantSDNode>(Op.getOperand(1));
15549 
15550   // Check (1).
15551   if (!TValue || !FValue)
15552     return false;
15553 
15554   // Check (2).
15555   if (!TValue->isOne()) {
15556     // Update the comparison when we are interested in !cc.
15557     std::swap(TValue, FValue);
15558     SetCCInfo.Info.AArch64.CC =
15559         AArch64CC::getInvertedCondCode(SetCCInfo.Info.AArch64.CC);
15560   }
15561   return TValue->isOne() && FValue->isZero();
15562 }
15563 
15564 // Returns true if Op is setcc or zext of setcc.
15565 static bool isSetCCOrZExtSetCC(const SDValue &Op, SetCCInfoAndKind &Info) {
15566   if (isSetCC(Op, Info))
15567     return true;
15568   return ((Op.getOpcode() == ISD::ZERO_EXTEND) &&
15569           isSetCC(Op->getOperand(0), Info));
15570 }
15571 
15572 // The folding we want to perform is:
15573 // (add x, [zext] (setcc cc ...) )
15574 //   -->
15575 // (csel x, (add x, 1), !cc ...)
15576 //
15577 // The latter will get matched to a CSINC instruction.
15578 static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG) {
15579   assert(Op && Op->getOpcode() == ISD::ADD && "Unexpected operation!");
15580   SDValue LHS = Op->getOperand(0);
15581   SDValue RHS = Op->getOperand(1);
15582   SetCCInfoAndKind InfoAndKind;
15583 
15584   // If both operands are a SET_CC, then we don't want to perform this
15585   // folding and create another csel as this results in more instructions
15586   // (and higher register usage).
15587   if (isSetCCOrZExtSetCC(LHS, InfoAndKind) &&
15588       isSetCCOrZExtSetCC(RHS, InfoAndKind))
15589     return SDValue();
15590 
15591   // If neither operand is a SET_CC, give up.
15592   if (!isSetCCOrZExtSetCC(LHS, InfoAndKind)) {
15593     std::swap(LHS, RHS);
15594     if (!isSetCCOrZExtSetCC(LHS, InfoAndKind))
15595       return SDValue();
15596   }
15597 
15598   // FIXME: This could be generalized to work for FP comparisons.
15599   EVT CmpVT = InfoAndKind.IsAArch64
15600                   ? InfoAndKind.Info.AArch64.Cmp->getOperand(0).getValueType()
15601                   : InfoAndKind.Info.Generic.Opnd0->getValueType();
15602   if (CmpVT != MVT::i32 && CmpVT != MVT::i64)
15603     return SDValue();
15604 
15605   SDValue CCVal;
15606   SDValue Cmp;
15607   SDLoc dl(Op);
15608   if (InfoAndKind.IsAArch64) {
15609     CCVal = DAG.getConstant(
15610         AArch64CC::getInvertedCondCode(InfoAndKind.Info.AArch64.CC), dl,
15611         MVT::i32);
15612     Cmp = *InfoAndKind.Info.AArch64.Cmp;
15613   } else
15614     Cmp = getAArch64Cmp(
15615         *InfoAndKind.Info.Generic.Opnd0, *InfoAndKind.Info.Generic.Opnd1,
15616         ISD::getSetCCInverse(InfoAndKind.Info.Generic.CC, CmpVT), CCVal, DAG,
15617         dl);
15618 
15619   EVT VT = Op->getValueType(0);
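        // At this point LHS is the setcc-like operand and RHS is the plain
        // value; build (csel RHS, RHS+1, !cc), which isel matches to CSINC.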
15620   LHS = DAG.getNode(ISD::ADD, dl, VT, RHS, DAG.getConstant(1, dl, VT));
15621   return DAG.getNode(AArch64ISD::CSEL, dl, VT, RHS, LHS, CCVal, Cmp);
15622 }
15623 
15624 // ADD(UADDV a, UADDV b) -->  UADDV(ADD a, b)
15625 static SDValue performAddUADDVCombine(SDNode *N, SelectionDAG &DAG) {
15626   EVT VT = N->getValueType(0);
15627   // Only handle scalar integer adds.
15628   if (N->getOpcode() != ISD::ADD || !VT.isScalarInteger())
15629     return SDValue();
15630 
15631   SDValue LHS = N->getOperand(0);
15632   SDValue RHS = N->getOperand(1);
15633   if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
15634       RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT || LHS.getValueType() != VT)
15635     return SDValue();
15636 
15637   auto *LHSN1 = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
15638   auto *RHSN1 = dyn_cast<ConstantSDNode>(RHS->getOperand(1));
15639   if (!LHSN1 || LHSN1 != RHSN1 || !RHSN1->isZero())
15640     return SDValue();
15641 
15642   SDValue Op1 = LHS->getOperand(0);
15643   SDValue Op2 = RHS->getOperand(0);
15644   EVT OpVT1 = Op1.getValueType();
15645   EVT OpVT2 = Op2.getValueType();
15646   if (Op1.getOpcode() != AArch64ISD::UADDV || OpVT1 != OpVT2 ||
15647       Op2.getOpcode() != AArch64ISD::UADDV ||
15648       OpVT1.getVectorElementType() != VT)
15649     return SDValue();
15650 
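        // Both operands extract lane 0 of a UADDV over the same vector type,
        // so add the reduction inputs first and reduce the sum only once.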
15651   SDValue Val1 = Op1.getOperand(0);
15652   SDValue Val2 = Op2.getOperand(0);
15653   EVT ValVT = Val1->getValueType(0);
15654   SDLoc DL(N);
15655   SDValue AddVal = DAG.getNode(ISD::ADD, DL, ValVT, Val1, Val2);
15656   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
15657                      DAG.getNode(AArch64ISD::UADDV, DL, ValVT, AddVal),
15658                      DAG.getConstant(0, DL, MVT::i64));
15659 }
15660 
15661 /// Perform the scalar expression combine in the form of:
15662 ///   CSEL(c, 1, cc) + b => CSINC(b+c, b, cc)
15663 ///   CSNEG(c, -1, cc) + b => CSINC(b+c, b, cc)
15664 static SDValue performAddCSelIntoCSinc(SDNode *N, SelectionDAG &DAG) {
15665   EVT VT = N->getValueType(0);
15666   if (!VT.isScalarInteger() || N->getOpcode() != ISD::ADD)
15667     return SDValue();
15668 
15669   SDValue LHS = N->getOperand(0);
15670   SDValue RHS = N->getOperand(1);
15671 
15672   // Handle commutativity.
15673   if (LHS.getOpcode() != AArch64ISD::CSEL &&
15674       LHS.getOpcode() != AArch64ISD::CSNEG) {
15675     std::swap(LHS, RHS);
15676     if (LHS.getOpcode() != AArch64ISD::CSEL &&
15677         LHS.getOpcode() != AArch64ISD::CSNEG) {
15678       return SDValue();
15679     }
15680   }
15681 
15682   if (!LHS.hasOneUse())
15683     return SDValue();
15684 
15685   AArch64CC::CondCode AArch64CC =
15686       static_cast<AArch64CC::CondCode>(LHS.getConstantOperandVal(2));
15687 
15688   // The CSEL should include a constant one operand, and the CSNEG should
15689   // include a one or a negative-one operand.
15690   ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(LHS.getOperand(0));
15691   ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
15692   if (!CTVal || !CFVal)
15693     return SDValue();
15694 
15695   if (!(LHS.getOpcode() == AArch64ISD::CSEL &&
15696         (CTVal->isOne() || CFVal->isOne())) &&
15697       !(LHS.getOpcode() == AArch64ISD::CSNEG &&
15698         (CTVal->isOne() || CFVal->isAllOnes())))
15699     return SDValue();
15700 
15701   // Switch CSEL(1, c, cc) to CSEL(c, 1, !cc)
15702   if (LHS.getOpcode() == AArch64ISD::CSEL && CTVal->isOne() &&
15703       !CFVal->isOne()) {
15704     std::swap(CTVal, CFVal);
15705     AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
15706   }
15707 
15708   SDLoc DL(N);
15709   // Switch CSNEG(1, c, cc) to CSNEG(-c, -1, !cc)
15710   if (LHS.getOpcode() == AArch64ISD::CSNEG && CTVal->isOne() &&
15711       !CFVal->isAllOnes()) {
15712     APInt C = -1 * CFVal->getAPIntValue();
15713     CTVal = cast<ConstantSDNode>(DAG.getConstant(C, DL, VT));
15714     CFVal = cast<ConstantSDNode>(DAG.getAllOnesConstant(DL, VT));
15715     AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
15716   }
15717 
15718   // It might be neutral for larger constants, as the immediate needs to be
15719   // materialized in a register.
15720   APInt ADDC = CTVal->getAPIntValue();
15721   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15722   if (!TLI.isLegalAddImmediate(ADDC.getSExtValue()))
15723     return SDValue();
15724 
15725   assert(((LHS.getOpcode() == AArch64ISD::CSEL && CFVal->isOne()) ||
15726           (LHS.getOpcode() == AArch64ISD::CSNEG && CFVal->isAllOnes())) &&
15727          "Unexpected constant value");
15728 
15729   SDValue NewNode = DAG.getNode(ISD::ADD, DL, VT, RHS, SDValue(CTVal, 0));
15730   SDValue CCVal = DAG.getConstant(AArch64CC, DL, MVT::i32);
15731   SDValue Cmp = LHS.getOperand(3);
15732 
15733   return DAG.getNode(AArch64ISD::CSINC, DL, VT, NewNode, RHS, CCVal, Cmp);
15734 }
15735 
15736 // ADD(UDOT(zero, x, y), A) -->  UDOT(A, x, y)
15737 static SDValue performAddDotCombine(SDNode *N, SelectionDAG &DAG) {
15738   EVT VT = N->getValueType(0);
15739   if (N->getOpcode() != ISD::ADD)
15740     return SDValue();
15741 
15742   SDValue Dot = N->getOperand(0);
15743   SDValue A = N->getOperand(1);
15744   // Handle commutativity
15745   auto isZeroDot = [](SDValue Dot) {
15746     return (Dot.getOpcode() == AArch64ISD::UDOT ||
15747             Dot.getOpcode() == AArch64ISD::SDOT) &&
15748            isZerosVector(Dot.getOperand(0).getNode());
15749   };
15750   if (!isZeroDot(Dot))
15751     std::swap(Dot, A);
15752   if (!isZeroDot(Dot))
15753     return SDValue();
15754 
15755   return DAG.getNode(Dot.getOpcode(), SDLoc(N), VT, A, Dot.getOperand(1),
15756                      Dot.getOperand(2));
15757 }
15758 
15759 static bool isNegatedInteger(SDValue Op) {
15760   return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0));
15761 }
15762 
15763 static SDValue getNegatedInteger(SDValue Op, SelectionDAG &DAG) {
15764   SDLoc DL(Op);
15765   EVT VT = Op.getValueType();
15766   SDValue Zero = DAG.getConstant(0, DL, VT);
15767   return DAG.getNode(ISD::SUB, DL, VT, Zero, Op);
15768 }
15769 
15770 // Try to fold
15771 //
15772 // (neg (csel X, Y)) -> (csel (neg X), (neg Y))
15773 //
15774 // The folding helps csel to be matched with csneg without generating
15775 // redundant neg instruction, which includes negation of the csel expansion
15776 // of abs node lowered by lowerABS.
15777 static SDValue performNegCSelCombine(SDNode *N, SelectionDAG &DAG) {
15778   if (!isNegatedInteger(SDValue(N, 0)))
15779     return SDValue();
15780 
15781   SDValue CSel = N->getOperand(1);
15782   if (CSel.getOpcode() != AArch64ISD::CSEL || !CSel->hasOneUse())
15783     return SDValue();
15784 
15785   SDValue N0 = CSel.getOperand(0);
15786   SDValue N1 = CSel.getOperand(1);
15787 
15788   // If neither of them is a negation, the fold isn't worthwhile, as it would
15789   // introduce two additional negations while removing only one.
15790   if (!isNegatedInteger(N0) && !isNegatedInteger(N1))
15791     return SDValue();
15792 
15793   SDValue N0N = getNegatedInteger(N0, DAG);
15794   SDValue N1N = getNegatedInteger(N1, DAG);
15795 
15796   SDLoc DL(N);
15797   EVT VT = CSel.getValueType();
15798   return DAG.getNode(AArch64ISD::CSEL, DL, VT, N0N, N1N, CSel.getOperand(2),
15799                      CSel.getOperand(3));
15800 }
15801 
15802 // The basic add/sub long vector instructions have variants with "2" on the end
15803 // which act on the high-half of their inputs. They are normally matched by
15804 // patterns like:
15805 //
15806 // (add (zeroext (extract_high LHS)),
15807 //      (zeroext (extract_high RHS)))
15808 // -> uaddl2 vD, vN, vM
15809 //
15810 // However, if one of the extracts is something like a duplicate, this
15811 // instruction can still be used profitably. This function puts the DAG into a
15812 // more appropriate form for those patterns to trigger.
15813 static SDValue performAddSubLongCombine(SDNode *N,
15814                                         TargetLowering::DAGCombinerInfo &DCI,
15815                                         SelectionDAG &DAG) {
15816   if (DCI.isBeforeLegalizeOps())
15817     return SDValue();
15818 
15819   MVT VT = N->getSimpleValueType(0);
15820   if (!VT.is128BitVector()) {
15821     if (N->getOpcode() == ISD::ADD)
15822       return performSetccAddFolding(N, DAG);
15823     return SDValue();
15824   }
15825 
15826   // Make sure both branches are extended in the same way.
15827   SDValue LHS = N->getOperand(0);
15828   SDValue RHS = N->getOperand(1);
15829   if ((LHS.getOpcode() != ISD::ZERO_EXTEND &&
15830        LHS.getOpcode() != ISD::SIGN_EXTEND) ||
15831       LHS.getOpcode() != RHS.getOpcode())
15832     return SDValue();
15833 
15834   unsigned ExtType = LHS.getOpcode();
15835 
15836   // It's not worth doing if at least one of the inputs isn't already an
15837   // extract, but we don't know which it'll be so we have to try both.
15838   if (isEssentiallyExtractHighSubvector(LHS.getOperand(0))) {
15839     RHS = tryExtendDUPToExtractHigh(RHS.getOperand(0), DAG);
15840     if (!RHS.getNode())
15841       return SDValue();
15842 
15843     RHS = DAG.getNode(ExtType, SDLoc(N), VT, RHS);
15844   } else if (isEssentiallyExtractHighSubvector(RHS.getOperand(0))) {
15845     LHS = tryExtendDUPToExtractHigh(LHS.getOperand(0), DAG);
15846     if (!LHS.getNode())
15847       return SDValue();
15848 
15849     LHS = DAG.getNode(ExtType, SDLoc(N), VT, LHS);
15850   }
15851 
15852   return DAG.getNode(N->getOpcode(), SDLoc(N), VT, LHS, RHS);
15853 }
15854 
15855 static bool isCMP(SDValue Op) {
15856   return Op.getOpcode() == AArch64ISD::SUBS &&
15857          !Op.getNode()->hasAnyUseOfValue(0);
15858 }
15859 
15860 // (CSEL 1 0 CC Cond) => CC
15861 // (CSEL 0 1 CC Cond) => !CC
15862 static Optional<AArch64CC::CondCode> getCSETCondCode(SDValue Op) {
15863   if (Op.getOpcode() != AArch64ISD::CSEL)
15864     return None;
15865   auto CC = static_cast<AArch64CC::CondCode>(Op.getConstantOperandVal(2));
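        // AL and NV are "always" conditions; a CSEL using them does not test
        // any flags, so it cannot be treated as a CSET.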
15866   if (CC == AArch64CC::AL || CC == AArch64CC::NV)
15867     return None;
15868   SDValue OpLHS = Op.getOperand(0);
15869   SDValue OpRHS = Op.getOperand(1);
15870   if (isOneConstant(OpLHS) && isNullConstant(OpRHS))
15871     return CC;
15872   if (isNullConstant(OpLHS) && isOneConstant(OpRHS))
15873     return getInvertedCondCode(CC);
15874 
15875   return None;
15876 }
15877 
15878 // (ADC{S} l r (CMP (CSET HS carry) 1)) => (ADC{S} l r carry)
15879 // (SBC{S} l r (CMP 0 (CSET LO carry))) => (SBC{S} l r carry)
15880 static SDValue foldOverflowCheck(SDNode *Op, SelectionDAG &DAG, bool IsAdd) {
15881   SDValue CmpOp = Op->getOperand(2);
15882   if (!isCMP(CmpOp))
15883     return SDValue();
15884 
15885   if (IsAdd) {
15886     if (!isOneConstant(CmpOp.getOperand(1)))
15887       return SDValue();
15888   } else {
15889     if (!isNullConstant(CmpOp.getOperand(0)))
15890       return SDValue();
15891   }
15892 
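        // The carry is materialized as a CSET (a CSEL of 1/0); recover its
        // condition and feed the original flags directly into the ADC/SBC.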
15893   SDValue CsetOp = CmpOp->getOperand(IsAdd ? 0 : 1);
15894   auto CC = getCSETCondCode(CsetOp);
15895   if (CC != (IsAdd ? AArch64CC::HS : AArch64CC::LO))
15896     return SDValue();
15897 
15898   return DAG.getNode(Op->getOpcode(), SDLoc(Op), Op->getVTList(),
15899                      Op->getOperand(0), Op->getOperand(1),
15900                      CsetOp.getOperand(3));
15901 }
15902 
15903 // (ADC x 0 cond) => (CINC x HS cond)
15904 static SDValue foldADCToCINC(SDNode *N, SelectionDAG &DAG) {
15905   SDValue LHS = N->getOperand(0);
15906   SDValue RHS = N->getOperand(1);
15907   SDValue Cond = N->getOperand(2);
15908 
15909   if (!isNullConstant(RHS))
15910     return SDValue();
15911 
15912   EVT VT = N->getValueType(0);
15913   SDLoc DL(N);
15914 
15915   // (CINC x cc cond) <=> (CSINC x x !cc cond)
15916   SDValue CC = DAG.getConstant(AArch64CC::LO, DL, MVT::i32);
15917   return DAG.getNode(AArch64ISD::CSINC, DL, VT, LHS, LHS, CC, Cond);
15918 }
15919 
15920 // Transform vector add(zext i8 to i32, zext i8 to i32)
15921 //  into sext(add(zext(i8 to i16), zext(i8 to i16)) to i32)
15922 // This allows extra uses of saddl/uaddl at the lower vector widths, and fewer
15923 // extends.
15924 static SDValue performVectorAddSubExtCombine(SDNode *N, SelectionDAG &DAG) {
15925   EVT VT = N->getValueType(0);
15926   if (!VT.isFixedLengthVector() || VT.getSizeInBits() <= 128 ||
15927       (N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
15928        N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND) ||
15929       (N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
15930        N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND) ||
15931       N->getOperand(0).getOperand(0).getValueType() !=
15932           N->getOperand(1).getOperand(0).getValueType())
15933     return SDValue();
15934 
15935   SDValue N0 = N->getOperand(0).getOperand(0);
15936   SDValue N1 = N->getOperand(1).getOperand(0);
15937   EVT InVT = N0.getValueType();
15938 
15939   EVT S1 = InVT.getScalarType();
15940   EVT S2 = VT.getScalarType();
15941   if ((S2 == MVT::i32 && S1 == MVT::i8) ||
15942       (S2 == MVT::i64 && (S1 == MVT::i8 || S1 == MVT::i16))) {
15943     SDLoc DL(N);
15944     EVT HalfVT = EVT::getVectorVT(*DAG.getContext(),
15945                                   S2.getHalfSizedIntegerVT(*DAG.getContext()),
15946                                   VT.getVectorElementCount());
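          // The add in the half-width type cannot wrap, so sign-extending its
          // result to VT gives the same value as performing the add at full
          // width, for both the signed and unsigned cases.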
15947     SDValue NewN0 = DAG.getNode(N->getOperand(0).getOpcode(), DL, HalfVT, N0);
15948     SDValue NewN1 = DAG.getNode(N->getOperand(1).getOpcode(), DL, HalfVT, N1);
15949     SDValue NewOp = DAG.getNode(N->getOpcode(), DL, HalfVT, NewN0, NewN1);
15950     return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, NewOp);
15951   }
15952   return SDValue();
15953 }
15954 
15955 static SDValue performAddSubCombine(SDNode *N,
15956                                     TargetLowering::DAGCombinerInfo &DCI,
15957                                     SelectionDAG &DAG) {
15958   // Try to change sum of two reductions.
15959   if (SDValue Val = performAddUADDVCombine(N, DAG))
15960     return Val;
15961   if (SDValue Val = performAddDotCombine(N, DAG))
15962     return Val;
15963   if (SDValue Val = performAddCSelIntoCSinc(N, DAG))
15964     return Val;
15965   if (SDValue Val = performNegCSelCombine(N, DAG))
15966     return Val;
15967   if (SDValue Val = performVectorAddSubExtCombine(N, DAG))
15968     return Val;
15969 
15970   return performAddSubLongCombine(N, DCI, DAG);
15971 }
15972 
15973 // Massage DAGs which we can use the high-half "long" operations on into
15974 // something isel will recognize better. E.g.
15975 //
15976 // (aarch64_neon_umull (extract_high vec) (dupv64 scalar)) -->
15977 //   (aarch64_neon_umull (extract_high (v2i64 vec))
15978 //                       (extract_high (v2i64 (dup128 scalar))))
15979 //
15980 static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N,
15981                                        TargetLowering::DAGCombinerInfo &DCI,
15982                                        SelectionDAG &DAG) {
15983   if (DCI.isBeforeLegalizeOps())
15984     return SDValue();
15985 
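        // For intrinsic nodes operand 0 holds the intrinsic ID, so the data
        // operands start at index 1; plain nodes start at index 0.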
15986   SDValue LHS = N->getOperand((IID == Intrinsic::not_intrinsic) ? 0 : 1);
15987   SDValue RHS = N->getOperand((IID == Intrinsic::not_intrinsic) ? 1 : 2);
15988   assert(LHS.getValueType().is64BitVector() &&
15989          RHS.getValueType().is64BitVector() &&
15990          "unexpected shape for long operation");
15991 
15992   // Either node could be a DUP, but it's not worth doing both of them (you'd
15993   // just as well use the non-high version) so look for a corresponding extract
15994   // operation on the other "wing".
15995   if (isEssentiallyExtractHighSubvector(LHS)) {
15996     RHS = tryExtendDUPToExtractHigh(RHS, DAG);
15997     if (!RHS.getNode())
15998       return SDValue();
15999   } else if (isEssentiallyExtractHighSubvector(RHS)) {
16000     LHS = tryExtendDUPToExtractHigh(LHS, DAG);
16001     if (!LHS.getNode())
16002       return SDValue();
16003   }
16004 
16005   if (IID == Intrinsic::not_intrinsic)
16006     return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), LHS, RHS);
16007 
16008   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), N->getValueType(0),
16009                      N->getOperand(0), LHS, RHS);
16010 }
16011 
16012 static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) {
16013   MVT ElemTy = N->getSimpleValueType(0).getScalarType();
16014   unsigned ElemBits = ElemTy.getSizeInBits();
16015 
16016   int64_t ShiftAmount;
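        // The shift amount may be given either as a constant splat
        // build_vector or as a plain scalar constant.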
16017   if (BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(2))) {
16018     APInt SplatValue, SplatUndef;
16019     unsigned SplatBitSize;
16020     bool HasAnyUndefs;
16021     if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
16022                               HasAnyUndefs, ElemBits) ||
16023         SplatBitSize != ElemBits)
16024       return SDValue();
16025 
16026     ShiftAmount = SplatValue.getSExtValue();
16027   } else if (ConstantSDNode *CVN = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
16028     ShiftAmount = CVN->getSExtValue();
16029   } else
16030     return SDValue();
16031 
16032   unsigned Opcode;
16033   bool IsRightShift;
16034   switch (IID) {
16035   default:
16036     llvm_unreachable("Unknown shift intrinsic");
16037   case Intrinsic::aarch64_neon_sqshl:
16038     Opcode = AArch64ISD::SQSHL_I;
16039     IsRightShift = false;
16040     break;
16041   case Intrinsic::aarch64_neon_uqshl:
16042     Opcode = AArch64ISD::UQSHL_I;
16043     IsRightShift = false;
16044     break;
16045   case Intrinsic::aarch64_neon_srshl:
16046     Opcode = AArch64ISD::SRSHR_I;
16047     IsRightShift = true;
16048     break;
16049   case Intrinsic::aarch64_neon_urshl:
16050     Opcode = AArch64ISD::URSHR_I;
16051     IsRightShift = true;
16052     break;
16053   case Intrinsic::aarch64_neon_sqshlu:
16054     Opcode = AArch64ISD::SQSHLU_I;
16055     IsRightShift = false;
16056     break;
16057   case Intrinsic::aarch64_neon_sshl:
16058   case Intrinsic::aarch64_neon_ushl:
16059     // For positive shift amounts we can use SHL, as ushl/sshl perform a regular
16060     // left shift for positive shift amounts. Below, we only replace the current
16061     // node with VSHL if this condition is met.
16062     Opcode = AArch64ISD::VSHL;
16063     IsRightShift = false;
16064     break;
16065   }
16066 
16067   if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(int)ElemBits) {
16068     SDLoc dl(N);
16069     return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1),
16070                        DAG.getConstant(-ShiftAmount, dl, MVT::i32));
16071   } else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits) {
16072     SDLoc dl(N);
16073     return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1),
16074                        DAG.getConstant(ShiftAmount, dl, MVT::i32));
16075   }
16076 
16077   return SDValue();
16078 }
16079 
16080 // The CRC32[BH] instructions ignore the high bits of their data operand. Since
16081 // the intrinsics must be legal and take an i32, this means there's almost
16082 // certainly going to be a zext in the DAG which we can eliminate.
16083 static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG) {
16084   SDValue AndN = N->getOperand(2);
16085   if (AndN.getOpcode() != ISD::AND)
16086     return SDValue();
16087 
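        // Only fold when the AND mask matches exactly the bits the CRC32[BH]
        // instruction reads.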
16088   ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(AndN.getOperand(1));
16089   if (!CMask || CMask->getZExtValue() != Mask)
16090     return SDValue();
16091 
16092   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), MVT::i32,
16093                      N->getOperand(0), N->getOperand(1), AndN.getOperand(0));
16094 }
16095 
16096 static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N,
16097                                            SelectionDAG &DAG) {
16098   SDLoc dl(N);
16099   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0),
16100                      DAG.getNode(Opc, dl,
16101                                  N->getOperand(1).getSimpleValueType(),
16102                                  N->getOperand(1)),
16103                      DAG.getConstant(0, dl, MVT::i64));
16104 }
16105 
16106 static SDValue LowerSVEIntrinsicIndex(SDNode *N, SelectionDAG &DAG) {
16107   SDLoc DL(N);
16108   SDValue Op1 = N->getOperand(1);
16109   SDValue Op2 = N->getOperand(2);
16110   EVT ScalarTy = Op2.getValueType();
16111   if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
16112     ScalarTy = MVT::i32;
16113 
16114   // Lower index_vector(base, step) to mul(step_vector(1), splat(step)) + splat(base).
16115   SDValue StepVector = DAG.getStepVector(DL, N->getValueType(0));
16116   SDValue Step = DAG.getNode(ISD::SPLAT_VECTOR, DL, N->getValueType(0), Op2);
16117   SDValue Mul = DAG.getNode(ISD::MUL, DL, N->getValueType(0), StepVector, Step);
16118   SDValue Base = DAG.getNode(ISD::SPLAT_VECTOR, DL, N->getValueType(0), Op1);
16119   return DAG.getNode(ISD::ADD, DL, N->getValueType(0), Mul, Base);
16120 }
16121 
16122 static SDValue LowerSVEIntrinsicDUP(SDNode *N, SelectionDAG &DAG) {
16123   SDLoc dl(N);
16124   SDValue Scalar = N->getOperand(3);
16125   EVT ScalarTy = Scalar.getValueType();
16126 
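        // i8 and i16 are not legal scalar types on AArch64, so any-extend the
        // scalar to i32 before forming the predicated DUP.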
16127   if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
16128     Scalar = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Scalar);
16129 
16130   SDValue Passthru = N->getOperand(1);
16131   SDValue Pred = N->getOperand(2);
16132   return DAG.getNode(AArch64ISD::DUP_MERGE_PASSTHRU, dl, N->getValueType(0),
16133                      Pred, Scalar, Passthru);
16134 }
16135 
16136 static SDValue LowerSVEIntrinsicEXT(SDNode *N, SelectionDAG &DAG) {
16137   SDLoc dl(N);
16138   LLVMContext &Ctx = *DAG.getContext();
16139   EVT VT = N->getValueType(0);
16140 
16141   assert(VT.isScalableVector() && "Expected a scalable vector.");
16142 
16143   // Current lowering only supports the SVE-ACLE types.
16144   if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock)
16145     return SDValue();
16146 
16147   unsigned ElemSize = VT.getVectorElementType().getSizeInBits() / 8;
16148   unsigned ByteSize = VT.getSizeInBits().getKnownMinSize() / 8;
16149   EVT ByteVT =
16150       EVT::getVectorVT(Ctx, MVT::i8, ElementCount::getScalable(ByteSize));
16151 
16152   // Convert everything to the domain of EXT (i.e. bytes).
16153   SDValue Op0 = DAG.getNode(ISD::BITCAST, dl, ByteVT, N->getOperand(1));
16154   SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, ByteVT, N->getOperand(2));
16155   SDValue Op2 = DAG.getNode(ISD::MUL, dl, MVT::i32, N->getOperand(3),
16156                             DAG.getConstant(ElemSize, dl, MVT::i32));
16157 
16158   SDValue EXT = DAG.getNode(AArch64ISD::EXT, dl, ByteVT, Op0, Op1, Op2);
16159   return DAG.getNode(ISD::BITCAST, dl, VT, EXT);
16160 }
16161 
16162 static SDValue tryConvertSVEWideCompare(SDNode *N, ISD::CondCode CC,
16163                                         TargetLowering::DAGCombinerInfo &DCI,
16164                                         SelectionDAG &DAG) {
16165   if (DCI.isBeforeLegalize())
16166     return SDValue();
16167 
16168   SDValue Comparator = N->getOperand(3);
16169   if (Comparator.getOpcode() == AArch64ISD::DUP ||
16170       Comparator.getOpcode() == ISD::SPLAT_VECTOR) {
16171     unsigned IID = getIntrinsicID(N);
16172     EVT VT = N->getValueType(0);
16173     EVT CmpVT = N->getOperand(2).getValueType();
16174     SDValue Pred = N->getOperand(1);
16175     SDValue Imm;
16176     SDLoc DL(N);
16177 
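          // The wide compare can be rewritten as a normal compare only when
          // the splatted comparator is a constant that fits the instruction's
          // immediate range: [-16, 15] for the signed forms and [0, 127] for
          // the unsigned forms.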
16178     switch (IID) {
16179     default:
16180       llvm_unreachable("Called with wrong intrinsic!");
16181       break;
16182 
16183     // Signed comparisons
16184     case Intrinsic::aarch64_sve_cmpeq_wide:
16185     case Intrinsic::aarch64_sve_cmpne_wide:
16186     case Intrinsic::aarch64_sve_cmpge_wide:
16187     case Intrinsic::aarch64_sve_cmpgt_wide:
16188     case Intrinsic::aarch64_sve_cmplt_wide:
16189     case Intrinsic::aarch64_sve_cmple_wide: {
16190       if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) {
16191         int64_t ImmVal = CN->getSExtValue();
16192         if (ImmVal >= -16 && ImmVal <= 15)
16193           Imm = DAG.getConstant(ImmVal, DL, MVT::i32);
16194         else
16195           return SDValue();
16196       }
16197       break;
16198     }
16199     // Unsigned comparisons
16200     case Intrinsic::aarch64_sve_cmphs_wide:
16201     case Intrinsic::aarch64_sve_cmphi_wide:
16202     case Intrinsic::aarch64_sve_cmplo_wide:
16203     case Intrinsic::aarch64_sve_cmpls_wide:  {
16204       if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) {
16205         uint64_t ImmVal = CN->getZExtValue();
16206         if (ImmVal <= 127)
16207           Imm = DAG.getConstant(ImmVal, DL, MVT::i32);
16208         else
16209           return SDValue();
16210       }
16211       break;
16212     }
16213     }
16214 
16215     if (!Imm)
16216       return SDValue();
16217 
16218     SDValue Splat = DAG.getNode(ISD::SPLAT_VECTOR, DL, CmpVT, Imm);
16219     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, VT, Pred,
16220                        N->getOperand(2), Splat, DAG.getCondCode(CC));
16221   }
16222 
16223   return SDValue();
16224 }
16225 
16226 static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
16227                         AArch64CC::CondCode Cond) {
16228   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16229 
16230   SDLoc DL(Op);
16231   assert(Op.getValueType().isScalableVector() &&
16232          TLI.isTypeLegal(Op.getValueType()) &&
16233          "Expected legal scalable vector type!");
16234   assert(Op.getValueType() == Pg.getValueType() &&
16235          "Expected same type for PTEST operands");
16236 
16237   // Ensure target-specific opcodes are using a legal type.
16238   EVT OutVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
16239   SDValue TVal = DAG.getConstant(1, DL, OutVT);
16240   SDValue FVal = DAG.getConstant(0, DL, OutVT);
16241 
16242   // Ensure operands have type nxv16i1.
16243   if (Op.getValueType() != MVT::nxv16i1) {
16244     if ((Cond == AArch64CC::ANY_ACTIVE || Cond == AArch64CC::NONE_ACTIVE) &&
16245         isZeroingInactiveLanes(Op))
16246       Pg = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv16i1, Pg);
16247     else
16248       Pg = getSVEPredicateBitCast(MVT::nxv16i1, Pg, DAG);
16249     Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv16i1, Op);
16250   }
16251 
16252   // Set condition code (CC) flags.
16253   SDValue Test = DAG.getNode(AArch64ISD::PTEST, DL, MVT::Other, Pg, Op);
16254 
16255   // Convert CC to integer based on requested condition.
16256   // NOTE: Cond is inverted to promote CSEL's removal when it feeds a compare.
16257   SDValue CC = DAG.getConstant(getInvertedCondCode(Cond), DL, MVT::i32);
16258   SDValue Res = DAG.getNode(AArch64ISD::CSEL, DL, OutVT, FVal, TVal, CC, Test);
16259   return DAG.getZExtOrTrunc(Res, DL, VT);
16260 }
16261 
16262 static SDValue combineSVEReductionInt(SDNode *N, unsigned Opc,
16263                                       SelectionDAG &DAG) {
16264   SDLoc DL(N);
16265 
16266   SDValue Pred = N->getOperand(1);
16267   SDValue VecToReduce = N->getOperand(2);
16268 
16269   // NOTE: The integer reduction's result type is not always linked to the
16270   // operand's element type so we construct it from the intrinsic's result type.
16271   EVT ReduceVT = getPackedSVEVectorVT(N->getValueType(0));
16272   SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, VecToReduce);
16273 
16274   // SVE reductions set the whole vector register with the first element
16275   // containing the reduction result, which we'll now extract.
16276   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
16277   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
16278                      Zero);
16279 }
16280 
16281 static SDValue combineSVEReductionFP(SDNode *N, unsigned Opc,
16282                                      SelectionDAG &DAG) {
16283   SDLoc DL(N);
16284 
16285   SDValue Pred = N->getOperand(1);
16286   SDValue VecToReduce = N->getOperand(2);
16287 
16288   EVT ReduceVT = VecToReduce.getValueType();
16289   SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, VecToReduce);
16290 
16291   // SVE reductions set the whole vector register with the first element
16292   // containing the reduction result, which we'll now extract.
16293   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
16294   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
16295                      Zero);
16296 }
16297 
16298 static SDValue combineSVEReductionOrderedFP(SDNode *N, unsigned Opc,
16299                                             SelectionDAG &DAG) {
16300   SDLoc DL(N);
16301 
16302   SDValue Pred = N->getOperand(1);
16303   SDValue InitVal = N->getOperand(2);
16304   SDValue VecToReduce = N->getOperand(3);
16305   EVT ReduceVT = VecToReduce.getValueType();
16306 
16307   // Ordered reductions use the first lane of the result vector as the
16308   // reduction's initial value.
16309   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
16310   InitVal = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ReduceVT,
16311                         DAG.getUNDEF(ReduceVT), InitVal, Zero);
16312 
16313   SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, InitVal, VecToReduce);
16314 
16315   // SVE reductions set the whole vector register with the first element
16316   // containing the reduction result, which we'll now extract.
16317   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
16318                      Zero);
16319 }
16320 
16321 static bool isAllInactivePredicate(SDValue N) {
16322   // Look through cast.
16323   while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST)
16324     N = N.getOperand(0);
16325 
16326   return ISD::isConstantSplatVectorAllZeros(N.getNode());
16327 }
16328 
16329 static bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) {
16330   unsigned NumElts = N.getValueType().getVectorMinNumElements();
16331 
16332   // Look through cast.
16333   while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST) {
16334     N = N.getOperand(0);
16335     // When reinterpreting from a type with fewer elements the "new" elements
16336     // are not active, so bail if they're likely to be used.
16337     if (N.getValueType().getVectorMinNumElements() < NumElts)
16338       return false;
16339   }
16340 
16341   if (ISD::isConstantSplatVectorAllOnes(N.getNode()))
16342     return true;
16343 
16344   // "ptrue p.<ty>, all" can be considered all active when <ty> is the same size
16345   // or smaller than the implicit element type represented by N.
16346   // NOTE: A larger element count implies a smaller element type.
16347   if (N.getOpcode() == AArch64ISD::PTRUE &&
16348       N.getConstantOperandVal(0) == AArch64SVEPredPattern::all)
16349     return N.getValueType().getVectorMinNumElements() >= NumElts;
16350 
16351   // If we're compiling for a specific vector-length, we can check if the
16352   // pattern's VL equals that of the scalable vector at runtime.
16353   if (N.getOpcode() == AArch64ISD::PTRUE) {
16354     const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
16355     unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
16356     unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
16357     if (MaxSVESize && MinSVESize == MaxSVESize) {
16358       unsigned VScale = MaxSVESize / AArch64::SVEBitsPerBlock;
16359       unsigned PatNumElts =
16360           getNumElementsFromSVEPredPattern(N.getConstantOperandVal(0));
16361       return PatNumElts == (NumElts * VScale);
16362     }
16363   }
16364 
16365   return false;
16366 }
16367 
16368 // If a merged operation has no inactive lanes we can relax it to a predicated
16369 // or unpredicated operation, which potentially allows better isel (perhaps
16370 // using immediate forms) or relaxing register reuse requirements.
16371 static SDValue convertMergedOpToPredOp(SDNode *N, unsigned Opc,
16372                                        SelectionDAG &DAG, bool UnpredOp = false,
16373                                        bool SwapOperands = false) {
16374   assert(N->getOpcode() == ISD::INTRINSIC_WO_CHAIN && "Expected intrinsic!");
16375   assert(N->getNumOperands() == 4 && "Expected 3 operand intrinsic!");
16376   SDValue Pg = N->getOperand(1);
16377   SDValue Op1 = N->getOperand(SwapOperands ? 3 : 2);
16378   SDValue Op2 = N->getOperand(SwapOperands ? 2 : 3);
16379 
16380   // ISD way to specify an all active predicate.
16381   if (isAllActivePredicate(DAG, Pg)) {
16382     if (UnpredOp)
16383       return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Op1, Op2);
16384 
16385     return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Pg, Op1, Op2);
16386   }
16387 
16388   // FUTURE: SplatVector(true)
16389   return SDValue();
16390 }
16391 
16392 static SDValue performIntrinsicCombine(SDNode *N,
16393                                        TargetLowering::DAGCombinerInfo &DCI,
16394                                        const AArch64Subtarget *Subtarget) {
16395   SelectionDAG &DAG = DCI.DAG;
16396   unsigned IID = getIntrinsicID(N);
16397   switch (IID) {
16398   default:
16399     break;
16400   case Intrinsic::get_active_lane_mask: {
16401     SDValue Res = SDValue();
16402     EVT VT = N->getValueType(0);
16403     if (VT.isFixedLengthVector()) {
16404       // We can use the SVE whilelo instruction to lower this intrinsic by
16405       // creating the appropriate sequence of scalable vector operations and
16406       // then extracting a fixed-width subvector from the scalable vector.
16407 
16408       SDLoc DL(N);
16409       SDValue ID =
16410           DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64);
16411 
16412       EVT WhileVT = EVT::getVectorVT(
16413           *DAG.getContext(), MVT::i1,
16414           ElementCount::getScalable(VT.getVectorNumElements()));
16415 
16416       // Get promoted scalable vector VT, i.e. promote nxv4i1 -> nxv4i32.
16417       EVT PromVT = getPromotedVTForPredicate(WhileVT);
16418 
16419       // Get the fixed-width equivalent of PromVT for extraction.
16420       EVT ExtVT =
16421           EVT::getVectorVT(*DAG.getContext(), PromVT.getVectorElementType(),
16422                            VT.getVectorElementCount());
16423 
16424       Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WhileVT, ID,
16425                         N->getOperand(1), N->getOperand(2));
16426       Res = DAG.getNode(ISD::SIGN_EXTEND, DL, PromVT, Res);
16427       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtVT, Res,
16428                         DAG.getConstant(0, DL, MVT::i64));
16429       Res = DAG.getNode(ISD::TRUNCATE, DL, VT, Res);
16430     }
16431     return Res;
16432   }
16433   case Intrinsic::aarch64_neon_vcvtfxs2fp:
16434   case Intrinsic::aarch64_neon_vcvtfxu2fp:
16435     return tryCombineFixedPointConvert(N, DCI, DAG);
16436   case Intrinsic::aarch64_neon_saddv:
16437     return combineAcrossLanesIntrinsic(AArch64ISD::SADDV, N, DAG);
16438   case Intrinsic::aarch64_neon_uaddv:
16439     return combineAcrossLanesIntrinsic(AArch64ISD::UADDV, N, DAG);
16440   case Intrinsic::aarch64_neon_sminv:
16441     return combineAcrossLanesIntrinsic(AArch64ISD::SMINV, N, DAG);
16442   case Intrinsic::aarch64_neon_uminv:
16443     return combineAcrossLanesIntrinsic(AArch64ISD::UMINV, N, DAG);
16444   case Intrinsic::aarch64_neon_smaxv:
16445     return combineAcrossLanesIntrinsic(AArch64ISD::SMAXV, N, DAG);
16446   case Intrinsic::aarch64_neon_umaxv:
16447     return combineAcrossLanesIntrinsic(AArch64ISD::UMAXV, N, DAG);
16448   case Intrinsic::aarch64_neon_fmax:
16449     return DAG.getNode(ISD::FMAXIMUM, SDLoc(N), N->getValueType(0),
16450                        N->getOperand(1), N->getOperand(2));
16451   case Intrinsic::aarch64_neon_fmin:
16452     return DAG.getNode(ISD::FMINIMUM, SDLoc(N), N->getValueType(0),
16453                        N->getOperand(1), N->getOperand(2));
16454   case Intrinsic::aarch64_neon_fmaxnm:
16455     return DAG.getNode(ISD::FMAXNUM, SDLoc(N), N->getValueType(0),
16456                        N->getOperand(1), N->getOperand(2));
16457   case Intrinsic::aarch64_neon_fminnm:
16458     return DAG.getNode(ISD::FMINNUM, SDLoc(N), N->getValueType(0),
16459                        N->getOperand(1), N->getOperand(2));
16460   case Intrinsic::aarch64_neon_smull:
16461     return DAG.getNode(AArch64ISD::SMULL, SDLoc(N), N->getValueType(0),
16462                        N->getOperand(1), N->getOperand(2));
16463   case Intrinsic::aarch64_neon_umull:
16464     return DAG.getNode(AArch64ISD::UMULL, SDLoc(N), N->getValueType(0),
16465                        N->getOperand(1), N->getOperand(2));
16466   case Intrinsic::aarch64_neon_pmull:
16467   case Intrinsic::aarch64_neon_sqdmull:
16468     return tryCombineLongOpWithDup(IID, N, DCI, DAG);
16469   case Intrinsic::aarch64_neon_sqshl:
16470   case Intrinsic::aarch64_neon_uqshl:
16471   case Intrinsic::aarch64_neon_sqshlu:
16472   case Intrinsic::aarch64_neon_srshl:
16473   case Intrinsic::aarch64_neon_urshl:
16474   case Intrinsic::aarch64_neon_sshl:
16475   case Intrinsic::aarch64_neon_ushl:
16476     return tryCombineShiftImm(IID, N, DAG);
16477   case Intrinsic::aarch64_crc32b:
16478   case Intrinsic::aarch64_crc32cb:
16479     return tryCombineCRC32(0xff, N, DAG);
16480   case Intrinsic::aarch64_crc32h:
16481   case Intrinsic::aarch64_crc32ch:
16482     return tryCombineCRC32(0xffff, N, DAG);
16483   case Intrinsic::aarch64_sve_saddv:
16484     // There is no i64 version of SADDV because the sign is irrelevant.
16485     if (N->getOperand(2)->getValueType(0).getVectorElementType() == MVT::i64)
16486       return combineSVEReductionInt(N, AArch64ISD::UADDV_PRED, DAG);
16487     else
16488       return combineSVEReductionInt(N, AArch64ISD::SADDV_PRED, DAG);
16489   case Intrinsic::aarch64_sve_uaddv:
16490     return combineSVEReductionInt(N, AArch64ISD::UADDV_PRED, DAG);
16491   case Intrinsic::aarch64_sve_smaxv:
16492     return combineSVEReductionInt(N, AArch64ISD::SMAXV_PRED, DAG);
16493   case Intrinsic::aarch64_sve_umaxv:
16494     return combineSVEReductionInt(N, AArch64ISD::UMAXV_PRED, DAG);
16495   case Intrinsic::aarch64_sve_sminv:
16496     return combineSVEReductionInt(N, AArch64ISD::SMINV_PRED, DAG);
16497   case Intrinsic::aarch64_sve_uminv:
16498     return combineSVEReductionInt(N, AArch64ISD::UMINV_PRED, DAG);
16499   case Intrinsic::aarch64_sve_orv:
16500     return combineSVEReductionInt(N, AArch64ISD::ORV_PRED, DAG);
16501   case Intrinsic::aarch64_sve_eorv:
16502     return combineSVEReductionInt(N, AArch64ISD::EORV_PRED, DAG);
16503   case Intrinsic::aarch64_sve_andv:
16504     return combineSVEReductionInt(N, AArch64ISD::ANDV_PRED, DAG);
16505   case Intrinsic::aarch64_sve_index:
16506     return LowerSVEIntrinsicIndex(N, DAG);
16507   case Intrinsic::aarch64_sve_dup:
16508     return LowerSVEIntrinsicDUP(N, DAG);
16509   case Intrinsic::aarch64_sve_dup_x:
16510     return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), N->getValueType(0),
16511                        N->getOperand(1));
16512   case Intrinsic::aarch64_sve_ext:
16513     return LowerSVEIntrinsicEXT(N, DAG);
16514   case Intrinsic::aarch64_sve_mul:
16515     return convertMergedOpToPredOp(N, AArch64ISD::MUL_PRED, DAG);
16516   case Intrinsic::aarch64_sve_smulh:
16517     return convertMergedOpToPredOp(N, AArch64ISD::MULHS_PRED, DAG);
16518   case Intrinsic::aarch64_sve_umulh:
16519     return convertMergedOpToPredOp(N, AArch64ISD::MULHU_PRED, DAG);
16520   case Intrinsic::aarch64_sve_smin:
16521     return convertMergedOpToPredOp(N, AArch64ISD::SMIN_PRED, DAG);
16522   case Intrinsic::aarch64_sve_umin:
16523     return convertMergedOpToPredOp(N, AArch64ISD::UMIN_PRED, DAG);
16524   case Intrinsic::aarch64_sve_smax:
16525     return convertMergedOpToPredOp(N, AArch64ISD::SMAX_PRED, DAG);
16526   case Intrinsic::aarch64_sve_umax:
16527     return convertMergedOpToPredOp(N, AArch64ISD::UMAX_PRED, DAG);
16528   case Intrinsic::aarch64_sve_lsl:
16529     return convertMergedOpToPredOp(N, AArch64ISD::SHL_PRED, DAG);
16530   case Intrinsic::aarch64_sve_lsr:
16531     return convertMergedOpToPredOp(N, AArch64ISD::SRL_PRED, DAG);
16532   case Intrinsic::aarch64_sve_asr:
16533     return convertMergedOpToPredOp(N, AArch64ISD::SRA_PRED, DAG);
16534   case Intrinsic::aarch64_sve_fadd:
16535     return convertMergedOpToPredOp(N, AArch64ISD::FADD_PRED, DAG);
16536   case Intrinsic::aarch64_sve_fsub:
16537     return convertMergedOpToPredOp(N, AArch64ISD::FSUB_PRED, DAG);
16538   case Intrinsic::aarch64_sve_fmul:
16539     return convertMergedOpToPredOp(N, AArch64ISD::FMUL_PRED, DAG);
16540   case Intrinsic::aarch64_sve_add:
16541     return convertMergedOpToPredOp(N, ISD::ADD, DAG, true);
16542   case Intrinsic::aarch64_sve_sub:
16543     return convertMergedOpToPredOp(N, ISD::SUB, DAG, true);
16544   case Intrinsic::aarch64_sve_subr:
16545     return convertMergedOpToPredOp(N, ISD::SUB, DAG, true, true);
16546   case Intrinsic::aarch64_sve_and:
16547     return convertMergedOpToPredOp(N, ISD::AND, DAG, true);
16548   case Intrinsic::aarch64_sve_bic:
16549     return convertMergedOpToPredOp(N, AArch64ISD::BIC, DAG, true);
16550   case Intrinsic::aarch64_sve_eor:
16551     return convertMergedOpToPredOp(N, ISD::XOR, DAG, true);
16552   case Intrinsic::aarch64_sve_orr:
16553     return convertMergedOpToPredOp(N, ISD::OR, DAG, true);
16554   case Intrinsic::aarch64_sve_sabd:
16555     return convertMergedOpToPredOp(N, ISD::ABDS, DAG, true);
16556   case Intrinsic::aarch64_sve_uabd:
16557     return convertMergedOpToPredOp(N, ISD::ABDU, DAG, true);
16558   case Intrinsic::aarch64_sve_sqadd:
16559     return convertMergedOpToPredOp(N, ISD::SADDSAT, DAG, true);
16560   case Intrinsic::aarch64_sve_sqsub:
16561     return convertMergedOpToPredOp(N, ISD::SSUBSAT, DAG, true);
16562   case Intrinsic::aarch64_sve_uqadd:
16563     return convertMergedOpToPredOp(N, ISD::UADDSAT, DAG, true);
16564   case Intrinsic::aarch64_sve_uqsub:
16565     return convertMergedOpToPredOp(N, ISD::USUBSAT, DAG, true);
16566   case Intrinsic::aarch64_sve_sqadd_x:
16567     return DAG.getNode(ISD::SADDSAT, SDLoc(N), N->getValueType(0),
16568                        N->getOperand(1), N->getOperand(2));
16569   case Intrinsic::aarch64_sve_sqsub_x:
16570     return DAG.getNode(ISD::SSUBSAT, SDLoc(N), N->getValueType(0),
16571                        N->getOperand(1), N->getOperand(2));
16572   case Intrinsic::aarch64_sve_uqadd_x:
16573     return DAG.getNode(ISD::UADDSAT, SDLoc(N), N->getValueType(0),
16574                        N->getOperand(1), N->getOperand(2));
16575   case Intrinsic::aarch64_sve_uqsub_x:
16576     return DAG.getNode(ISD::USUBSAT, SDLoc(N), N->getValueType(0),
16577                        N->getOperand(1), N->getOperand(2));
16578   case Intrinsic::aarch64_sve_asrd:
16579     return DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, SDLoc(N), N->getValueType(0),
16580                        N->getOperand(1), N->getOperand(2), N->getOperand(3));
16581   case Intrinsic::aarch64_sve_cmphs:
16582     if (!N->getOperand(2).getValueType().isFloatingPoint())
16583       return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16584                          N->getValueType(0), N->getOperand(1), N->getOperand(2),
16585                          N->getOperand(3), DAG.getCondCode(ISD::SETUGE));
16586     break;
16587   case Intrinsic::aarch64_sve_cmphi:
16588     if (!N->getOperand(2).getValueType().isFloatingPoint())
16589       return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16590                          N->getValueType(0), N->getOperand(1), N->getOperand(2),
16591                          N->getOperand(3), DAG.getCondCode(ISD::SETUGT));
16592     break;
16593   case Intrinsic::aarch64_sve_fcmpge:
16594   case Intrinsic::aarch64_sve_cmpge:
16595     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16596                        N->getValueType(0), N->getOperand(1), N->getOperand(2),
16597                        N->getOperand(3), DAG.getCondCode(ISD::SETGE));
16598     break;
16599   case Intrinsic::aarch64_sve_fcmpgt:
16600   case Intrinsic::aarch64_sve_cmpgt:
16601     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16602                        N->getValueType(0), N->getOperand(1), N->getOperand(2),
16603                        N->getOperand(3), DAG.getCondCode(ISD::SETGT));
16604     break;
16605   case Intrinsic::aarch64_sve_fcmpeq:
16606   case Intrinsic::aarch64_sve_cmpeq:
16607     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16608                        N->getValueType(0), N->getOperand(1), N->getOperand(2),
16609                        N->getOperand(3), DAG.getCondCode(ISD::SETEQ));
16610     break;
16611   case Intrinsic::aarch64_sve_fcmpne:
16612   case Intrinsic::aarch64_sve_cmpne:
16613     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16614                        N->getValueType(0), N->getOperand(1), N->getOperand(2),
16615                        N->getOperand(3), DAG.getCondCode(ISD::SETNE));
16616     break;
16617   case Intrinsic::aarch64_sve_fcmpuo:
16618     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16619                        N->getValueType(0), N->getOperand(1), N->getOperand(2),
16620                        N->getOperand(3), DAG.getCondCode(ISD::SETUO));
16621     break;
16622   case Intrinsic::aarch64_sve_fadda:
16623     return combineSVEReductionOrderedFP(N, AArch64ISD::FADDA_PRED, DAG);
16624   case Intrinsic::aarch64_sve_faddv:
16625     return combineSVEReductionFP(N, AArch64ISD::FADDV_PRED, DAG);
16626   case Intrinsic::aarch64_sve_fmaxnmv:
16627     return combineSVEReductionFP(N, AArch64ISD::FMAXNMV_PRED, DAG);
16628   case Intrinsic::aarch64_sve_fmaxv:
16629     return combineSVEReductionFP(N, AArch64ISD::FMAXV_PRED, DAG);
16630   case Intrinsic::aarch64_sve_fminnmv:
16631     return combineSVEReductionFP(N, AArch64ISD::FMINNMV_PRED, DAG);
16632   case Intrinsic::aarch64_sve_fminv:
16633     return combineSVEReductionFP(N, AArch64ISD::FMINV_PRED, DAG);
16634   case Intrinsic::aarch64_sve_sel:
16635     return DAG.getNode(ISD::VSELECT, SDLoc(N), N->getValueType(0),
16636                        N->getOperand(1), N->getOperand(2), N->getOperand(3));
16637   case Intrinsic::aarch64_sve_cmpeq_wide:
16638     return tryConvertSVEWideCompare(N, ISD::SETEQ, DCI, DAG);
16639   case Intrinsic::aarch64_sve_cmpne_wide:
16640     return tryConvertSVEWideCompare(N, ISD::SETNE, DCI, DAG);
16641   case Intrinsic::aarch64_sve_cmpge_wide:
16642     return tryConvertSVEWideCompare(N, ISD::SETGE, DCI, DAG);
16643   case Intrinsic::aarch64_sve_cmpgt_wide:
16644     return tryConvertSVEWideCompare(N, ISD::SETGT, DCI, DAG);
16645   case Intrinsic::aarch64_sve_cmplt_wide:
16646     return tryConvertSVEWideCompare(N, ISD::SETLT, DCI, DAG);
16647   case Intrinsic::aarch64_sve_cmple_wide:
16648     return tryConvertSVEWideCompare(N, ISD::SETLE, DCI, DAG);
16649   case Intrinsic::aarch64_sve_cmphs_wide:
16650     return tryConvertSVEWideCompare(N, ISD::SETUGE, DCI, DAG);
16651   case Intrinsic::aarch64_sve_cmphi_wide:
16652     return tryConvertSVEWideCompare(N, ISD::SETUGT, DCI, DAG);
16653   case Intrinsic::aarch64_sve_cmplo_wide:
16654     return tryConvertSVEWideCompare(N, ISD::SETULT, DCI, DAG);
16655   case Intrinsic::aarch64_sve_cmpls_wide:
16656     return tryConvertSVEWideCompare(N, ISD::SETULE, DCI, DAG);
16657   case Intrinsic::aarch64_sve_ptest_any:
16658     return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2),
16659                     AArch64CC::ANY_ACTIVE);
16660   case Intrinsic::aarch64_sve_ptest_first:
16661     return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2),
16662                     AArch64CC::FIRST_ACTIVE);
16663   case Intrinsic::aarch64_sve_ptest_last:
16664     return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2),
16665                     AArch64CC::LAST_ACTIVE);
16666   }
16667   return SDValue();
16668 }
16669 
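      // Returns true when extending \p N is expected to be free: N is a (masked)
      // load, which can become an extending load, or a splat of zero, whose
      // extension is still all zeros.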
16670 static bool isCheapToExtend(const SDValue &N) {
16671   unsigned OC = N->getOpcode();
16672   return OC == ISD::LOAD || OC == ISD::MLOAD ||
16673          ISD::isConstantSplatVectorAllZeros(N.getNode());
16674 }
16675 
16676 static SDValue
16677 performSignExtendSetCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
16678                               SelectionDAG &DAG) {
16679   // If we have (sext (setcc A B)) and A and B are cheap to extend,
16680   // we can move the sext into the arguments and have the same result. For
16681   // example, if A and B are both loads, we can make those extending loads and
16682   // avoid an extra instruction. This pattern appears often in VLS code
16683   // generation where the inputs to the setcc have a different size to the
16684   // instruction that wants to use the result of the setcc.
16685   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
16686          N->getOperand(0)->getOpcode() == ISD::SETCC);
16687   const SDValue SetCC = N->getOperand(0);
16688 
16689   const SDValue CCOp0 = SetCC.getOperand(0);
16690   const SDValue CCOp1 = SetCC.getOperand(1);
16691   if (!CCOp0->getValueType(0).isInteger() ||
16692       !CCOp1->getValueType(0).isInteger())
16693     return SDValue();
16694 
16695   ISD::CondCode Code =
16696       cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get();
16697 
16698   ISD::NodeType ExtType =
16699       isSignedIntSetCC(Code) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
16700 
16701   if (isCheapToExtend(SetCC.getOperand(0)) &&
16702       isCheapToExtend(SetCC.getOperand(1))) {
16703     const SDValue Ext1 =
16704         DAG.getNode(ExtType, SDLoc(N), N->getValueType(0), CCOp0);
16705     const SDValue Ext2 =
16706         DAG.getNode(ExtType, SDLoc(N), N->getValueType(0), CCOp1);
16707 
16708     return DAG.getSetCC(SDLoc(SetCC), N->getValueType(0), Ext1, Ext2, Code);
16711   }
16712 
16713   return SDValue();
16714 }
16715 
16716 static SDValue performExtendCombine(SDNode *N,
16717                                     TargetLowering::DAGCombinerInfo &DCI,
16718                                     SelectionDAG &DAG) {
16719   // If we see something like (zext (sabd (extract_high ...), (DUP ...))) then
16720   // we can convert that DUP into another extract_high (of a bigger DUP), which
16721   // helps the backend to decide that an sabdl2 would be useful, saving a real
16722   // extract_high operation.
16723   if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ZERO_EXTEND &&
16724       (N->getOperand(0).getOpcode() == ISD::ABDU ||
16725        N->getOperand(0).getOpcode() == ISD::ABDS)) {
16726     SDNode *ABDNode = N->getOperand(0).getNode();
16727     SDValue NewABD =
16728         tryCombineLongOpWithDup(Intrinsic::not_intrinsic, ABDNode, DCI, DAG);
16729     if (!NewABD.getNode())
16730       return SDValue();
16731 
16732     return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), NewABD);
16733   }
16734 
16735   if (N->getValueType(0).isFixedLengthVector() &&
16736       N->getOpcode() == ISD::SIGN_EXTEND &&
16737       N->getOperand(0)->getOpcode() == ISD::SETCC)
16738     return performSignExtendSetCCCombine(N, DCI, DAG);
16739 
16740   return SDValue();
16741 }
16742 
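      // Replace the store of a NumVecElts-element splat of SplatVal with NumVecElts
      // scalar stores at consecutive element offsets. These are expected to be
      // merged into store pair (stp) instructions by the load/store optimizer.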
16743 static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St,
16744                                SDValue SplatVal, unsigned NumVecElts) {
16745   assert(!St.isTruncatingStore() && "cannot split truncating vector store");
16746   Align OrigAlignment = St.getAlign();
16747   unsigned EltOffset = SplatVal.getValueType().getSizeInBits() / 8;
16748 
16749   // Create scalar stores. This is at least as good as the code sequence for a
16750   // split unaligned store which is a dup.s, ext.b, and two stores.
16751   // Most of the time the three stores should be replaced by store pair
16752   // instructions (stp).
16753   SDLoc DL(&St);
16754   SDValue BasePtr = St.getBasePtr();
16755   uint64_t BaseOffset = 0;
16756 
16757   const MachinePointerInfo &PtrInfo = St.getPointerInfo();
16758   SDValue NewST1 =
16759       DAG.getStore(St.getChain(), DL, SplatVal, BasePtr, PtrInfo,
16760                    OrigAlignment, St.getMemOperand()->getFlags());
16761 
16762   // As this is in ISel, we will not merge this add, which may degrade results.
16763   if (BasePtr->getOpcode() == ISD::ADD &&
16764       isa<ConstantSDNode>(BasePtr->getOperand(1))) {
16765     BaseOffset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
16766     BasePtr = BasePtr->getOperand(0);
16767   }
16768 
16769   unsigned Offset = EltOffset;
16770   while (--NumVecElts) {
16771     Align Alignment = commonAlignment(OrigAlignment, Offset);
16772     SDValue OffsetPtr =
16773         DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
16774                     DAG.getConstant(BaseOffset + Offset, DL, MVT::i64));
16775     NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr,
16776                           PtrInfo.getWithOffset(Offset), Alignment,
16777                           St.getMemOperand()->getFlags());
16778     Offset += EltOffset;
16779   }
16780   return NewST1;
16781 }
16782 
16783 // Returns an SVE type that ContentTy can be trivially sign or zero extended
16784 // into.
16785 static MVT getSVEContainerType(EVT ContentTy) {
16786   assert(ContentTy.isSimple() && "No SVE containers for extended types");
16787 
16788   switch (ContentTy.getSimpleVT().SimpleTy) {
16789   default:
16790     llvm_unreachable("No known SVE container for this MVT type");
16791   case MVT::nxv2i8:
16792   case MVT::nxv2i16:
16793   case MVT::nxv2i32:
16794   case MVT::nxv2i64:
16795   case MVT::nxv2f32:
16796   case MVT::nxv2f64:
16797     return MVT::nxv2i64;
16798   case MVT::nxv4i8:
16799   case MVT::nxv4i16:
16800   case MVT::nxv4i32:
16801   case MVT::nxv4f32:
16802     return MVT::nxv4i32;
16803   case MVT::nxv8i8:
16804   case MVT::nxv8i16:
16805   case MVT::nxv8f16:
16806   case MVT::nxv8bf16:
16807     return MVT::nxv8i16;
16808   case MVT::nxv16i8:
16809     return MVT::nxv16i8;
16810   }
16811 }
16812 
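      // Lower an SVE load intrinsic to the target node \p Opc. Integer results are
      // loaded in their natural SVE container type and truncated back to the
      // original narrower type afterwards.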
16813 static SDValue performLD1Combine(SDNode *N, SelectionDAG &DAG, unsigned Opc) {
16814   SDLoc DL(N);
16815   EVT VT = N->getValueType(0);
16816 
16817   if (VT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
16818     return SDValue();
16819 
16820   EVT ContainerVT = VT;
16821   if (ContainerVT.isInteger())
16822     ContainerVT = getSVEContainerType(ContainerVT);
16823 
16824   SDVTList VTs = DAG.getVTList(ContainerVT, MVT::Other);
16825   SDValue Ops[] = { N->getOperand(0), // Chain
16826                     N->getOperand(2), // Pg
16827                     N->getOperand(3), // Base
16828                     DAG.getValueType(VT) };
16829 
16830   SDValue Load = DAG.getNode(Opc, DL, VTs, Ops);
16831   SDValue LoadChain = SDValue(Load.getNode(), 1);
16832 
16833   if (ContainerVT.isInteger() && (VT != ContainerVT))
16834     Load = DAG.getNode(ISD::TRUNCATE, DL, VT, Load.getValue(0));
16835 
16836   return DAG.getMergeValues({ Load, LoadChain }, DL);
16837 }
16838 
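      // Lower an SVE non-temporal load intrinsic to a masked load with a zero
      // passthru, bitcasting the result back to the original type for
      // floating-point vectors.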
16839 static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) {
16840   SDLoc DL(N);
16841   EVT VT = N->getValueType(0);
16842   EVT PtrTy = N->getOperand(3).getValueType();
16843 
16844   EVT LoadVT = VT;
16845   if (VT.isFloatingPoint())
16846     LoadVT = VT.changeTypeToInteger();
16847 
16848   auto *MINode = cast<MemIntrinsicSDNode>(N);
16849   SDValue PassThru = DAG.getConstant(0, DL, LoadVT);
16850   SDValue L = DAG.getMaskedLoad(LoadVT, DL, MINode->getChain(),
16851                                 MINode->getOperand(3), DAG.getUNDEF(PtrTy),
16852                                 MINode->getOperand(2), PassThru,
16853                                 MINode->getMemoryVT(), MINode->getMemOperand(),
16854                                 ISD::UNINDEXED, ISD::NON_EXTLOAD, false);
16855 
16856   if (VT.isFloatingPoint()) {
16857     SDValue Ops[] = { DAG.getNode(ISD::BITCAST, DL, VT, L), L.getValue(1) };
16858     return DAG.getMergeValues(Ops, DL);
16859   }
16860 
16861   return L;
16862 }
16863 
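      // Lower an SVE replicating load intrinsic (LD1RQ/LD1RO) to its target node,
      // performing the load as an integer and bitcasting back for floating-point
      // result types.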
16864 template <unsigned Opcode>
16865 static SDValue performLD1ReplicateCombine(SDNode *N, SelectionDAG &DAG) {
16866   static_assert(Opcode == AArch64ISD::LD1RQ_MERGE_ZERO ||
16867                     Opcode == AArch64ISD::LD1RO_MERGE_ZERO,
16868                 "Unsupported opcode.");
16869   SDLoc DL(N);
16870   EVT VT = N->getValueType(0);
16871 
16872   EVT LoadVT = VT;
16873   if (VT.isFloatingPoint())
16874     LoadVT = VT.changeTypeToInteger();
16875 
16876   SDValue Ops[] = {N->getOperand(0), N->getOperand(2), N->getOperand(3)};
16877   SDValue Load = DAG.getNode(Opcode, DL, {LoadVT, MVT::Other}, Ops);
16878   SDValue LoadChain = SDValue(Load.getNode(), 1);
16879 
16880   if (VT.isFloatingPoint())
16881     Load = DAG.getNode(ISD::BITCAST, DL, VT, Load.getValue(0));
16882 
16883   return DAG.getMergeValues({Load, LoadChain}, DL);
16884 }
16885 
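      // Lower an SVE predicated store intrinsic to ST1_PRED. Floating-point data is
      // bitcast to the equivalent integer container type; integer data is
      // any-extended to the container type.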
16886 static SDValue performST1Combine(SDNode *N, SelectionDAG &DAG) {
16887   SDLoc DL(N);
16888   SDValue Data = N->getOperand(2);
16889   EVT DataVT = Data.getValueType();
16890   EVT HwSrcVt = getSVEContainerType(DataVT);
16891   SDValue InputVT = DAG.getValueType(DataVT);
16892 
16893   if (DataVT.isFloatingPoint())
16894     InputVT = DAG.getValueType(HwSrcVt);
16895 
16896   SDValue SrcNew;
16897   if (Data.getValueType().isFloatingPoint())
16898     SrcNew = DAG.getNode(ISD::BITCAST, DL, HwSrcVt, Data);
16899   else
16900     SrcNew = DAG.getNode(ISD::ANY_EXTEND, DL, HwSrcVt, Data);
16901 
16902   SDValue Ops[] = { N->getOperand(0), // Chain
16903                     SrcNew,
16904                     N->getOperand(4), // Base
16905                     N->getOperand(3), // Pg
16906                     InputVT
16907                   };
16908 
16909   return DAG.getNode(AArch64ISD::ST1_PRED, DL, N->getValueType(0), Ops);
16910 }
16911 
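      // Lower an SVE non-temporal store intrinsic to a masked store, bitcasting
      // floating-point data to the equivalent integer type first.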
16912 static SDValue performSTNT1Combine(SDNode *N, SelectionDAG &DAG) {
16913   SDLoc DL(N);
16914 
16915   SDValue Data = N->getOperand(2);
16916   EVT DataVT = Data.getValueType();
16917   EVT PtrTy = N->getOperand(4).getValueType();
16918 
16919   if (DataVT.isFloatingPoint())
16920     Data = DAG.getNode(ISD::BITCAST, DL, DataVT.changeTypeToInteger(), Data);
16921 
16922   auto *MINode = cast<MemIntrinsicSDNode>(N);
16923   return DAG.getMaskedStore(MINode->getChain(), DL, Data, MINode->getOperand(4),
16924                             DAG.getUNDEF(PtrTy), MINode->getOperand(3),
16925                             MINode->getMemoryVT(), MINode->getMemOperand(),
16926                             ISD::UNINDEXED, false, false);
16927 }
16928 
16929 /// Replace a vector store of a zero splat with scalar stores of WZR/XZR. The
16930 /// load/store optimizer pass will merge them into store pair stores. This
16931 /// should be better than a movi to create the vector zero followed by a vector
16932 /// store if the zero constant is not re-used, since one instruction and one
16933 /// register live range will be removed.
16934 ///
16935 /// For example, the final generated code should be:
16936 ///
16937 ///   stp xzr, xzr, [x0]
16938 ///
16939 /// instead of:
16940 ///
16941 ///   movi v0.2d, #0
16942 ///   str q0, [x0]
16943 ///
16944 static SDValue replaceZeroVectorStore(SelectionDAG &DAG, StoreSDNode &St) {
16945   SDValue StVal = St.getValue();
16946   EVT VT = StVal.getValueType();
16947 
16948   // Avoid scalarizing zero splat stores for scalable vectors.
16949   if (VT.isScalableVector())
16950     return SDValue();
16951 
16952   // It is beneficial to scalarize a zero splat store for 2 or 3 i64 elements or
16953   // 2, 3 or 4 i32 elements.
16954   int NumVecElts = VT.getVectorNumElements();
16955   if (!(((NumVecElts == 2 || NumVecElts == 3) &&
16956          VT.getVectorElementType().getSizeInBits() == 64) ||
16957         ((NumVecElts == 2 || NumVecElts == 3 || NumVecElts == 4) &&
16958          VT.getVectorElementType().getSizeInBits() == 32)))
16959     return SDValue();
16960 
16961   if (StVal.getOpcode() != ISD::BUILD_VECTOR)
16962     return SDValue();
16963 
16964   // If the zero constant has more than one use then the vector store could be
16965   // better since the constant mov will be amortized and stp q instructions
16966   // should be able to be formed.
16967   if (!StVal.hasOneUse())
16968     return SDValue();
16969 
16970   // If the store is truncating then it's going down to i16 or smaller, which
16971   // means it can be implemented in a single store anyway.
16972   if (St.isTruncatingStore())
16973     return SDValue();
16974 
16975   // If the immediate offset of the address operand is too large for the stp
16976   // instruction, then bail out.
16977   if (DAG.isBaseWithConstantOffset(St.getBasePtr())) {
16978     int64_t Offset = St.getBasePtr()->getConstantOperandVal(1);
16979     if (Offset < -512 || Offset > 504)
16980       return SDValue();
16981   }
16982 
16983   for (int I = 0; I < NumVecElts; ++I) {
16984     SDValue EltVal = StVal.getOperand(I);
16985     if (!isNullConstant(EltVal) && !isNullFPConstant(EltVal))
16986       return SDValue();
16987   }
16988 
16989   // Use a CopyFromReg WZR/XZR here to prevent
16990   // DAGCombiner::MergeConsecutiveStores from undoing this transformation.
16991   SDLoc DL(&St);
16992   unsigned ZeroReg;
16993   EVT ZeroVT;
16994   if (VT.getVectorElementType().getSizeInBits() == 32) {
16995     ZeroReg = AArch64::WZR;
16996     ZeroVT = MVT::i32;
16997   } else {
16998     ZeroReg = AArch64::XZR;
16999     ZeroVT = MVT::i64;
17000   }
17001   SDValue SplatVal =
17002       DAG.getCopyFromReg(DAG.getEntryNode(), DL, ZeroReg, ZeroVT);
17003   return splitStoreSplat(DAG, St, SplatVal, NumVecElts);
17004 }
17005 
17006 /// Replace a splat of a scalar to a vector store by scalar stores of the scalar
17007 /// value. The load store optimizer pass will merge them to store pair stores.
17008 /// This has better performance than a splat of the scalar followed by a split
17009 /// vector store. Even if the stores are not merged, it is four stores vs. a
17010 /// dup followed by an ext.b and two stores.
17011 static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode &St) {
17012   SDValue StVal = St.getValue();
17013   EVT VT = StVal.getValueType();
17014 
17015   // Don't replace floating point stores; they possibly won't be transformed to
17016   // stp because of the store pair suppress pass.
17017   if (VT.isFloatingPoint())
17018     return SDValue();
17019 
17020   // We can express a splat as store pair(s) for 2 or 4 elements.
17021   unsigned NumVecElts = VT.getVectorNumElements();
17022   if (NumVecElts != 4 && NumVecElts != 2)
17023     return SDValue();
17024 
17025   // If the store is truncating then it's going down to i16 or smaller, which
17026   // means it can be implemented in a single store anyway.
17027   if (St.isTruncatingStore())
17028     return SDValue();
17029 
17030   // Check that this is a splat.
17031   // Make sure that each of the relevant vector element locations are inserted
17032   // to, i.e. 0 and 1 for v2i64 and 0, 1, 2, 3 for v4i32.
17033   std::bitset<4> IndexNotInserted((1 << NumVecElts) - 1);
17034   SDValue SplatVal;
17035   for (unsigned I = 0; I < NumVecElts; ++I) {
17036     // Check for insert vector elements.
17037     if (StVal.getOpcode() != ISD::INSERT_VECTOR_ELT)
17038       return SDValue();
17039 
17040     // Check that same value is inserted at each vector element.
17041     if (I == 0)
17042       SplatVal = StVal.getOperand(1);
17043     else if (StVal.getOperand(1) != SplatVal)
17044       return SDValue();
17045 
17046     // Check insert element index.
17047     ConstantSDNode *CIndex = dyn_cast<ConstantSDNode>(StVal.getOperand(2));
17048     if (!CIndex)
17049       return SDValue();
17050     uint64_t IndexVal = CIndex->getZExtValue();
17051     if (IndexVal >= NumVecElts)
17052       return SDValue();
17053     IndexNotInserted.reset(IndexVal);
17054 
17055     StVal = StVal.getOperand(0);
17056   }
17057   // Check that all vector element locations were inserted to.
17058   if (IndexNotInserted.any())
17059     return SDValue();
17060 
17061   return splitStoreSplat(DAG, St, SplatVal, NumVecElts);
17062 }
17063 
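      // Split fixed-length vector stores that are slow when misaligned, and replace
      // zero or scalar splat stores with sequences of scalar stores where that is
      // profitable.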
17064 static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
17065                            SelectionDAG &DAG,
17066                            const AArch64Subtarget *Subtarget) {
17067 
17068   StoreSDNode *S = cast<StoreSDNode>(N);
17069   if (S->isVolatile() || S->isIndexed())
17070     return SDValue();
17071 
17072   SDValue StVal = S->getValue();
17073   EVT VT = StVal.getValueType();
17074 
17075   if (!VT.isFixedLengthVector())
17076     return SDValue();
17077 
17078   // If we get a splat of zeros, convert this vector store to a store of
17079   // scalars. They will be merged into store pairs of xzr thereby removing one
17080   // instruction and one register.
17081   if (SDValue ReplacedZeroSplat = replaceZeroVectorStore(DAG, *S))
17082     return ReplacedZeroSplat;
17083 
17084   // FIXME: The logic for deciding if an unaligned store should be split should
17085   // be included in TLI.allowsMisalignedMemoryAccesses(), and there should be
17086   // a call to that function here.
17087 
17088   if (!Subtarget->isMisaligned128StoreSlow())
17089     return SDValue();
17090 
17091   // Don't split at -Oz.
17092   if (DAG.getMachineFunction().getFunction().hasMinSize())
17093     return SDValue();
17094 
17095   // Don't split v2i64 vectors. Memcpy lowering produces those and splitting
17096   // those up regresses performance on micro-benchmarks and olden/bh.
17097   if (VT.getVectorNumElements() < 2 || VT == MVT::v2i64)
17098     return SDValue();
17099 
17100   // Split unaligned 16B stores. They are terrible for performance.
17101   // Don't split stores with alignment of 1 or 2. Code that uses clang vector
17102   // extensions can use this to mark that it does not want splitting to happen
17103   // (by underspecifying alignment to be 1 or 2). Furthermore, the chance of
17104   // eliminating alignment hazards is only 1 in 8 for alignment of 2.
17105   if (VT.getSizeInBits() != 128 || S->getAlign() >= Align(16) ||
17106       S->getAlign() <= Align(2))
17107     return SDValue();
17108 
17109   // If we get a splat of a scalar, convert this vector store to a store of
17110   // scalars. They will be merged into store pairs thereby removing two
17111   // instructions.
17112   if (SDValue ReplacedSplat = replaceSplatVectorStore(DAG, *S))
17113     return ReplacedSplat;
17114 
17115   SDLoc DL(S);
17116 
17117   // Split VT into two.
17118   EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
17119   unsigned NumElts = HalfVT.getVectorNumElements();
17120   SDValue SubVector0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
17121                                    DAG.getConstant(0, DL, MVT::i64));
17122   SDValue SubVector1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
17123                                    DAG.getConstant(NumElts, DL, MVT::i64));
17124   SDValue BasePtr = S->getBasePtr();
17125   SDValue NewST1 =
17126       DAG.getStore(S->getChain(), DL, SubVector0, BasePtr, S->getPointerInfo(),
17127                    S->getAlign(), S->getMemOperand()->getFlags());
17128   SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
17129                                   DAG.getConstant(8, DL, MVT::i64));
17130   return DAG.getStore(NewST1.getValue(0), DL, SubVector1, OffsetPtr,
17131                       S->getPointerInfo(), S->getAlign(),
17132                       S->getMemOperand()->getFlags());
17133 }
17134 
17135 static SDValue performSpliceCombine(SDNode *N, SelectionDAG &DAG) {
17136   assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexpected Opcode!");
17137 
17138   // splice(pg, op1, undef) -> op1
17139   if (N->getOperand(2).isUndef())
17140     return N->getOperand(1);
17141 
17142   return SDValue();
17143 }
17144 
17145 static SDValue performUnpackCombine(SDNode *N, SelectionDAG &DAG) {
17146   assert((N->getOpcode() == AArch64ISD::UUNPKHI ||
17147           N->getOpcode() == AArch64ISD::UUNPKLO) &&
17148          "Unexpected Opcode!");
17149 
17150   // uunpklo/hi undef -> undef
17151   if (N->getOperand(0).isUndef())
17152     return DAG.getUNDEF(N->getValueType(0));
17153 
17154   return SDValue();
17155 }
17156 
17157 static SDValue performUzpCombine(SDNode *N, SelectionDAG &DAG) {
17158   SDLoc DL(N);
17159   SDValue Op0 = N->getOperand(0);
17160   SDValue Op1 = N->getOperand(1);
17161   EVT ResVT = N->getValueType(0);
17162 
17163   // uzp1(x, undef) -> concat(truncate(x), undef)
17164   if (Op1.getOpcode() == ISD::UNDEF) {
17165     EVT BCVT = MVT::Other, HalfVT = MVT::Other;
17166     switch (ResVT.getSimpleVT().SimpleTy) {
17167     default:
17168       break;
17169     case MVT::v16i8:
17170       BCVT = MVT::v8i16;
17171       HalfVT = MVT::v8i8;
17172       break;
17173     case MVT::v8i16:
17174       BCVT = MVT::v4i32;
17175       HalfVT = MVT::v4i16;
17176       break;
17177     case MVT::v4i32:
17178       BCVT = MVT::v2i64;
17179       HalfVT = MVT::v2i32;
17180       break;
17181     }
17182     if (BCVT != MVT::Other) {
17183       SDValue BC = DAG.getBitcast(BCVT, Op0);
17184       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, HalfVT, BC);
17185       return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResVT, Trunc,
17186                          DAG.getUNDEF(HalfVT));
17187     }
17188   }
17189 
17190   // uzp1(unpklo(uzp1(x, y)), z) => uzp1(x, z)
17191   if (Op0.getOpcode() == AArch64ISD::UUNPKLO) {
17192     if (Op0.getOperand(0).getOpcode() == AArch64ISD::UZP1) {
17193       SDValue X = Op0.getOperand(0).getOperand(0);
17194       return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, X, Op1);
17195     }
17196   }
17197 
17198   // uzp1(x, unpkhi(uzp1(y, z))) => uzp1(x, z)
17199   if (Op1.getOpcode() == AArch64ISD::UUNPKHI) {
17200     if (Op1.getOperand(0).getOpcode() == AArch64ISD::UZP1) {
17201       SDValue Z = Op1.getOperand(0).getOperand(1);
17202       return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, Op0, Z);
17203     }
17204   }
17205 
17206   return SDValue();
17207 }
17208 
17209 static SDValue performGLD1Combine(SDNode *N, SelectionDAG &DAG) {
17210   unsigned Opc = N->getOpcode();
17211 
17212   assert(((Opc >= AArch64ISD::GLD1_MERGE_ZERO && // unsigned gather loads
17213            Opc <= AArch64ISD::GLD1_IMM_MERGE_ZERO) ||
17214           (Opc >= AArch64ISD::GLD1S_MERGE_ZERO && // signed gather loads
17215            Opc <= AArch64ISD::GLD1S_IMM_MERGE_ZERO)) &&
17216          "Invalid opcode.");
17217 
17218   const bool Scaled = Opc == AArch64ISD::GLD1_SCALED_MERGE_ZERO ||
17219                       Opc == AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
17220   const bool Signed = Opc == AArch64ISD::GLD1S_MERGE_ZERO ||
17221                       Opc == AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
17222   const bool Extended = Opc == AArch64ISD::GLD1_SXTW_MERGE_ZERO ||
17223                         Opc == AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO ||
17224                         Opc == AArch64ISD::GLD1_UXTW_MERGE_ZERO ||
17225                         Opc == AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO;
17226 
17227   SDLoc DL(N);
17228   SDValue Chain = N->getOperand(0);
17229   SDValue Pg = N->getOperand(1);
17230   SDValue Base = N->getOperand(2);
17231   SDValue Offset = N->getOperand(3);
17232   SDValue Ty = N->getOperand(4);
17233 
17234   EVT ResVT = N->getValueType(0);
17235 
17236   const auto OffsetOpc = Offset.getOpcode();
17237   const bool OffsetIsZExt =
17238       OffsetOpc == AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU;
17239   const bool OffsetIsSExt =
17240       OffsetOpc == AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU;
17241 
17242   // Fold sign/zero extensions of vector offsets into GLD1 nodes where possible.
17243   if (!Extended && (OffsetIsSExt || OffsetIsZExt)) {
17244     SDValue ExtPg = Offset.getOperand(0);
17245     VTSDNode *ExtFrom = cast<VTSDNode>(Offset.getOperand(2).getNode());
17246     EVT ExtFromEVT = ExtFrom->getVT().getVectorElementType();
17247 
17248     // If the predicate for the sign- or zero-extended offset is the same as
17249     // the predicate used for this load, and the sign-/zero-extension was from
17250     // a 32-bit value, the extension can be folded into the gather itself.
17251     if (ExtPg == Pg && ExtFromEVT == MVT::i32) {
17252       SDValue UnextendedOffset = Offset.getOperand(1);
17253 
17254       unsigned NewOpc = getGatherVecOpcode(Scaled, OffsetIsSExt, true);
17255       if (Signed)
17256         NewOpc = getSignExtendedGatherOpcode(NewOpc);
17257 
17258       return DAG.getNode(NewOpc, DL, {ResVT, MVT::Other},
17259                          {Chain, Pg, Base, UnextendedOffset, Ty});
17260     }
17261   }
17262 
17263   return SDValue();
17264 }
17265 
17266 /// Optimize a vector shift instruction and its operand if shifted out
17267 /// bits are not used.
17268 static SDValue performVectorShiftCombine(SDNode *N,
17269                                          const AArch64TargetLowering &TLI,
17270                                          TargetLowering::DAGCombinerInfo &DCI) {
17271   assert(N->getOpcode() == AArch64ISD::VASHR ||
17272          N->getOpcode() == AArch64ISD::VLSHR);
17273 
17274   SDValue Op = N->getOperand(0);
17275   unsigned OpScalarSize = Op.getScalarValueSizeInBits();
17276 
17277   unsigned ShiftImm = N->getConstantOperandVal(1);
17278   assert(OpScalarSize > ShiftImm && "Invalid shift imm");
17279 
17280   APInt ShiftedOutBits = APInt::getLowBitsSet(OpScalarSize, ShiftImm);
17281   APInt DemandedMask = ~ShiftedOutBits;
17282 
17283   if (TLI.SimplifyDemandedBits(Op, DemandedMask, DCI))
17284     return SDValue(N, 0);
17285 
17286   return SDValue();
17287 }
17288 
17289 static SDValue performSunpkloCombine(SDNode *N, SelectionDAG &DAG) {
17290   // sunpklo(sext(pred)) -> sext(extract_low_half(pred))
17291   // This transform works in partnership with performSetCCPunpkCombine to
17292   // remove unnecessary transfer of predicates into standard registers and back
17293   if (N->getOperand(0).getOpcode() == ISD::SIGN_EXTEND &&
17294       N->getOperand(0)->getOperand(0)->getValueType(0).getScalarType() ==
17295           MVT::i1) {
17296     SDValue CC = N->getOperand(0)->getOperand(0);
17297     auto VT = CC->getValueType(0).getHalfNumVectorElementsVT(*DAG.getContext());
17298     SDValue Unpk = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT, CC,
17299                                DAG.getVectorIdxConstant(0, SDLoc(N)));
17300     return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), N->getValueType(0), Unpk);
17301   }
17302 
17303   return SDValue();
17304 }
17305 
17306 /// Target-specific DAG combine function for post-increment LD1 (lane) and
17307 /// post-increment LD1R.
17308 static SDValue performPostLD1Combine(SDNode *N,
17309                                      TargetLowering::DAGCombinerInfo &DCI,
17310                                      bool IsLaneOp) {
17311   if (DCI.isBeforeLegalizeOps())
17312     return SDValue();
17313 
17314   SelectionDAG &DAG = DCI.DAG;
17315   EVT VT = N->getValueType(0);
17316 
17317   if (!VT.is128BitVector() && !VT.is64BitVector())
17318     return SDValue();
17319 
17320   unsigned LoadIdx = IsLaneOp ? 1 : 0;
17321   SDNode *LD = N->getOperand(LoadIdx).getNode();
17322   // If it is not a LOAD, we cannot do this combine.
17323   if (LD->getOpcode() != ISD::LOAD)
17324     return SDValue();
17325 
17326   // The vector lane must be a constant in the LD1LANE opcode.
17327   SDValue Lane;
17328   if (IsLaneOp) {
17329     Lane = N->getOperand(2);
17330     auto *LaneC = dyn_cast<ConstantSDNode>(Lane);
17331     if (!LaneC || LaneC->getZExtValue() >= VT.getVectorNumElements())
17332       return SDValue();
17333   }
17334 
17335   LoadSDNode *LoadSDN = cast<LoadSDNode>(LD);
17336   EVT MemVT = LoadSDN->getMemoryVT();
17337   // Check if memory operand is the same type as the vector element.
17338   if (MemVT != VT.getVectorElementType())
17339     return SDValue();
17340 
17341   // Check if there are other uses. If so, do not combine as it will introduce
17342   // an extra load.
17343   for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end(); UI != UE;
17344        ++UI) {
17345     if (UI.getUse().getResNo() == 1) // Ignore uses of the chain result.
17346       continue;
17347     if (*UI != N)
17348       return SDValue();
17349   }
17350 
17351   SDValue Addr = LD->getOperand(1);
17352   SDValue Vector = N->getOperand(0);
17353   // Search for a use of the address operand that is an increment.
17354   for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), UE =
17355        Addr.getNode()->use_end(); UI != UE; ++UI) {
17356     SDNode *User = *UI;
17357     if (User->getOpcode() != ISD::ADD
17358         || UI.getUse().getResNo() != Addr.getResNo())
17359       continue;
17360 
17361     // If the increment is a constant, it must match the memory ref size.
17362     SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
17363     if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
17364       uint32_t IncVal = CInc->getZExtValue();
17365       unsigned NumBytes = VT.getScalarSizeInBits() / 8;
17366       if (IncVal != NumBytes)
17367         continue;
17368       Inc = DAG.getRegister(AArch64::XZR, MVT::i64);
17369     }
17370 
17371     // To avoid cycle construction make sure that neither the load nor the add
17372     // are predecessors to each other or the Vector.
17373     SmallPtrSet<const SDNode *, 32> Visited;
17374     SmallVector<const SDNode *, 16> Worklist;
17375     Visited.insert(Addr.getNode());
17376     Worklist.push_back(User);
17377     Worklist.push_back(LD);
17378     Worklist.push_back(Vector.getNode());
17379     if (SDNode::hasPredecessorHelper(LD, Visited, Worklist) ||
17380         SDNode::hasPredecessorHelper(User, Visited, Worklist))
17381       continue;
17382 
17383     SmallVector<SDValue, 8> Ops;
17384     Ops.push_back(LD->getOperand(0));  // Chain
17385     if (IsLaneOp) {
17386       Ops.push_back(Vector);           // The vector to be inserted
17387       Ops.push_back(Lane);             // The lane to be inserted in the vector
17388     }
17389     Ops.push_back(Addr);
17390     Ops.push_back(Inc);
17391 
17392     EVT Tys[3] = { VT, MVT::i64, MVT::Other };
17393     SDVTList SDTys = DAG.getVTList(Tys);
17394     unsigned NewOp = IsLaneOp ? AArch64ISD::LD1LANEpost : AArch64ISD::LD1DUPpost;
17395     SDValue UpdN = DAG.getMemIntrinsicNode(NewOp, SDLoc(N), SDTys, Ops,
17396                                            MemVT,
17397                                            LoadSDN->getMemOperand());
17398 
17399     // Update the uses.
17400     SDValue NewResults[] = {
17401         SDValue(LD, 0),            // The result of load
17402         SDValue(UpdN.getNode(), 2) // Chain
17403     };
17404     DCI.CombineTo(LD, NewResults);
17405     DCI.CombineTo(N, SDValue(UpdN.getNode(), 0));     // Dup/Inserted Result
17406     DCI.CombineTo(User, SDValue(UpdN.getNode(), 1));  // Write back register
17407 
17408     break;
17409   }
17410   return SDValue();
17411 }
17412 
17413 /// Simplify ``Addr`` given that the top byte of it is ignored by HW during
17414 /// address translation.
17415 static bool performTBISimplification(SDValue Addr,
17416                                      TargetLowering::DAGCombinerInfo &DCI,
17417                                      SelectionDAG &DAG) {
17418   APInt DemandedMask = APInt::getLowBitsSet(64, 56);
17419   KnownBits Known;
17420   TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
17421                                         !DCI.isBeforeLegalizeOps());
17422   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
17423   if (TLI.SimplifyDemandedBits(Addr, DemandedMask, Known, TLO)) {
17424     DCI.CommitTargetLoweringOpt(TLO);
17425     return true;
17426   }
17427   return false;
17428 }
17429 
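      // Fold away an extend feeding a truncating store when the store's memory type
      // matches the pre-extension type:
      //   truncstore(zext/sext/aext x), MemVT == typeof(x)  -->  store(x)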
17430 static SDValue foldTruncStoreOfExt(SelectionDAG &DAG, SDNode *N) {
17431   assert((N->getOpcode() == ISD::STORE || N->getOpcode() == ISD::MSTORE) &&
17432          "Expected STORE dag node in input!");
17433 
17434   if (auto Store = dyn_cast<StoreSDNode>(N)) {
17435     if (!Store->isTruncatingStore() || Store->isIndexed())
17436       return SDValue();
17437     SDValue Ext = Store->getValue();
17438     auto ExtOpCode = Ext.getOpcode();
17439     if (ExtOpCode != ISD::ZERO_EXTEND && ExtOpCode != ISD::SIGN_EXTEND &&
17440         ExtOpCode != ISD::ANY_EXTEND)
17441       return SDValue();
17442     SDValue Orig = Ext->getOperand(0);
17443     if (Store->getMemoryVT() != Orig.getValueType())
17444       return SDValue();
17445     return DAG.getStore(Store->getChain(), SDLoc(Store), Orig,
17446                         Store->getBasePtr(), Store->getMemOperand());
17447   }
17448 
17449   return SDValue();
17450 }
17451 
17452 static SDValue performSTORECombine(SDNode *N,
17453                                    TargetLowering::DAGCombinerInfo &DCI,
17454                                    SelectionDAG &DAG,
17455                                    const AArch64Subtarget *Subtarget) {
17456   StoreSDNode *ST = cast<StoreSDNode>(N);
17457   SDValue Chain = ST->getChain();
17458   SDValue Value = ST->getValue();
17459   SDValue Ptr = ST->getBasePtr();
17460 
17461   // If this is an FP_ROUND followed by a store, fold this into a truncating
17462   // store. We can do this even if this is already a truncstore.
17463   // We purposefully don't care about legality of the nodes here as we know
17464   // they can be split down into something legal.
17465   if (DCI.isBeforeLegalizeOps() && Value.getOpcode() == ISD::FP_ROUND &&
17466       Value.getNode()->hasOneUse() && ST->isUnindexed() &&
17467       Subtarget->useSVEForFixedLengthVectors() &&
17468       Value.getValueType().isFixedLengthVector() &&
17469       Value.getValueType().getFixedSizeInBits() >=
17470           Subtarget->getMinSVEVectorSizeInBits())
17471     return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
17472                              ST->getMemoryVT(), ST->getMemOperand());
17473 
17474   if (SDValue Split = splitStores(N, DCI, DAG, Subtarget))
17475     return Split;
17476 
17477   if (Subtarget->supportsAddressTopByteIgnored() &&
17478       performTBISimplification(N->getOperand(2), DCI, DAG))
17479     return SDValue(N, 0);
17480 
17481   if (SDValue Store = foldTruncStoreOfExt(DAG, N))
17482     return Store;
17483 
17484   return SDValue();
17485 }
17486 
17487 /// \return true if part of the index was folded into the Base.
17488 static bool foldIndexIntoBase(SDValue &BasePtr, SDValue &Index, SDValue Scale,
17489                               SDLoc DL, SelectionDAG &DAG) {
17490   // This function assumes a vector of i64 indices.
17491   EVT IndexVT = Index.getValueType();
17492   if (!IndexVT.isVector() || IndexVT.getVectorElementType() != MVT::i64)
17493     return false;
17494 
17495   // Simplify:
17496   //   BasePtr = Ptr
17497   //   Index = X + splat(Offset)
17498   // ->
17499   //   BasePtr = Ptr + Offset * scale.
17500   //   Index = X
17501   if (Index.getOpcode() == ISD::ADD) {
17502     if (auto Offset = DAG.getSplatValue(Index.getOperand(1))) {
17503       Offset = DAG.getNode(ISD::MUL, DL, MVT::i64, Offset, Scale);
17504       BasePtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, Offset);
17505       Index = Index.getOperand(0);
17506       return true;
17507     }
17508   }
17509 
17510   // Simplify:
17511   //   BasePtr = Ptr
17512   //   Index = (X + splat(Offset)) << splat(Shift)
17513   // ->
17514   //   BasePtr = Ptr + (Offset << Shift) * scale)
17515   //   Index = X << splat(shift)
17516   if (Index.getOpcode() == ISD::SHL &&
17517       Index.getOperand(0).getOpcode() == ISD::ADD) {
17518     SDValue Add = Index.getOperand(0);
17519     SDValue ShiftOp = Index.getOperand(1);
17520     SDValue OffsetOp = Add.getOperand(1);
17521     if (auto Shift = DAG.getSplatValue(ShiftOp))
17522       if (auto Offset = DAG.getSplatValue(OffsetOp)) {
17523         Offset = DAG.getNode(ISD::SHL, DL, MVT::i64, Offset, Shift);
17524         Offset = DAG.getNode(ISD::MUL, DL, MVT::i64, Offset, Scale);
17525         BasePtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, Offset);
17526         Index = DAG.getNode(ISD::SHL, DL, Index.getValueType(),
17527                             Add.getOperand(0), ShiftOp);
17528         return true;
17529       }
17530   }
17531 
17532   return false;
17533 }
17534 
17535 // Analyse the specified address returning true if a more optimal addressing
17536 // mode is available. When returning true all parameters are updated to reflect
17537 // their recommended values.
17538 static bool findMoreOptimalIndexType(const MaskedGatherScatterSDNode *N,
17539                                      SDValue &BasePtr, SDValue &Index,
17540                                      SelectionDAG &DAG) {
17541   // Try to iteratively fold parts of the index into the base pointer to
17542   // simplify the index as much as possible.
17543   bool Changed = false;
17544   while (foldIndexIntoBase(BasePtr, Index, N->getScale(), SDLoc(N), DAG))
17545     Changed = true;
17546 
17547   // Only consider element types that are pointer sized as smaller types can
17548   // be easily promoted.
17549   EVT IndexVT = Index.getValueType();
17550   if (IndexVT.getVectorElementType() != MVT::i64 || IndexVT == MVT::nxv2i64)
17551     return Changed;
17552 
17553   // Match:
17554   //   Index = step(const)
17555   int64_t Stride = 0;
17556   if (Index.getOpcode() == ISD::STEP_VECTOR)
17557     Stride = cast<ConstantSDNode>(Index.getOperand(0))->getSExtValue();
17558 
17559   // Match:
17560   //   Index = step(const) << shift(const)
17561   else if (Index.getOpcode() == ISD::SHL &&
17562            Index.getOperand(0).getOpcode() == ISD::STEP_VECTOR) {
17563     SDValue RHS = Index.getOperand(1);
17564     if (auto *Shift =
17565             dyn_cast_or_null<ConstantSDNode>(DAG.getSplatValue(RHS))) {
17566       int64_t Step = (int64_t)Index.getOperand(0).getConstantOperandVal(1);
17567       Stride = Step << Shift->getZExtValue();
17568     }
17569   }
17570 
17571   // Return early because no supported pattern is found.
17572   if (Stride == 0)
17573     return Changed;
17574 
17575   if (Stride < std::numeric_limits<int32_t>::min() ||
17576       Stride > std::numeric_limits<int32_t>::max())
17577     return Changed;
17578 
17579   const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
17580   unsigned MaxVScale =
17581       Subtarget.getMaxSVEVectorSizeInBits() / AArch64::SVEBitsPerBlock;
17582   int64_t LastElementOffset =
17583       IndexVT.getVectorMinNumElements() * Stride * MaxVScale;
17584 
17585   if (LastElementOffset < std::numeric_limits<int32_t>::min() ||
17586       LastElementOffset > std::numeric_limits<int32_t>::max())
17587     return Changed;
17588 
17589   EVT NewIndexVT = IndexVT.changeVectorElementType(MVT::i32);
17590   // Stride is not scaled explicitly by 'Scale' here, because that scaling
17591   // happens as part of the gather/scatter addressing mode.
17592   Index = DAG.getNode(ISD::STEP_VECTOR, SDLoc(N), NewIndexVT,
17593                       DAG.getTargetConstant(Stride, SDLoc(N), MVT::i32));
17594   return true;
17595 }
17596 
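      // Before type legalisation, try to rewrite a masked gather/scatter with a
      // simpler base pointer and index (see findMoreOptimalIndexType above).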
17597 static SDValue performMaskedGatherScatterCombine(
17598     SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG) {
17599   MaskedGatherScatterSDNode *MGS = cast<MaskedGatherScatterSDNode>(N);
17600   assert(MGS && "Can only combine gather load or scatter store nodes");
17601 
17602   if (!DCI.isBeforeLegalize())
17603     return SDValue();
17604 
17605   SDLoc DL(MGS);
17606   SDValue Chain = MGS->getChain();
17607   SDValue Scale = MGS->getScale();
17608   SDValue Index = MGS->getIndex();
17609   SDValue Mask = MGS->getMask();
17610   SDValue BasePtr = MGS->getBasePtr();
17611   ISD::MemIndexType IndexType = MGS->getIndexType();
17612 
17613   if (!findMoreOptimalIndexType(MGS, BasePtr, Index, DAG))
17614     return SDValue();
17615 
17616   // A more optimal index was found, so rebuild the gather/scatter with the new
17617   // base pointer and index, which are more legalisation friendly.
17618   if (auto *MGT = dyn_cast<MaskedGatherSDNode>(MGS)) {
17619     SDValue PassThru = MGT->getPassThru();
17620     SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
17621     return DAG.getMaskedGather(
17622         DAG.getVTList(N->getValueType(0), MVT::Other), MGT->getMemoryVT(), DL,
17623         Ops, MGT->getMemOperand(), IndexType, MGT->getExtensionType());
17624   }
17625   auto *MSC = cast<MaskedScatterSDNode>(MGS);
17626   SDValue Data = MSC->getValue();
17627   SDValue Ops[] = {Chain, Data, Mask, BasePtr, Index, Scale};
17628   return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), MSC->getMemoryVT(), DL,
17629                               Ops, MSC->getMemOperand(), IndexType,
17630                               MSC->isTruncatingStore());
17631 }
17632 
17633 /// Target-specific DAG combine function for NEON load/store intrinsics
17634 /// to merge base address updates.
17635 static SDValue performNEONPostLDSTCombine(SDNode *N,
17636                                           TargetLowering::DAGCombinerInfo &DCI,
17637                                           SelectionDAG &DAG) {
17638   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
17639     return SDValue();
17640 
17641   unsigned AddrOpIdx = N->getNumOperands() - 1;
17642   SDValue Addr = N->getOperand(AddrOpIdx);
17643 
17644   // Search for a use of the address operand that is an increment.
17645   for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
17646        UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
17647     SDNode *User = *UI;
17648     if (User->getOpcode() != ISD::ADD ||
17649         UI.getUse().getResNo() != Addr.getResNo())
17650       continue;
17651 
17652     // Check that the add is independent of the load/store.  Otherwise, folding
17653     // it would create a cycle.
17654     SmallPtrSet<const SDNode *, 32> Visited;
17655     SmallVector<const SDNode *, 16> Worklist;
17656     Visited.insert(Addr.getNode());
17657     Worklist.push_back(N);
17658     Worklist.push_back(User);
17659     if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
17660         SDNode::hasPredecessorHelper(User, Visited, Worklist))
17661       continue;
17662 
17663     // Find the new opcode for the updating load/store.
17664     bool IsStore = false;
17665     bool IsLaneOp = false;
17666     bool IsDupOp = false;
17667     unsigned NewOpc = 0;
17668     unsigned NumVecs = 0;
17669     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
17670     switch (IntNo) {
17671     default: llvm_unreachable("unexpected intrinsic for Neon base update");
17672     case Intrinsic::aarch64_neon_ld2:       NewOpc = AArch64ISD::LD2post;
17673       NumVecs = 2; break;
17674     case Intrinsic::aarch64_neon_ld3:       NewOpc = AArch64ISD::LD3post;
17675       NumVecs = 3; break;
17676     case Intrinsic::aarch64_neon_ld4:       NewOpc = AArch64ISD::LD4post;
17677       NumVecs = 4; break;
17678     case Intrinsic::aarch64_neon_st2:       NewOpc = AArch64ISD::ST2post;
17679       NumVecs = 2; IsStore = true; break;
17680     case Intrinsic::aarch64_neon_st3:       NewOpc = AArch64ISD::ST3post;
17681       NumVecs = 3; IsStore = true; break;
17682     case Intrinsic::aarch64_neon_st4:       NewOpc = AArch64ISD::ST4post;
17683       NumVecs = 4; IsStore = true; break;
17684     case Intrinsic::aarch64_neon_ld1x2:     NewOpc = AArch64ISD::LD1x2post;
17685       NumVecs = 2; break;
17686     case Intrinsic::aarch64_neon_ld1x3:     NewOpc = AArch64ISD::LD1x3post;
17687       NumVecs = 3; break;
17688     case Intrinsic::aarch64_neon_ld1x4:     NewOpc = AArch64ISD::LD1x4post;
17689       NumVecs = 4; break;
17690     case Intrinsic::aarch64_neon_st1x2:     NewOpc = AArch64ISD::ST1x2post;
17691       NumVecs = 2; IsStore = true; break;
17692     case Intrinsic::aarch64_neon_st1x3:     NewOpc = AArch64ISD::ST1x3post;
17693       NumVecs = 3; IsStore = true; break;
17694     case Intrinsic::aarch64_neon_st1x4:     NewOpc = AArch64ISD::ST1x4post;
17695       NumVecs = 4; IsStore = true; break;
17696     case Intrinsic::aarch64_neon_ld2r:      NewOpc = AArch64ISD::LD2DUPpost;
17697       NumVecs = 2; IsDupOp = true; break;
17698     case Intrinsic::aarch64_neon_ld3r:      NewOpc = AArch64ISD::LD3DUPpost;
17699       NumVecs = 3; IsDupOp = true; break;
17700     case Intrinsic::aarch64_neon_ld4r:      NewOpc = AArch64ISD::LD4DUPpost;
17701       NumVecs = 4; IsDupOp = true; break;
17702     case Intrinsic::aarch64_neon_ld2lane:   NewOpc = AArch64ISD::LD2LANEpost;
17703       NumVecs = 2; IsLaneOp = true; break;
17704     case Intrinsic::aarch64_neon_ld3lane:   NewOpc = AArch64ISD::LD3LANEpost;
17705       NumVecs = 3; IsLaneOp = true; break;
17706     case Intrinsic::aarch64_neon_ld4lane:   NewOpc = AArch64ISD::LD4LANEpost;
17707       NumVecs = 4; IsLaneOp = true; break;
17708     case Intrinsic::aarch64_neon_st2lane:   NewOpc = AArch64ISD::ST2LANEpost;
17709       NumVecs = 2; IsStore = true; IsLaneOp = true; break;
17710     case Intrinsic::aarch64_neon_st3lane:   NewOpc = AArch64ISD::ST3LANEpost;
17711       NumVecs = 3; IsStore = true; IsLaneOp = true; break;
17712     case Intrinsic::aarch64_neon_st4lane:   NewOpc = AArch64ISD::ST4LANEpost;
17713       NumVecs = 4; IsStore = true; IsLaneOp = true; break;
17714     }
17715 
17716     EVT VecTy;
17717     if (IsStore)
17718       VecTy = N->getOperand(2).getValueType();
17719     else
17720       VecTy = N->getValueType(0);
17721 
17722     // If the increment is a constant, it must match the memory ref size.
17723     SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
17724     if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
17725       uint32_t IncVal = CInc->getZExtValue();
17726       unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
17727       if (IsLaneOp || IsDupOp)
17728         NumBytes /= VecTy.getVectorNumElements();
17729       if (IncVal != NumBytes)
17730         continue;
17731       Inc = DAG.getRegister(AArch64::XZR, MVT::i64);
17732     }
17733     SmallVector<SDValue, 8> Ops;
17734     Ops.push_back(N->getOperand(0)); // Incoming chain
17735     // Load lane and store have vector list as input.
17736     if (IsLaneOp || IsStore)
17737       for (unsigned i = 2; i < AddrOpIdx; ++i)
17738         Ops.push_back(N->getOperand(i));
17739     Ops.push_back(Addr); // Base register
17740     Ops.push_back(Inc);
17741 
17742     // Return Types.
17743     EVT Tys[6];
17744     unsigned NumResultVecs = (IsStore ? 0 : NumVecs);
17745     unsigned n;
17746     for (n = 0; n < NumResultVecs; ++n)
17747       Tys[n] = VecTy;
17748     Tys[n++] = MVT::i64;  // Type of write back register
17749     Tys[n] = MVT::Other;  // Type of the chain
17750     SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2));
17751 
17752     MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
17753     SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, Ops,
17754                                            MemInt->getMemoryVT(),
17755                                            MemInt->getMemOperand());
17756 
17757     // Update the uses.
17758     std::vector<SDValue> NewResults;
17759     for (unsigned i = 0; i < NumResultVecs; ++i) {
17760       NewResults.push_back(SDValue(UpdN.getNode(), i));
17761     }
17762     NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1));
17763     DCI.CombineTo(N, NewResults);
17764     DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
17765 
17766     break;
17767   }
17768   return SDValue();
17769 }
17770 
17771 // Checks to see if the value is the prescribed width and returns information
17772 // about its extension mode.
17773 static
17774 bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType) {
17775   ExtType = ISD::NON_EXTLOAD;
17776   switch(V.getNode()->getOpcode()) {
17777   default:
17778     return false;
17779   case ISD::LOAD: {
17780     LoadSDNode *LoadNode = cast<LoadSDNode>(V.getNode());
17781     if ((LoadNode->getMemoryVT() == MVT::i8 && width == 8)
17782        || (LoadNode->getMemoryVT() == MVT::i16 && width == 16)) {
17783       ExtType = LoadNode->getExtensionType();
17784       return true;
17785     }
17786     return false;
17787   }
17788   case ISD::AssertSext: {
17789     VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
17790     if ((TypeNode->getVT() == MVT::i8 && width == 8)
17791        || (TypeNode->getVT() == MVT::i16 && width == 16)) {
17792       ExtType = ISD::SEXTLOAD;
17793       return true;
17794     }
17795     return false;
17796   }
17797   case ISD::AssertZext: {
17798     VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
17799     if ((TypeNode->getVT() == MVT::i8 && width == 8)
17800        || (TypeNode->getVT() == MVT::i16 && width == 16)) {
17801       ExtType = ISD::ZEXTLOAD;
17802       return true;
17803     }
17804     return false;
17805   }
17806   case ISD::Constant:
17807   case ISD::TargetConstant: {
17808     return std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) <
17809            1LL << (width - 1);
17810   }
17811   }
17812 
17813   return true;
17814 }
17815 
17816 // This function does a whole lot of voodoo to determine if the tests are
17817 // equivalent without and with a mask. Essentially what happens is that given a
17818 // DAG resembling:
17819 //
17820 //  +-------------+ +-------------+ +-------------+ +-------------+
17821 //  |    Input    | | AddConstant | | CompConstant| |     CC      |
17822 //  +-------------+ +-------------+ +-------------+ +-------------+
17823 //           |           |           |               |
17824 //           V           V           |    +----------+
17825 //          +-------------+  +----+  |    |
17826 //          |     ADD     |  |0xff|  |    |
17827 //          +-------------+  +----+  |    |
17828 //                  |           |    |    |
17829 //                  V           V    |    |
17830 //                 +-------------+   |    |
17831 //                 |     AND     |   |    |
17832 //                 +-------------+   |    |
17833 //                      |            |    |
17834 //                      +-----+      |    |
17835 //                            |      |    |
17836 //                            V      V    V
17837 //                           +-------------+
17838 //                           |     CMP     |
17839 //                           +-------------+
17840 //
17841 // The AND node may be safely removed for some combinations of inputs. In
17842 // particular we need to take into account the extension type of the Input,
17843 // the exact values of AddConstant, CompConstant, and CC, along with the nominal
// width of the input (this can work for inputs of any width; the above graph
// is specific to 8 bits).
17846 //
17847 // The specific equations were worked out by generating output tables for each
// AArch64CC value in terms of the AddConstant (w1) and CompConstant (w2). The
// problem was simplified by working with 4-bit inputs, which means we only
// needed to reason about 24 distinct bit patterns: 8 patterns unique to zero
// extension (8,15), 8 patterns unique to sign extension (-8,-1), and 8
// patterns present in both extensions (0,7). For every distinct pair of
// AddConstant and CompConstant bit patterns we can consider the masked and
// unmasked versions to be equivalent if the result of this function is true
// for all 16 distinct bit patterns for the current extension type of Input (w0).
17856 //
17857 //   sub      w8, w0, w1
17858 //   and      w10, w8, #0x0f
17859 //   cmp      w8, w2
17860 //   cset     w9, AArch64CC
17861 //   cmp      w10, w2
17862 //   cset     w11, AArch64CC
17863 //   cmp      w9, w11
17864 //   cset     w0, eq
17865 //   ret
17866 //
// Since the sequence above shows when the outputs are equivalent, it defines
// when it is safe to remove the AND. Unfortunately it only runs on AArch64
// hardware and would be too expensive to run during compilation. The equations
// below were instead written and validated in a test harness that confirmed
// they give the same results as the sequence above for all inputs, so they can
// be used to determine whether the removal is legal.
//
// isEquivalentMaskless() is the test for whether the AND can be removed,
// factored out of the DAG pattern recognition because the DAG can take several
// forms.
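//
// As an illustration only (this is not the harness that was actually used),
// the check for a single condition code can be brute-forced for 4-bit,
// zero-extended inputs roughly like so:
//
//   static bool bruteForceEquivalentNE(int AddC, int CompC) {
//     for (unsigned In = 0; In != 16; ++In) {            // every w0 pattern
//       uint32_t Sub = (uint32_t)In - (uint32_t)AddC;    // sub w8, w0, w1
//       uint32_t Masked = Sub & 0xf;                     // and w10, w8, #0x0f
//       bool NoMask = Sub != (uint32_t)CompC;            // cmp w8, w2 ; cset ne
//       bool WithMask = Masked != (uint32_t)CompC;       // cmp w10, w2 ; cset ne
//       if (NoMask != WithMask)
//         return false;                                  // the AND is observable
//     }
//     return true;                                       // safe to drop the AND
//   }
//
// isEquivalentMaskless() below encodes the closed-form results of this kind of
// exhaustive comparison for each AArch64CC value.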
17876 
17877 static bool isEquivalentMaskless(unsigned CC, unsigned width,
17878                                  ISD::LoadExtType ExtType, int AddConstant,
17879                                  int CompConstant) {
  // By being careful with our equations and only writing them in terms of
  // symbolic values and well-known constants (0, 1, -1, MaxUInt), we can
  // make them generally applicable to all bit widths.
17883   int MaxUInt = (1 << width);
17884 
17885   // For the purposes of these comparisons sign extending the type is
17886   // equivalent to zero extending the add and displacing it by half the integer
17887   // width. Provided we are careful and make sure our equations are valid over
17888   // the whole range we can just adjust the input and avoid writing equations
17889   // for sign extended inputs.
17890   if (ExtType == ISD::SEXTLOAD)
17891     AddConstant -= (1 << (width-1));
17892 
17893   switch(CC) {
17894   case AArch64CC::LE:
17895   case AArch64CC::GT:
17896     if ((AddConstant == 0) ||
17897         (CompConstant == MaxUInt - 1 && AddConstant < 0) ||
17898         (AddConstant >= 0 && CompConstant < 0) ||
17899         (AddConstant <= 0 && CompConstant <= 0 && CompConstant < AddConstant))
17900       return true;
17901     break;
17902   case AArch64CC::LT:
17903   case AArch64CC::GE:
17904     if ((AddConstant == 0) ||
17905         (AddConstant >= 0 && CompConstant <= 0) ||
17906         (AddConstant <= 0 && CompConstant <= 0 && CompConstant <= AddConstant))
17907       return true;
17908     break;
17909   case AArch64CC::HI:
17910   case AArch64CC::LS:
17911     if ((AddConstant >= 0 && CompConstant < 0) ||
17912        (AddConstant <= 0 && CompConstant >= -1 &&
17913         CompConstant < AddConstant + MaxUInt))
17914       return true;
17915    break;
17916   case AArch64CC::PL:
17917   case AArch64CC::MI:
17918     if ((AddConstant == 0) ||
17919         (AddConstant > 0 && CompConstant <= 0) ||
17920         (AddConstant < 0 && CompConstant <= AddConstant))
17921       return true;
17922     break;
17923   case AArch64CC::LO:
17924   case AArch64CC::HS:
17925     if ((AddConstant >= 0 && CompConstant <= 0) ||
17926         (AddConstant <= 0 && CompConstant >= 0 &&
17927          CompConstant <= AddConstant + MaxUInt))
17928       return true;
17929     break;
17930   case AArch64CC::EQ:
17931   case AArch64CC::NE:
17932     if ((AddConstant > 0 && CompConstant < 0) ||
17933         (AddConstant < 0 && CompConstant >= 0 &&
17934          CompConstant < AddConstant + MaxUInt) ||
17935         (AddConstant >= 0 && CompConstant >= 0 &&
17936          CompConstant >= AddConstant) ||
17937         (AddConstant <= 0 && CompConstant < 0 && CompConstant < AddConstant))
17938       return true;
17939     break;
17940   case AArch64CC::VS:
17941   case AArch64CC::VC:
17942   case AArch64CC::AL:
17943   case AArch64CC::NV:
17944     return true;
17945   case AArch64CC::Invalid:
17946     break;
17947   }
17948 
17949   return false;
17950 }
17951 
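// Fold the mask feeding a flag-consuming node: N carries a condition code at
// operand CCIndex and is fed by a SUBS at operand CmpIndex. When the SUBS
// compares a masked 8- or 16-bit quantity and isEquivalentMaskless() proves the
// flags are unchanged without the mask, the AND is dropped. Illustrative shape
// of the rewrite (assuming an 8-bit mask):
//
//   (SUBS (AND (ADD x, c1), 0xff), c2)  -->  (SUBS (ADD x, c1), c2)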
17952 static
17953 SDValue performCONDCombine(SDNode *N,
17954                            TargetLowering::DAGCombinerInfo &DCI,
17955                            SelectionDAG &DAG, unsigned CCIndex,
17956                            unsigned CmpIndex) {
17957   unsigned CC = cast<ConstantSDNode>(N->getOperand(CCIndex))->getSExtValue();
17958   SDNode *SubsNode = N->getOperand(CmpIndex).getNode();
17959   unsigned CondOpcode = SubsNode->getOpcode();
17960 
17961   if (CondOpcode != AArch64ISD::SUBS)
17962     return SDValue();
17963 
17964   // There is a SUBS feeding this condition. Is it fed by a mask we can
17965   // use?
17966 
17967   SDNode *AndNode = SubsNode->getOperand(0).getNode();
17968   unsigned MaskBits = 0;
17969 
17970   if (AndNode->getOpcode() != ISD::AND)
17971     return SDValue();
17972 
17973   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(AndNode->getOperand(1))) {
17974     uint32_t CNV = CN->getZExtValue();
17975     if (CNV == 255)
17976       MaskBits = 8;
17977     else if (CNV == 65535)
17978       MaskBits = 16;
17979   }
17980 
17981   if (!MaskBits)
17982     return SDValue();
17983 
17984   SDValue AddValue = AndNode->getOperand(0);
17985 
17986   if (AddValue.getOpcode() != ISD::ADD)
17987     return SDValue();
17988 
17989   // The basic dag structure is correct, grab the inputs and validate them.
17990 
17991   SDValue AddInputValue1 = AddValue.getNode()->getOperand(0);
17992   SDValue AddInputValue2 = AddValue.getNode()->getOperand(1);
17993   SDValue SubsInputValue = SubsNode->getOperand(1);
17994 
  // The mask is present and all of the values originate from a smaller type,
  // so let's see if the mask is superfluous.
17997 
17998   if (!isa<ConstantSDNode>(AddInputValue2.getNode()) ||
17999       !isa<ConstantSDNode>(SubsInputValue.getNode()))
18000     return SDValue();
18001 
18002   ISD::LoadExtType ExtType;
18003 
18004   if (!checkValueWidth(SubsInputValue, MaskBits, ExtType) ||
18005       !checkValueWidth(AddInputValue2, MaskBits, ExtType) ||
18006       !checkValueWidth(AddInputValue1, MaskBits, ExtType) )
18007     return SDValue();
18008 
18009   if(!isEquivalentMaskless(CC, MaskBits, ExtType,
18010                 cast<ConstantSDNode>(AddInputValue2.getNode())->getSExtValue(),
18011                 cast<ConstantSDNode>(SubsInputValue.getNode())->getSExtValue()))
18012     return SDValue();
18013 
18014   // The AND is not necessary, remove it.
18015 
18016   SDVTList VTs = DAG.getVTList(SubsNode->getValueType(0),
18017                                SubsNode->getValueType(1));
18018   SDValue Ops[] = { AddValue, SubsNode->getOperand(1) };
18019 
18020   SDValue NewValue = DAG.getNode(CondOpcode, SDLoc(SubsNode), VTs, Ops);
18021   DAG.ReplaceAllUsesWith(SubsNode, NewValue.getNode());
18022 
18023   return SDValue(N, 0);
18024 }
18025 
18026 // Optimize compare with zero and branch.
18027 static SDValue performBRCONDCombine(SDNode *N,
18028                                     TargetLowering::DAGCombinerInfo &DCI,
18029                                     SelectionDAG &DAG) {
18030   MachineFunction &MF = DAG.getMachineFunction();
18031   // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
18032   // will not be produced, as they are conditional branch instructions that do
18033   // not set flags.
18034   if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
18035     return SDValue();
18036 
18037   if (SDValue NV = performCONDCombine(N, DCI, DAG, 2, 3))
18038     N = NV.getNode();
18039   SDValue Chain = N->getOperand(0);
18040   SDValue Dest = N->getOperand(1);
18041   SDValue CCVal = N->getOperand(2);
18042   SDValue Cmp = N->getOperand(3);
18043 
18044   assert(isa<ConstantSDNode>(CCVal) && "Expected a ConstantSDNode here!");
18045   unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue();
18046   if (CC != AArch64CC::EQ && CC != AArch64CC::NE)
18047     return SDValue();
18048 
18049   unsigned CmpOpc = Cmp.getOpcode();
18050   if (CmpOpc != AArch64ISD::ADDS && CmpOpc != AArch64ISD::SUBS)
18051     return SDValue();
18052 
18053   // Only attempt folding if there is only one use of the flag and no use of the
18054   // value.
18055   if (!Cmp->hasNUsesOfValue(0, 0) || !Cmp->hasNUsesOfValue(1, 1))
18056     return SDValue();
18057 
18058   SDValue LHS = Cmp.getOperand(0);
18059   SDValue RHS = Cmp.getOperand(1);
18060 
18061   assert(LHS.getValueType() == RHS.getValueType() &&
18062          "Expected the value type to be the same for both operands!");
18063   if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64)
18064     return SDValue();
18065 
18066   if (isNullConstant(LHS))
18067     std::swap(LHS, RHS);
18068 
18069   if (!isNullConstant(RHS))
18070     return SDValue();
18071 
18072   if (LHS.getOpcode() == ISD::SHL || LHS.getOpcode() == ISD::SRA ||
18073       LHS.getOpcode() == ISD::SRL)
18074     return SDValue();
18075 
18076   // Fold the compare into the branch instruction.
18077   SDValue BR;
18078   if (CC == AArch64CC::EQ)
18079     BR = DAG.getNode(AArch64ISD::CBZ, SDLoc(N), MVT::Other, Chain, LHS, Dest);
18080   else
18081     BR = DAG.getNode(AArch64ISD::CBNZ, SDLoc(N), MVT::Other, Chain, LHS, Dest);
18082 
18083   // Do not add new nodes to DAG combiner worklist.
18084   DCI.CombineTo(N, BR, false);
18085 
18086   return SDValue();
18087 }
18088 
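// Fold away the CSEL that guards a CTTZ against a zero input:
//
//   (CSEL 0, (CTTZ x), eq, (SUBS x, 0))  -->  (AND (CTTZ x), BitWidth - 1)
//   (CSEL (CTTZ x), 0, ne, (SUBS x, 0))  -->  (AND (CTTZ x), BitWidth - 1)
//
// This works because ISD::CTTZ is defined to return BitWidth for a zero input
// (and the AArch64 RBIT + CLZ lowering matches that), and
// BitWidth & (BitWidth - 1) == 0 is exactly the value the CSEL would select.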
18089 static SDValue foldCSELofCTTZ(SDNode *N, SelectionDAG &DAG) {
18090   unsigned CC = N->getConstantOperandVal(2);
18091   SDValue SUBS = N->getOperand(3);
18092   SDValue Zero, CTTZ;
18093 
18094   if (CC == AArch64CC::EQ && SUBS.getOpcode() == AArch64ISD::SUBS) {
18095     Zero = N->getOperand(0);
18096     CTTZ = N->getOperand(1);
18097   } else if (CC == AArch64CC::NE && SUBS.getOpcode() == AArch64ISD::SUBS) {
18098     Zero = N->getOperand(1);
18099     CTTZ = N->getOperand(0);
18100   } else
18101     return SDValue();
18102 
18103   if ((CTTZ.getOpcode() != ISD::CTTZ && CTTZ.getOpcode() != ISD::TRUNCATE) ||
18104       (CTTZ.getOpcode() == ISD::TRUNCATE &&
18105        CTTZ.getOperand(0).getOpcode() != ISD::CTTZ))
18106     return SDValue();
18107 
18108   assert((CTTZ.getValueType() == MVT::i32 || CTTZ.getValueType() == MVT::i64) &&
18109          "Illegal type in CTTZ folding");
18110 
18111   if (!isNullConstant(Zero) || !isNullConstant(SUBS.getOperand(1)))
18112     return SDValue();
18113 
18114   SDValue X = CTTZ.getOpcode() == ISD::TRUNCATE
18115                   ? CTTZ.getOperand(0).getOperand(0)
18116                   : CTTZ.getOperand(0);
18117 
18118   if (X != SUBS.getOperand(0))
18119     return SDValue();
18120 
18121   unsigned BitWidth = CTTZ.getOpcode() == ISD::TRUNCATE
18122                           ? CTTZ.getOperand(0).getValueSizeInBits()
18123                           : CTTZ.getValueSizeInBits();
18124   SDValue BitWidthMinusOne =
18125       DAG.getConstant(BitWidth - 1, SDLoc(N), CTTZ.getValueType());
18126   return DAG.getNode(ISD::AND, SDLoc(N), CTTZ.getValueType(), CTTZ,
18127                      BitWidthMinusOne);
18128 }
18129 
18130 // Optimize CSEL instructions
18131 static SDValue performCSELCombine(SDNode *N,
18132                                   TargetLowering::DAGCombinerInfo &DCI,
18133                                   SelectionDAG &DAG) {
18134   // CSEL x, x, cc -> x
18135   if (N->getOperand(0) == N->getOperand(1))
18136     return N->getOperand(0);
18137 
18138   // CSEL 0, cttz(X), eq(X, 0) -> AND cttz bitwidth-1
18139   // CSEL cttz(X), 0, ne(X, 0) -> AND cttz bitwidth-1
18140   if (SDValue Folded = foldCSELofCTTZ(N, DAG))
    return Folded;
18142 
18143   return performCONDCombine(N, DCI, DAG, 2, 3);
18144 }
18145 
// Try to re-use an already extended operand of a vector SetCC feeding an
18147 // extended select. Doing so avoids requiring another full extension of the
18148 // SET_CC result when lowering the select.
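//
// For example, if every use of (setcc a, splat(C), setlt) is a VSELECT with a
// wider element type and the DAG already contains (sign_extend a) to that type,
// the compare is rebuilt on the extended operands (with the splat constant
// sign-extended to match), so the selects no longer need to re-extend the
// compare result.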
18149 static SDValue tryToWidenSetCCOperands(SDNode *Op, SelectionDAG &DAG) {
18150   EVT Op0MVT = Op->getOperand(0).getValueType();
18151   if (!Op0MVT.isVector() || Op->use_empty())
18152     return SDValue();
18153 
18154   // Make sure that all uses of Op are VSELECTs with result matching types where
18155   // the result type has a larger element type than the SetCC operand.
18156   SDNode *FirstUse = *Op->use_begin();
18157   if (FirstUse->getOpcode() != ISD::VSELECT)
18158     return SDValue();
18159   EVT UseMVT = FirstUse->getValueType(0);
18160   if (UseMVT.getScalarSizeInBits() <= Op0MVT.getScalarSizeInBits())
18161     return SDValue();
18162   if (any_of(Op->uses(), [&UseMVT](const SDNode *N) {
18163         return N->getOpcode() != ISD::VSELECT || N->getValueType(0) != UseMVT;
18164       }))
18165     return SDValue();
18166 
18167   APInt V;
18168   if (!ISD::isConstantSplatVector(Op->getOperand(1).getNode(), V))
18169     return SDValue();
18170 
18171   SDLoc DL(Op);
18172   SDValue Op0ExtV;
18173   SDValue Op1ExtV;
18174   ISD::CondCode CC = cast<CondCodeSDNode>(Op->getOperand(2))->get();
18175   // Check if the first operand of the SET_CC is already extended. If it is,
18176   // split the SET_CC and re-use the extended version of the operand.
18177   SDNode *Op0SExt = DAG.getNodeIfExists(ISD::SIGN_EXTEND, DAG.getVTList(UseMVT),
18178                                         Op->getOperand(0));
18179   SDNode *Op0ZExt = DAG.getNodeIfExists(ISD::ZERO_EXTEND, DAG.getVTList(UseMVT),
18180                                         Op->getOperand(0));
18181   if (Op0SExt && (isSignedIntSetCC(CC) || isIntEqualitySetCC(CC))) {
18182     Op0ExtV = SDValue(Op0SExt, 0);
18183     Op1ExtV = DAG.getNode(ISD::SIGN_EXTEND, DL, UseMVT, Op->getOperand(1));
18184   } else if (Op0ZExt && (isUnsignedIntSetCC(CC) || isIntEqualitySetCC(CC))) {
18185     Op0ExtV = SDValue(Op0ZExt, 0);
18186     Op1ExtV = DAG.getNode(ISD::ZERO_EXTEND, DL, UseMVT, Op->getOperand(1));
18187   } else
18188     return SDValue();
18189 
18190   return DAG.getNode(ISD::SETCC, DL, UseMVT.changeVectorElementType(MVT::i1),
18191                      Op0ExtV, Op1ExtV, Op->getOperand(2));
18192 }
18193 
18194 static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG) {
18195   assert(N->getOpcode() == ISD::SETCC && "Unexpected opcode!");
18196   SDValue LHS = N->getOperand(0);
18197   SDValue RHS = N->getOperand(1);
18198   ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
18199   SDLoc DL(N);
18200   EVT VT = N->getValueType(0);
18201 
18202   if (SDValue V = tryToWidenSetCCOperands(N, DAG))
18203     return V;
18204 
18205   // setcc (csel 0, 1, cond, X), 1, ne ==> csel 0, 1, !cond, X
18206   if (Cond == ISD::SETNE && isOneConstant(RHS) &&
18207       LHS->getOpcode() == AArch64ISD::CSEL &&
18208       isNullConstant(LHS->getOperand(0)) && isOneConstant(LHS->getOperand(1)) &&
18209       LHS->hasOneUse()) {
18210     // Invert CSEL's condition.
18211     auto *OpCC = cast<ConstantSDNode>(LHS.getOperand(2));
18212     auto OldCond = static_cast<AArch64CC::CondCode>(OpCC->getZExtValue());
18213     auto NewCond = getInvertedCondCode(OldCond);
18214 
18215     // csel 0, 1, !cond, X
18216     SDValue CSEL =
18217         DAG.getNode(AArch64ISD::CSEL, DL, LHS.getValueType(), LHS.getOperand(0),
18218                     LHS.getOperand(1), DAG.getConstant(NewCond, DL, MVT::i32),
18219                     LHS.getOperand(3));
18220     return DAG.getZExtOrTrunc(CSEL, DL, VT);
18221   }
18222 
18223   // setcc (srl x, imm), 0, ne ==> setcc (and x, (-1 << imm)), 0, ne
18224   if (Cond == ISD::SETNE && isNullConstant(RHS) &&
18225       LHS->getOpcode() == ISD::SRL && isa<ConstantSDNode>(LHS->getOperand(1)) &&
18226       LHS->hasOneUse()) {
18227     EVT TstVT = LHS->getValueType(0);
18228     if (TstVT.isScalarInteger() && TstVT.getFixedSizeInBits() <= 64) {
      // This pattern gets better optimization in emitComparison later on.
18230       uint64_t TstImm = -1ULL << LHS->getConstantOperandVal(1);
18231       SDValue TST = DAG.getNode(ISD::AND, DL, TstVT, LHS->getOperand(0),
18232                                 DAG.getConstant(TstImm, DL, TstVT));
18233       return DAG.getNode(ISD::SETCC, DL, VT, TST, RHS, N->getOperand(2));
18234     }
18235   }
18236 
18237   return SDValue();
18238 }
18239 
18240 // Replace a flag-setting operator (eg ANDS) with the generic version
18241 // (eg AND) if the flag is unused.
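//
// For example, an (ANDS x, y) whose NZCV result (value #1) is unused becomes a
// plain (AND x, y), with a dummy zero standing in for the dead flags value; if
// an identical (AND x, y) already exists in the DAG, its uses are redirected to
// this node's first result instead.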
18242 static SDValue performFlagSettingCombine(SDNode *N,
18243                                          TargetLowering::DAGCombinerInfo &DCI,
18244                                          unsigned GenericOpcode) {
18245   SDLoc DL(N);
18246   SDValue LHS = N->getOperand(0);
18247   SDValue RHS = N->getOperand(1);
18248   EVT VT = N->getValueType(0);
18249 
18250   // If the flag result isn't used, convert back to a generic opcode.
18251   if (!N->hasAnyUseOfValue(1)) {
18252     SDValue Res = DCI.DAG.getNode(GenericOpcode, DL, VT, N->ops());
18253     return DCI.DAG.getMergeValues({Res, DCI.DAG.getConstant(0, DL, MVT::i32)},
18254                                   DL);
18255   }
18256 
18257   // Combine identical generic nodes into this node, re-using the result.
18258   if (SDNode *Generic = DCI.DAG.getNodeIfExists(
18259           GenericOpcode, DCI.DAG.getVTList(VT), {LHS, RHS}))
18260     DCI.CombineTo(Generic, SDValue(N, 0));
18261 
18262   return SDValue();
18263 }
18264 
18265 static SDValue performSetCCPunpkCombine(SDNode *N, SelectionDAG &DAG) {
18266   // setcc_merge_zero pred
18267   //   (sign_extend (extract_subvector (setcc_merge_zero ... pred ...))), 0, ne
18268   //   => extract_subvector (inner setcc_merge_zero)
18269   SDValue Pred = N->getOperand(0);
18270   SDValue LHS = N->getOperand(1);
18271   SDValue RHS = N->getOperand(2);
18272   ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(3))->get();
18273 
18274   if (Cond != ISD::SETNE || !isZerosVector(RHS.getNode()) ||
18275       LHS->getOpcode() != ISD::SIGN_EXTEND)
18276     return SDValue();
18277 
18278   SDValue Extract = LHS->getOperand(0);
18279   if (Extract->getOpcode() != ISD::EXTRACT_SUBVECTOR ||
18280       Extract->getValueType(0) != N->getValueType(0) ||
18281       Extract->getConstantOperandVal(1) != 0)
18282     return SDValue();
18283 
18284   SDValue InnerSetCC = Extract->getOperand(0);
18285   if (InnerSetCC->getOpcode() != AArch64ISD::SETCC_MERGE_ZERO)
18286     return SDValue();
18287 
18288   // By this point we've effectively got
18289   // zero_inactive_lanes_and_trunc_i1(sext_i1(A)). If we can prove A's inactive
18290   // lanes are already zero then the trunc(sext()) sequence is redundant and we
18291   // can operate on A directly.
18292   SDValue InnerPred = InnerSetCC.getOperand(0);
18293   if (Pred.getOpcode() == AArch64ISD::PTRUE &&
18294       InnerPred.getOpcode() == AArch64ISD::PTRUE &&
18295       Pred.getConstantOperandVal(0) == InnerPred.getConstantOperandVal(0) &&
18296       Pred->getConstantOperandVal(0) >= AArch64SVEPredPattern::vl1 &&
18297       Pred->getConstantOperandVal(0) <= AArch64SVEPredPattern::vl256)
18298     return Extract;
18299 
18300   return SDValue();
18301 }
18302 
18303 static SDValue
18304 performSetccMergeZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
18305   assert(N->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
18306          "Unexpected opcode!");
18307 
18308   SelectionDAG &DAG = DCI.DAG;
18309   SDValue Pred = N->getOperand(0);
18310   SDValue LHS = N->getOperand(1);
18311   SDValue RHS = N->getOperand(2);
18312   ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(3))->get();
18313 
18314   if (SDValue V = performSetCCPunpkCombine(N, DAG))
18315     return V;
18316 
18317   if (Cond == ISD::SETNE && isZerosVector(RHS.getNode()) &&
18318       LHS->getOpcode() == ISD::SIGN_EXTEND &&
18319       LHS->getOperand(0)->getValueType(0) == N->getValueType(0)) {
18320     //    setcc_merge_zero(
18321     //       pred, extend(setcc_merge_zero(pred, ...)), != splat(0))
18322     // => setcc_merge_zero(pred, ...)
18323     if (LHS->getOperand(0)->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
18324         LHS->getOperand(0)->getOperand(0) == Pred)
18325       return LHS->getOperand(0);
18326 
18327     //    setcc_merge_zero(
18328     //        all_active, extend(nxvNi1 ...), != splat(0))
18329     // -> nxvNi1 ...
18330     if (isAllActivePredicate(DAG, Pred))
18331       return LHS->getOperand(0);
18332 
18333     //    setcc_merge_zero(
18334     //        pred, extend(nxvNi1 ...), != splat(0))
18335     // -> nxvNi1 and(pred, ...)
18336     if (DCI.isAfterLegalizeDAG())
18337       // Do this after legalization to allow more folds on setcc_merge_zero
18338       // to be recognized.
18339       return DAG.getNode(ISD::AND, SDLoc(N), N->getValueType(0),
18340                          LHS->getOperand(0), Pred);
18341   }
18342 
18343   return SDValue();
18344 }
18345 
18346 // Optimize some simple tbz/tbnz cases.  Returns the new operand and bit to test
18347 // as well as whether the test should be inverted.  This code is required to
18348 // catch these cases (as opposed to standard dag combines) because
18349 // AArch64ISD::TBZ is matched during legalization.
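// For example, (tbz (and (srl x, 2), 0x8), 3) folds to (tbz x, 5): the AND rule
// drops the mask because bit 3 of 0x8 is set, and the SRL rule then moves the
// tested bit up by the shift amount.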
18350 static SDValue getTestBitOperand(SDValue Op, unsigned &Bit, bool &Invert,
18351                                  SelectionDAG &DAG) {
18352 
18353   if (!Op->hasOneUse())
18354     return Op;
18355 
18356   // We don't handle undef/constant-fold cases below, as they should have
18357   // already been taken care of (e.g. and of 0, test of undefined shifted bits,
18358   // etc.)
18359 
18360   // (tbz (trunc x), b) -> (tbz x, b)
18361   // This case is just here to enable more of the below cases to be caught.
18362   if (Op->getOpcode() == ISD::TRUNCATE &&
18363       Bit < Op->getValueType(0).getSizeInBits()) {
18364     return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18365   }
18366 
18367   // (tbz (any_ext x), b) -> (tbz x, b) if we don't use the extended bits.
18368   if (Op->getOpcode() == ISD::ANY_EXTEND &&
18369       Bit < Op->getOperand(0).getValueSizeInBits()) {
18370     return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18371   }
18372 
18373   if (Op->getNumOperands() != 2)
18374     return Op;
18375 
18376   auto *C = dyn_cast<ConstantSDNode>(Op->getOperand(1));
18377   if (!C)
18378     return Op;
18379 
18380   switch (Op->getOpcode()) {
18381   default:
18382     return Op;
18383 
18384   // (tbz (and x, m), b) -> (tbz x, b)
18385   case ISD::AND:
18386     if ((C->getZExtValue() >> Bit) & 1)
18387       return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18388     return Op;
18389 
18390   // (tbz (shl x, c), b) -> (tbz x, b-c)
18391   case ISD::SHL:
18392     if (C->getZExtValue() <= Bit &&
18393         (Bit - C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
18394       Bit = Bit - C->getZExtValue();
18395       return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18396     }
18397     return Op;
18398 
18399   // (tbz (sra x, c), b) -> (tbz x, b+c) or (tbz x, msb) if b+c is > # bits in x
18400   case ISD::SRA:
18401     Bit = Bit + C->getZExtValue();
18402     if (Bit >= Op->getValueType(0).getSizeInBits())
18403       Bit = Op->getValueType(0).getSizeInBits() - 1;
18404     return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18405 
18406   // (tbz (srl x, c), b) -> (tbz x, b+c)
18407   case ISD::SRL:
18408     if ((Bit + C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
18409       Bit = Bit + C->getZExtValue();
18410       return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18411     }
18412     return Op;
18413 
18414   // (tbz (xor x, -1), b) -> (tbnz x, b)
18415   case ISD::XOR:
18416     if ((C->getZExtValue() >> Bit) & 1)
18417       Invert = !Invert;
18418     return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18419   }
18420 }
18421 
18422 // Optimize test single bit zero/non-zero and branch.
18423 static SDValue performTBZCombine(SDNode *N,
18424                                  TargetLowering::DAGCombinerInfo &DCI,
18425                                  SelectionDAG &DAG) {
18426   unsigned Bit = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
18427   bool Invert = false;
18428   SDValue TestSrc = N->getOperand(1);
18429   SDValue NewTestSrc = getTestBitOperand(TestSrc, Bit, Invert, DAG);
18430 
18431   if (TestSrc == NewTestSrc)
18432     return SDValue();
18433 
18434   unsigned NewOpc = N->getOpcode();
18435   if (Invert) {
18436     if (NewOpc == AArch64ISD::TBZ)
18437       NewOpc = AArch64ISD::TBNZ;
18438     else {
18439       assert(NewOpc == AArch64ISD::TBNZ);
18440       NewOpc = AArch64ISD::TBZ;
18441     }
18442   }
18443 
18444   SDLoc DL(N);
18445   return DAG.getNode(NewOpc, DL, MVT::Other, N->getOperand(0), NewTestSrc,
18446                      DAG.getConstant(Bit, DL, MVT::i64), N->getOperand(3));
18447 }
18448 
// Swap vselect operands where doing so may allow a predicated operation to
// implement the `sel`.
18451 //
18452 //     (vselect (setcc ( condcode) (_) (_)) (a)          (op (a) (b)))
18453 //  => (vselect (setcc (!condcode) (_) (_)) (op (a) (b)) (a))
18454 static SDValue trySwapVSelectOperands(SDNode *N, SelectionDAG &DAG) {
18455   auto SelectA = N->getOperand(1);
18456   auto SelectB = N->getOperand(2);
18457   auto NTy = N->getValueType(0);
18458 
18459   if (!NTy.isScalableVector())
18460     return SDValue();
18461   SDValue SetCC = N->getOperand(0);
18462   if (SetCC.getOpcode() != ISD::SETCC || !SetCC.hasOneUse())
18463     return SDValue();
18464 
18465   switch (SelectB.getOpcode()) {
18466   default:
18467     return SDValue();
18468   case ISD::FMUL:
18469   case ISD::FSUB:
18470   case ISD::FADD:
18471     break;
18472   }
18473   if (SelectA != SelectB.getOperand(0))
18474     return SDValue();
18475 
18476   ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
18477   ISD::CondCode InverseCC =
18478       ISD::getSetCCInverse(CC, SetCC.getOperand(0).getValueType());
18479   auto InverseSetCC =
18480       DAG.getSetCC(SDLoc(SetCC), SetCC.getValueType(), SetCC.getOperand(0),
18481                    SetCC.getOperand(1), InverseCC);
18482 
18483   return DAG.getNode(ISD::VSELECT, SDLoc(N), NTy,
18484                      {InverseSetCC, SelectB, SelectA});
18485 }
18486 
18487 // vselect (v1i1 setcc) ->
18488 //     vselect (v1iXX setcc)  (XX is the size of the compared operand type)
18489 // FIXME: Currently the type legalizer can't handle VSELECT having v1i1 as
18490 // condition. If it can legalize "VSELECT v1i1" correctly, no need to combine
18491 // such VSELECT.
18492 static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG) {
18493   if (auto SwapResult = trySwapVSelectOperands(N, DAG))
18494     return SwapResult;
18495 
18496   SDValue N0 = N->getOperand(0);
18497   EVT CCVT = N0.getValueType();
18498 
18499   if (isAllActivePredicate(DAG, N0))
18500     return N->getOperand(1);
18501 
18502   if (isAllInactivePredicate(N0))
18503     return N->getOperand(2);
18504 
18505   // Check for sign pattern (VSELECT setgt, iN lhs, -1, 1, -1) and transform
  // into (OR (ASR lhs, N-1), 1), which requires fewer instructions for the
18507   // supported types.
18508   SDValue SetCC = N->getOperand(0);
18509   if (SetCC.getOpcode() == ISD::SETCC &&
18510       SetCC.getOperand(2) == DAG.getCondCode(ISD::SETGT)) {
18511     SDValue CmpLHS = SetCC.getOperand(0);
18512     EVT VT = CmpLHS.getValueType();
18513     SDNode *CmpRHS = SetCC.getOperand(1).getNode();
18514     SDNode *SplatLHS = N->getOperand(1).getNode();
18515     SDNode *SplatRHS = N->getOperand(2).getNode();
18516     APInt SplatLHSVal;
18517     if (CmpLHS.getValueType() == N->getOperand(1).getValueType() &&
18518         VT.isSimple() &&
18519         is_contained(
18520             makeArrayRef({MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
18521                           MVT::v2i32, MVT::v4i32, MVT::v2i64}),
18522             VT.getSimpleVT().SimpleTy) &&
18523         ISD::isConstantSplatVector(SplatLHS, SplatLHSVal) &&
18524         SplatLHSVal.isOne() && ISD::isConstantSplatVectorAllOnes(CmpRHS) &&
18525         ISD::isConstantSplatVectorAllOnes(SplatRHS)) {
18526       unsigned NumElts = VT.getVectorNumElements();
18527       SmallVector<SDValue, 8> Ops(
18528           NumElts, DAG.getConstant(VT.getScalarSizeInBits() - 1, SDLoc(N),
18529                                    VT.getScalarType()));
18530       SDValue Val = DAG.getBuildVector(VT, SDLoc(N), Ops);
18531 
18532       auto Shift = DAG.getNode(ISD::SRA, SDLoc(N), VT, CmpLHS, Val);
18533       auto Or = DAG.getNode(ISD::OR, SDLoc(N), VT, Shift, N->getOperand(1));
18534       return Or;
18535     }
18536   }
18537 
18538   if (N0.getOpcode() != ISD::SETCC ||
18539       CCVT.getVectorElementCount() != ElementCount::getFixed(1) ||
18540       CCVT.getVectorElementType() != MVT::i1)
18541     return SDValue();
18542 
18543   EVT ResVT = N->getValueType(0);
18544   EVT CmpVT = N0.getOperand(0).getValueType();
18545   // Only combine when the result type is of the same size as the compared
18546   // operands.
18547   if (ResVT.getSizeInBits() != CmpVT.getSizeInBits())
18548     return SDValue();
18549 
18550   SDValue IfTrue = N->getOperand(1);
18551   SDValue IfFalse = N->getOperand(2);
18552   SetCC = DAG.getSetCC(SDLoc(N), CmpVT.changeVectorElementTypeToInteger(),
18553                        N0.getOperand(0), N0.getOperand(1),
18554                        cast<CondCodeSDNode>(N0.getOperand(2))->get());
18555   return DAG.getNode(ISD::VSELECT, SDLoc(N), ResVT, SetCC,
18556                      IfTrue, IfFalse);
18557 }
18558 
18559 /// A vector select: "(select vL, vR, (setcc LHS, RHS))" is best performed with
18560 /// the compare-mask instructions rather than going via NZCV, even if LHS and
18561 /// RHS are really scalar. This replaces any scalar setcc in the above pattern
18562 /// with a vector one followed by a DUP shuffle on the result.
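///
/// For example, (select (setcc f32 a, b, cc), v4f32 x, v4f32 y) is rewritten to
/// compare a and b in lane 0 of a v4f32 SETCC, DUP that mask lane across a
/// v4i32 vector, and feed the result to a VSELECT of x and y.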
18563 static SDValue performSelectCombine(SDNode *N,
18564                                     TargetLowering::DAGCombinerInfo &DCI) {
18565   SelectionDAG &DAG = DCI.DAG;
18566   SDValue N0 = N->getOperand(0);
18567   EVT ResVT = N->getValueType(0);
18568 
18569   if (N0.getOpcode() != ISD::SETCC)
18570     return SDValue();
18571 
18572   if (ResVT.isScalableVector())
18573     return SDValue();
18574 
18575   // Make sure the SETCC result is either i1 (initial DAG), or i32, the lowered
18576   // scalar SetCCResultType. We also don't expect vectors, because we assume
18577   // that selects fed by vector SETCCs are canonicalized to VSELECT.
18578   assert((N0.getValueType() == MVT::i1 || N0.getValueType() == MVT::i32) &&
18579          "Scalar-SETCC feeding SELECT has unexpected result type!");
18580 
18581   // If NumMaskElts == 0, the comparison is larger than select result. The
18582   // largest real NEON comparison is 64-bits per lane, which means the result is
18583   // at most 32-bits and an illegal vector. Just bail out for now.
18584   EVT SrcVT = N0.getOperand(0).getValueType();
18585 
18586   // Don't try to do this optimization when the setcc itself has i1 operands.
18587   // There are no legal vectors of i1, so this would be pointless.
18588   if (SrcVT == MVT::i1)
18589     return SDValue();
18590 
18591   int NumMaskElts = ResVT.getSizeInBits() / SrcVT.getSizeInBits();
18592   if (!ResVT.isVector() || NumMaskElts == 0)
18593     return SDValue();
18594 
18595   SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumMaskElts);
18596   EVT CCVT = SrcVT.changeVectorElementTypeToInteger();
18597 
18598   // Also bail out if the vector CCVT isn't the same size as ResVT.
18599   // This can happen if the SETCC operand size doesn't divide the ResVT size
18600   // (e.g., f64 vs v3f32).
18601   if (CCVT.getSizeInBits() != ResVT.getSizeInBits())
18602     return SDValue();
18603 
18604   // Make sure we didn't create illegal types, if we're not supposed to.
18605   assert(DCI.isBeforeLegalize() ||
18606          DAG.getTargetLoweringInfo().isTypeLegal(SrcVT));
18607 
18608   // First perform a vector comparison, where lane 0 is the one we're interested
18609   // in.
18610   SDLoc DL(N0);
18611   SDValue LHS =
18612       DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(0));
18613   SDValue RHS =
18614       DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(1));
18615   SDValue SetCC = DAG.getNode(ISD::SETCC, DL, CCVT, LHS, RHS, N0.getOperand(2));
18616 
18617   // Now duplicate the comparison mask we want across all other lanes.
18618   SmallVector<int, 8> DUPMask(CCVT.getVectorNumElements(), 0);
18619   SDValue Mask = DAG.getVectorShuffle(CCVT, DL, SetCC, SetCC, DUPMask);
18620   Mask = DAG.getNode(ISD::BITCAST, DL,
18621                      ResVT.changeVectorElementTypeToInteger(), Mask);
18622 
18623   return DAG.getSelect(DL, ResVT, Mask, N->getOperand(1), N->getOperand(2));
18624 }
18625 
18626 static SDValue performDUPCombine(SDNode *N,
18627                                  TargetLowering::DAGCombinerInfo &DCI) {
18628   EVT VT = N->getValueType(0);
18629   // If "v2i32 DUP(x)" and "v4i32 DUP(x)" both exist, use an extract from the
18630   // 128bit vector version.
18631   if (VT.is64BitVector() && DCI.isAfterLegalizeDAG()) {
18632     EVT LVT = VT.getDoubleNumVectorElementsVT(*DCI.DAG.getContext());
18633     if (SDNode *LN = DCI.DAG.getNodeIfExists(
18634             N->getOpcode(), DCI.DAG.getVTList(LVT), {N->getOperand(0)})) {
18635       SDLoc DL(N);
18636       return DCI.DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SDValue(LN, 0),
18637                              DCI.DAG.getConstant(0, DL, MVT::i64));
18638     }
18639   }
18640 
18641   return performPostLD1Combine(N, DCI, false);
18642 }
18643 
18644 /// Get rid of unnecessary NVCASTs (that don't change the type).
18645 static SDValue performNVCASTCombine(SDNode *N) {
18646   if (N->getValueType(0) == N->getOperand(0).getValueType())
18647     return N->getOperand(0);
18648 
18649   return SDValue();
18650 }
18651 
18652 // If all users of the globaladdr are of the form (globaladdr + constant), find
18653 // the smallest constant, fold it into the globaladdr's offset and rewrite the
18654 // globaladdr as (globaladdr + constant) - constant.
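// For example, if the only uses are (add G, 24) and (add G, 40), G is rewritten
// as (sub (globaladdr G + 24), 24); the smallest offset is thereby folded into
// the relocatable address, and the remaining adds are cleaned up by generic
// combines.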
18655 static SDValue performGlobalAddressCombine(SDNode *N, SelectionDAG &DAG,
18656                                            const AArch64Subtarget *Subtarget,
18657                                            const TargetMachine &TM) {
18658   auto *GN = cast<GlobalAddressSDNode>(N);
18659   if (Subtarget->ClassifyGlobalReference(GN->getGlobal(), TM) !=
18660       AArch64II::MO_NO_FLAG)
18661     return SDValue();
18662 
18663   uint64_t MinOffset = -1ull;
18664   for (SDNode *N : GN->uses()) {
18665     if (N->getOpcode() != ISD::ADD)
18666       return SDValue();
18667     auto *C = dyn_cast<ConstantSDNode>(N->getOperand(0));
18668     if (!C)
18669       C = dyn_cast<ConstantSDNode>(N->getOperand(1));
18670     if (!C)
18671       return SDValue();
18672     MinOffset = std::min(MinOffset, C->getZExtValue());
18673   }
18674   uint64_t Offset = MinOffset + GN->getOffset();
18675 
18676   // Require that the new offset is larger than the existing one. Otherwise, we
18677   // can end up oscillating between two possible DAGs, for example,
18678   // (add (add globaladdr + 10, -1), 1) and (add globaladdr + 9, 1).
18679   if (Offset <= uint64_t(GN->getOffset()))
18680     return SDValue();
18681 
18682   // Check whether folding this offset is legal. It must not go out of bounds of
18683   // the referenced object to avoid violating the code model, and must be
18684   // smaller than 2^20 because this is the largest offset expressible in all
18685   // object formats. (The IMAGE_REL_ARM64_PAGEBASE_REL21 relocation in COFF
18686   // stores an immediate signed 21 bit offset.)
18687   //
18688   // This check also prevents us from folding negative offsets, which will end
18689   // up being treated in the same way as large positive ones. They could also
18690   // cause code model violations, and aren't really common enough to matter.
18691   if (Offset >= (1 << 20))
18692     return SDValue();
18693 
18694   const GlobalValue *GV = GN->getGlobal();
18695   Type *T = GV->getValueType();
18696   if (!T->isSized() ||
18697       Offset > GV->getParent()->getDataLayout().getTypeAllocSize(T))
18698     return SDValue();
18699 
18700   SDLoc DL(GN);
18701   SDValue Result = DAG.getGlobalAddress(GV, DL, MVT::i64, Offset);
18702   return DAG.getNode(ISD::SUB, DL, MVT::i64, Result,
18703                      DAG.getConstant(MinOffset, DL, MVT::i64));
18704 }
18705 
// Turns the vector of indices into a vector of byte offsets by scaling Offset
18707 // by (BitWidth / 8).
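//
// For example, with BitWidth == 32 each index is shifted left by two, i.e.
// multiplied by four bytes.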
18708 static SDValue getScaledOffsetForBitWidth(SelectionDAG &DAG, SDValue Offset,
18709                                           SDLoc DL, unsigned BitWidth) {
18710   assert(Offset.getValueType().isScalableVector() &&
18711          "This method is only for scalable vectors of offsets");
18712 
18713   SDValue Shift = DAG.getConstant(Log2_32(BitWidth / 8), DL, MVT::i64);
18714   SDValue SplatShift = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, Shift);
18715 
18716   return DAG.getNode(ISD::SHL, DL, MVT::nxv2i64, Offset, SplatShift);
18717 }
18718 
18719 /// Check if the value of \p OffsetInBytes can be used as an immediate for
18720 /// the gather load/prefetch and scatter store instructions with vector base and
18721 /// immediate offset addressing mode:
18722 ///
18723 ///      [<Zn>.[S|D]{, #<imm>}]
18724 ///
18725 /// where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.
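///
/// For example, with 4-byte elements the valid immediates are 0, 4, 8, ..., 124
/// (i.e. 31 * 4).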
18726 inline static bool isValidImmForSVEVecImmAddrMode(unsigned OffsetInBytes,
18727                                                   unsigned ScalarSizeInBytes) {
18728   // The immediate is not a multiple of the scalar size.
18729   if (OffsetInBytes % ScalarSizeInBytes)
18730     return false;
18731 
18732   // The immediate is out of range.
18733   if (OffsetInBytes / ScalarSizeInBytes > 31)
18734     return false;
18735 
18736   return true;
18737 }
18738 
18739 /// Check if the value of \p Offset represents a valid immediate for the SVE
/// gather load/prefetch and scatter store instructions with vector base and
18741 /// immediate offset addressing mode:
18742 ///
18743 ///      [<Zn>.[S|D]{, #<imm>}]
18744 ///
18745 /// where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.
18746 static bool isValidImmForSVEVecImmAddrMode(SDValue Offset,
18747                                            unsigned ScalarSizeInBytes) {
18748   ConstantSDNode *OffsetConst = dyn_cast<ConstantSDNode>(Offset.getNode());
18749   return OffsetConst && isValidImmForSVEVecImmAddrMode(
18750                             OffsetConst->getZExtValue(), ScalarSizeInBytes);
18751 }
18752 
18753 static SDValue performScatterStoreCombine(SDNode *N, SelectionDAG &DAG,
18754                                           unsigned Opcode,
18755                                           bool OnlyPackedOffsets = true) {
18756   const SDValue Src = N->getOperand(2);
18757   const EVT SrcVT = Src->getValueType(0);
18758   assert(SrcVT.isScalableVector() &&
18759          "Scatter stores are only possible for SVE vectors");
18760 
18761   SDLoc DL(N);
18762   MVT SrcElVT = SrcVT.getVectorElementType().getSimpleVT();
18763 
18764   // Make sure that source data will fit into an SVE register
18765   if (SrcVT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
18766     return SDValue();
18767 
18768   // For FPs, ACLE only supports _packed_ single and double precision types.
18769   if (SrcElVT.isFloatingPoint())
18770     if ((SrcVT != MVT::nxv4f32) && (SrcVT != MVT::nxv2f64))
18771       return SDValue();
18772 
18773   // Depending on the addressing mode, this is either a pointer or a vector of
18774   // pointers (that fits into one register)
18775   SDValue Base = N->getOperand(4);
18776   // Depending on the addressing mode, this is either a single offset or a
18777   // vector of offsets  (that fits into one register)
18778   SDValue Offset = N->getOperand(5);
18779 
18780   // For "scalar + vector of indices", just scale the indices. This only
18781   // applies to non-temporal scatters because there's no instruction that takes
  // indices.
18783   if (Opcode == AArch64ISD::SSTNT1_INDEX_PRED) {
18784     Offset =
18785         getScaledOffsetForBitWidth(DAG, Offset, DL, SrcElVT.getSizeInBits());
18786     Opcode = AArch64ISD::SSTNT1_PRED;
18787   }
18788 
  // In the case of non-temporal scatter stores there's only one SVE instruction
18790   // per data-size: "scalar + vector", i.e.
18791   //    * stnt1{b|h|w|d} { z0.s }, p0/z, [z0.s, x0]
18792   // Since we do have intrinsics that allow the arguments to be in a different
18793   // order, we may need to swap them to match the spec.
18794   if (Opcode == AArch64ISD::SSTNT1_PRED && Offset.getValueType().isVector())
18795     std::swap(Base, Offset);
18796 
18797   // SST1_IMM requires that the offset is an immediate that is:
18798   //    * a multiple of #SizeInBytes,
18799   //    * in the range [0, 31 x #SizeInBytes],
18800   // where #SizeInBytes is the size in bytes of the stored items. For
18801   // immediates outside that range and non-immediate scalar offsets use SST1 or
18802   // SST1_UXTW instead.
18803   if (Opcode == AArch64ISD::SST1_IMM_PRED) {
18804     if (!isValidImmForSVEVecImmAddrMode(Offset,
18805                                         SrcVT.getScalarSizeInBits() / 8)) {
18806       if (MVT::nxv4i32 == Base.getValueType().getSimpleVT().SimpleTy)
18807         Opcode = AArch64ISD::SST1_UXTW_PRED;
18808       else
18809         Opcode = AArch64ISD::SST1_PRED;
18810 
18811       std::swap(Base, Offset);
18812     }
18813   }
18814 
18815   auto &TLI = DAG.getTargetLoweringInfo();
18816   if (!TLI.isTypeLegal(Base.getValueType()))
18817     return SDValue();
18818 
18819   // Some scatter store variants allow unpacked offsets, but only as nxv2i32
  // vectors. These are implicitly sign (sxtw) or zero (zxtw) extended to
18821   // nxv2i64. Legalize accordingly.
18822   if (!OnlyPackedOffsets &&
18823       Offset.getValueType().getSimpleVT().SimpleTy == MVT::nxv2i32)
18824     Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset).getValue(0);
18825 
18826   if (!TLI.isTypeLegal(Offset.getValueType()))
18827     return SDValue();
18828 
18829   // Source value type that is representable in hardware
18830   EVT HwSrcVt = getSVEContainerType(SrcVT);
18831 
18832   // Keep the original type of the input data to store - this is needed to be
18833   // able to select the correct instruction, e.g. ST1B, ST1H, ST1W and ST1D. For
18834   // FP values we want the integer equivalent, so just use HwSrcVt.
18835   SDValue InputVT = DAG.getValueType(SrcVT);
18836   if (SrcVT.isFloatingPoint())
18837     InputVT = DAG.getValueType(HwSrcVt);
18838 
18839   SDVTList VTs = DAG.getVTList(MVT::Other);
18840   SDValue SrcNew;
18841 
18842   if (Src.getValueType().isFloatingPoint())
18843     SrcNew = DAG.getNode(ISD::BITCAST, DL, HwSrcVt, Src);
18844   else
18845     SrcNew = DAG.getNode(ISD::ANY_EXTEND, DL, HwSrcVt, Src);
18846 
18847   SDValue Ops[] = {N->getOperand(0), // Chain
18848                    SrcNew,
18849                    N->getOperand(3), // Pg
18850                    Base,
18851                    Offset,
18852                    InputVT};
18853 
18854   return DAG.getNode(Opcode, DL, VTs, Ops);
18855 }
18856 
18857 static SDValue performGatherLoadCombine(SDNode *N, SelectionDAG &DAG,
18858                                         unsigned Opcode,
18859                                         bool OnlyPackedOffsets = true) {
18860   const EVT RetVT = N->getValueType(0);
18861   assert(RetVT.isScalableVector() &&
18862          "Gather loads are only possible for SVE vectors");
18863 
18864   SDLoc DL(N);
18865 
18866   // Make sure that the loaded data will fit into an SVE register
18867   if (RetVT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
18868     return SDValue();
18869 
18870   // Depending on the addressing mode, this is either a pointer or a vector of
18871   // pointers (that fits into one register)
18872   SDValue Base = N->getOperand(3);
18873   // Depending on the addressing mode, this is either a single offset or a
18874   // vector of offsets  (that fits into one register)
18875   SDValue Offset = N->getOperand(4);
18876 
18877   // For "scalar + vector of indices", just scale the indices. This only
18878   // applies to non-temporal gathers because there's no instruction that takes
  // indices.
18880   if (Opcode == AArch64ISD::GLDNT1_INDEX_MERGE_ZERO) {
18881     Offset = getScaledOffsetForBitWidth(DAG, Offset, DL,
18882                                         RetVT.getScalarSizeInBits());
18883     Opcode = AArch64ISD::GLDNT1_MERGE_ZERO;
18884   }
18885 
18886   // In the case of non-temporal gather loads there's only one SVE instruction
18887   // per data-size: "scalar + vector", i.e.
18888   //    * ldnt1{b|h|w|d} { z0.s }, p0/z, [z0.s, x0]
18889   // Since we do have intrinsics that allow the arguments to be in a different
18890   // order, we may need to swap them to match the spec.
18891   if (Opcode == AArch64ISD::GLDNT1_MERGE_ZERO &&
18892       Offset.getValueType().isVector())
18893     std::swap(Base, Offset);
18894 
18895   // GLD{FF}1_IMM requires that the offset is an immediate that is:
18896   //    * a multiple of #SizeInBytes,
18897   //    * in the range [0, 31 x #SizeInBytes],
18898   // where #SizeInBytes is the size in bytes of the loaded items. For
18899   // immediates outside that range and non-immediate scalar offsets use
18900   // GLD1_MERGE_ZERO or GLD1_UXTW_MERGE_ZERO instead.
18901   if (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO ||
18902       Opcode == AArch64ISD::GLDFF1_IMM_MERGE_ZERO) {
18903     if (!isValidImmForSVEVecImmAddrMode(Offset,
18904                                         RetVT.getScalarSizeInBits() / 8)) {
18905       if (MVT::nxv4i32 == Base.getValueType().getSimpleVT().SimpleTy)
18906         Opcode = (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO)
18907                      ? AArch64ISD::GLD1_UXTW_MERGE_ZERO
18908                      : AArch64ISD::GLDFF1_UXTW_MERGE_ZERO;
18909       else
18910         Opcode = (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO)
18911                      ? AArch64ISD::GLD1_MERGE_ZERO
18912                      : AArch64ISD::GLDFF1_MERGE_ZERO;
18913 
18914       std::swap(Base, Offset);
18915     }
18916   }
18917 
18918   auto &TLI = DAG.getTargetLoweringInfo();
18919   if (!TLI.isTypeLegal(Base.getValueType()))
18920     return SDValue();
18921 
18922   // Some gather load variants allow unpacked offsets, but only as nxv2i32
  // vectors. These are implicitly sign (sxtw) or zero (zxtw) extended to
18924   // nxv2i64. Legalize accordingly.
18925   if (!OnlyPackedOffsets &&
18926       Offset.getValueType().getSimpleVT().SimpleTy == MVT::nxv2i32)
18927     Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset).getValue(0);
18928 
18929   // Return value type that is representable in hardware
18930   EVT HwRetVt = getSVEContainerType(RetVT);
18931 
18932   // Keep the original output value type around - this is needed to be able to
18933   // select the correct instruction, e.g. LD1B, LD1H, LD1W and LD1D. For FP
  // values we want the integer equivalent, so just use HwRetVt.
18935   SDValue OutVT = DAG.getValueType(RetVT);
18936   if (RetVT.isFloatingPoint())
18937     OutVT = DAG.getValueType(HwRetVt);
18938 
18939   SDVTList VTs = DAG.getVTList(HwRetVt, MVT::Other);
18940   SDValue Ops[] = {N->getOperand(0), // Chain
18941                    N->getOperand(2), // Pg
18942                    Base, Offset, OutVT};
18943 
18944   SDValue Load = DAG.getNode(Opcode, DL, VTs, Ops);
18945   SDValue LoadChain = SDValue(Load.getNode(), 1);
18946 
18947   if (RetVT.isInteger() && (RetVT != HwRetVt))
18948     Load = DAG.getNode(ISD::TRUNCATE, DL, RetVT, Load.getValue(0));
18949 
18950   // If the original return value was FP, bitcast accordingly. Doing it here
18951   // means that we can avoid adding TableGen patterns for FPs.
18952   if (RetVT.isFloatingPoint())
18953     Load = DAG.getNode(ISD::BITCAST, DL, RetVT, Load.getValue(0));
18954 
18955   return DAG.getMergeValues({Load, LoadChain}, DL);
18956 }
18957 
18958 static SDValue
18959 performSignExtendInRegCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
18960                               SelectionDAG &DAG) {
18961   SDLoc DL(N);
18962   SDValue Src = N->getOperand(0);
18963   unsigned Opc = Src->getOpcode();
18964 
18965   // Sign extend of an unsigned unpack -> signed unpack
18966   if (Opc == AArch64ISD::UUNPKHI || Opc == AArch64ISD::UUNPKLO) {
18967 
18968     unsigned SOpc = Opc == AArch64ISD::UUNPKHI ? AArch64ISD::SUNPKHI
18969                                                : AArch64ISD::SUNPKLO;
18970 
18971     // Push the sign extend to the operand of the unpack
18972     // This is necessary where, for example, the operand of the unpack
18973     // is another unpack:
18974     // 4i32 sign_extend_inreg (4i32 uunpklo(8i16 uunpklo (16i8 opnd)), from 4i8)
18975     // ->
18976     // 4i32 sunpklo (8i16 sign_extend_inreg(8i16 uunpklo (16i8 opnd), from 8i8)
18977     // ->
18978     // 4i32 sunpklo(8i16 sunpklo(16i8 opnd))
18979     SDValue ExtOp = Src->getOperand(0);
18980     auto VT = cast<VTSDNode>(N->getOperand(1))->getVT();
18981     EVT EltTy = VT.getVectorElementType();
18982     (void)EltTy;
18983 
18984     assert((EltTy == MVT::i8 || EltTy == MVT::i16 || EltTy == MVT::i32) &&
18985            "Sign extending from an invalid type");
18986 
18987     EVT ExtVT = VT.getDoubleNumVectorElementsVT(*DAG.getContext());
18988 
18989     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ExtOp.getValueType(),
18990                               ExtOp, DAG.getValueType(ExtVT));
18991 
18992     return DAG.getNode(SOpc, DL, N->getValueType(0), Ext);
18993   }
18994 
18995   if (DCI.isBeforeLegalizeOps())
18996     return SDValue();
18997 
18998   if (!EnableCombineMGatherIntrinsics)
18999     return SDValue();
19000 
19001   // SVE load nodes (e.g. AArch64ISD::GLD1) are straightforward candidates
19002   // for DAG Combine with SIGN_EXTEND_INREG. Bail out for all other nodes.
19003   unsigned NewOpc;
19004   unsigned MemVTOpNum = 4;
19005   switch (Opc) {
19006   case AArch64ISD::LD1_MERGE_ZERO:
19007     NewOpc = AArch64ISD::LD1S_MERGE_ZERO;
19008     MemVTOpNum = 3;
19009     break;
19010   case AArch64ISD::LDNF1_MERGE_ZERO:
19011     NewOpc = AArch64ISD::LDNF1S_MERGE_ZERO;
19012     MemVTOpNum = 3;
19013     break;
19014   case AArch64ISD::LDFF1_MERGE_ZERO:
19015     NewOpc = AArch64ISD::LDFF1S_MERGE_ZERO;
19016     MemVTOpNum = 3;
19017     break;
19018   case AArch64ISD::GLD1_MERGE_ZERO:
19019     NewOpc = AArch64ISD::GLD1S_MERGE_ZERO;
19020     break;
19021   case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
19022     NewOpc = AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
19023     break;
19024   case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
19025     NewOpc = AArch64ISD::GLD1S_SXTW_MERGE_ZERO;
19026     break;
19027   case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
19028     NewOpc = AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO;
19029     break;
19030   case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
19031     NewOpc = AArch64ISD::GLD1S_UXTW_MERGE_ZERO;
19032     break;
19033   case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
19034     NewOpc = AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO;
19035     break;
19036   case AArch64ISD::GLD1_IMM_MERGE_ZERO:
19037     NewOpc = AArch64ISD::GLD1S_IMM_MERGE_ZERO;
19038     break;
19039   case AArch64ISD::GLDFF1_MERGE_ZERO:
19040     NewOpc = AArch64ISD::GLDFF1S_MERGE_ZERO;
19041     break;
19042   case AArch64ISD::GLDFF1_SCALED_MERGE_ZERO:
19043     NewOpc = AArch64ISD::GLDFF1S_SCALED_MERGE_ZERO;
19044     break;
19045   case AArch64ISD::GLDFF1_SXTW_MERGE_ZERO:
19046     NewOpc = AArch64ISD::GLDFF1S_SXTW_MERGE_ZERO;
19047     break;
19048   case AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO:
19049     NewOpc = AArch64ISD::GLDFF1S_SXTW_SCALED_MERGE_ZERO;
19050     break;
19051   case AArch64ISD::GLDFF1_UXTW_MERGE_ZERO:
19052     NewOpc = AArch64ISD::GLDFF1S_UXTW_MERGE_ZERO;
19053     break;
19054   case AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO:
19055     NewOpc = AArch64ISD::GLDFF1S_UXTW_SCALED_MERGE_ZERO;
19056     break;
19057   case AArch64ISD::GLDFF1_IMM_MERGE_ZERO:
19058     NewOpc = AArch64ISD::GLDFF1S_IMM_MERGE_ZERO;
19059     break;
19060   case AArch64ISD::GLDNT1_MERGE_ZERO:
19061     NewOpc = AArch64ISD::GLDNT1S_MERGE_ZERO;
19062     break;
19063   default:
19064     return SDValue();
19065   }
19066 
19067   EVT SignExtSrcVT = cast<VTSDNode>(N->getOperand(1))->getVT();
19068   EVT SrcMemVT = cast<VTSDNode>(Src->getOperand(MemVTOpNum))->getVT();
19069 
19070   if ((SignExtSrcVT != SrcMemVT) || !Src.hasOneUse())
19071     return SDValue();
19072 
19073   EVT DstVT = N->getValueType(0);
19074   SDVTList VTs = DAG.getVTList(DstVT, MVT::Other);
19075 
19076   SmallVector<SDValue, 5> Ops;
19077   for (unsigned I = 0; I < Src->getNumOperands(); ++I)
19078     Ops.push_back(Src->getOperand(I));
19079 
19080   SDValue ExtLoad = DAG.getNode(NewOpc, SDLoc(N), VTs, Ops);
19081   DCI.CombineTo(N, ExtLoad);
19082   DCI.CombineTo(Src.getNode(), ExtLoad, ExtLoad.getValue(1));
19083 
19084   // Return N so it doesn't get rechecked
19085   return SDValue(N, 0);
19086 }
19087 
19088 /// Legalize the gather prefetch (scalar + vector addressing mode) when the
19089 /// offset vector is an unpacked 32-bit scalable vector. The other cases (Offset
19090 /// != nxv2i32) do not need legalization.
19091 static SDValue legalizeSVEGatherPrefetchOffsVec(SDNode *N, SelectionDAG &DAG) {
19092   const unsigned OffsetPos = 4;
19093   SDValue Offset = N->getOperand(OffsetPos);
19094 
19095   // Not an unpacked vector, bail out.
19096   if (Offset.getValueType().getSimpleVT().SimpleTy != MVT::nxv2i32)
19097     return SDValue();
19098 
19099   // Extend the unpacked offset vector to 64-bit lanes.
19100   SDLoc DL(N);
19101   Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset);
19102   SmallVector<SDValue, 5> Ops(N->op_begin(), N->op_end());
19103   // Replace the offset operand with the 64-bit one.
19104   Ops[OffsetPos] = Offset;
19105 
19106   return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops);
19107 }
19108 
19109 /// Combines a node carrying the intrinsic
19110 /// `aarch64_sve_prf<T>_gather_scalar_offset` into a node that uses
19111 /// `aarch64_sve_prfb_gather_uxtw_index` when the scalar offset passed to
19112 /// `aarch64_sve_prf<T>_gather_scalar_offset` is not a valid immediate for the
19113 /// sve gather prefetch instruction with vector plus immediate addressing mode.
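/// For example (illustrative operand order), a node equivalent to
///   sve.prfh.gather.scalar.offset(pg, <vector of bases>, offset)
/// whose scalar `offset` cannot be encoded as an immediate is rewritten as
///   sve.prfb.gather.uxtw.index(pg, offset, <vector of bases>)
/// so that the scalar acts as the base and the old bases act as byte indices.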
19114 static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG,
19115                                                unsigned ScalarSizeInBytes) {
19116   const unsigned ImmPos = 4, OffsetPos = 3;
19117   // No need to combine the node if the immediate is valid...
19118   if (isValidImmForSVEVecImmAddrMode(N->getOperand(ImmPos), ScalarSizeInBytes))
19119     return SDValue();
19120 
  // ...otherwise swap the vector of bases with the scalar offset...
19122   SmallVector<SDValue, 5> Ops(N->op_begin(), N->op_end());
19123   std::swap(Ops[ImmPos], Ops[OffsetPos]);
19124   // ...and remap the intrinsic `aarch64_sve_prf<T>_gather_scalar_offset` to
19125   // `aarch64_sve_prfb_gather_uxtw_index`.
19126   SDLoc DL(N);
19127   Ops[1] = DAG.getConstant(Intrinsic::aarch64_sve_prfb_gather_uxtw_index, DL,
19128                            MVT::i64);
19129 
19130   return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops);
19131 }
19132 
// Return true if the vector operation can guarantee that only the first lane
// of its result contains data, with all bits in other lanes set to zero.
19135 static bool isLanes1toNKnownZero(SDValue Op) {
19136   switch (Op.getOpcode()) {
19137   default:
19138     return false;
19139   case AArch64ISD::ANDV_PRED:
19140   case AArch64ISD::EORV_PRED:
19141   case AArch64ISD::FADDA_PRED:
19142   case AArch64ISD::FADDV_PRED:
19143   case AArch64ISD::FMAXNMV_PRED:
19144   case AArch64ISD::FMAXV_PRED:
19145   case AArch64ISD::FMINNMV_PRED:
19146   case AArch64ISD::FMINV_PRED:
19147   case AArch64ISD::ORV_PRED:
19148   case AArch64ISD::SADDV_PRED:
19149   case AArch64ISD::SMAXV_PRED:
19150   case AArch64ISD::SMINV_PRED:
19151   case AArch64ISD::UADDV_PRED:
19152   case AArch64ISD::UMAXV_PRED:
19153   case AArch64ISD::UMINV_PRED:
19154     return true;
19155   }
19156 }
19157 
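// Remove redundant explicit zeroing of lanes 1-N, e.g. (illustrative):
//   insert_vector_elt(zero-splat, extract_vector_elt(X, 0), 0)
// can be replaced by X itself when lanes 1-N of X are already known to be
// zero (see isLanes1toNKnownZero above).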
19158 static SDValue removeRedundantInsertVectorElt(SDNode *N) {
19159   assert(N->getOpcode() == ISD::INSERT_VECTOR_ELT && "Unexpected node!");
19160   SDValue InsertVec = N->getOperand(0);
19161   SDValue InsertElt = N->getOperand(1);
19162   SDValue InsertIdx = N->getOperand(2);
19163 
19164   // We only care about inserts into the first element...
19165   if (!isNullConstant(InsertIdx))
19166     return SDValue();
19167   // ...of a zero'd vector...
19168   if (!ISD::isConstantSplatVectorAllZeros(InsertVec.getNode()))
19169     return SDValue();
19170   // ...where the inserted data was previously extracted...
19171   if (InsertElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
19172     return SDValue();
19173 
19174   SDValue ExtractVec = InsertElt.getOperand(0);
19175   SDValue ExtractIdx = InsertElt.getOperand(1);
19176 
19177   // ...from the first element of a vector.
19178   if (!isNullConstant(ExtractIdx))
19179     return SDValue();
19180 
19181   // If we get here we are effectively trying to zero lanes 1-N of a vector.
19182 
19183   // Ensure there's no type conversion going on.
19184   if (N->getValueType(0) != ExtractVec.getValueType())
19185     return SDValue();
19186 
19187   if (!isLanes1toNKnownZero(ExtractVec))
19188     return SDValue();
19189 
19190   // The explicit zeroing is redundant.
19191   return ExtractVec;
19192 }
19193 
19194 static SDValue
19195 performInsertVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
19196   if (SDValue Res = removeRedundantInsertVectorElt(N))
19197     return Res;
19198 
19199   return performPostLD1Combine(N, DCI, true);
19200 }
19201 
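// Rewrite a floating-point VECTOR_SPLICE so the splice is performed on
// equivalent packed integer vectors, e.g. (illustrative) an nxv2f32 splice is
// bitcast to nxv2i32, any-extended to nxv2i64, spliced, and then truncated
// and bitcast back to nxv2f32.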
19202 static SDValue performSVESpliceCombine(SDNode *N, SelectionDAG &DAG) {
19203   EVT Ty = N->getValueType(0);
19204   if (Ty.isInteger())
19205     return SDValue();
19206 
19207   EVT IntTy = Ty.changeVectorElementTypeToInteger();
19208   EVT ExtIntTy = getPackedSVEVectorVT(IntTy.getVectorElementCount());
19209   if (ExtIntTy.getVectorElementType().getScalarSizeInBits() <
19210       IntTy.getVectorElementType().getScalarSizeInBits())
19211     return SDValue();
19212 
19213   SDLoc DL(N);
19214   SDValue LHS = DAG.getAnyExtOrTrunc(DAG.getBitcast(IntTy, N->getOperand(0)),
19215                                      DL, ExtIntTy);
19216   SDValue RHS = DAG.getAnyExtOrTrunc(DAG.getBitcast(IntTy, N->getOperand(1)),
19217                                      DL, ExtIntTy);
19218   SDValue Idx = N->getOperand(2);
19219   SDValue Splice = DAG.getNode(ISD::VECTOR_SPLICE, DL, ExtIntTy, LHS, RHS, Idx);
19220   SDValue Trunc = DAG.getAnyExtOrTrunc(Splice, DL, IntTy);
19221   return DAG.getBitcast(Ty, Trunc);
19222 }
19223 
19224 static SDValue performFPExtendCombine(SDNode *N, SelectionDAG &DAG,
19225                                       TargetLowering::DAGCombinerInfo &DCI,
19226                                       const AArch64Subtarget *Subtarget) {
19227   SDValue N0 = N->getOperand(0);
19228   EVT VT = N->getValueType(0);
19229 
  // If this is fp_round(fpextend), don't fold it; allow ourselves to be
  // folded instead.
19231   if (N->hasOneUse() && N->use_begin()->getOpcode() == ISD::FP_ROUND)
19232     return SDValue();
19233 
19234   // fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
19235   // We purposefully don't care about legality of the nodes here as we know
19236   // they can be split down into something legal.
19237   if (DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(N0.getNode()) &&
19238       N0.hasOneUse() && Subtarget->useSVEForFixedLengthVectors() &&
19239       VT.isFixedLengthVector() &&
19240       VT.getFixedSizeInBits() >= Subtarget->getMinSVEVectorSizeInBits()) {
19241     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
19242     SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
19243                                      LN0->getChain(), LN0->getBasePtr(),
19244                                      N0.getValueType(), LN0->getMemOperand());
19245     DCI.CombineTo(N, ExtLoad);
19246     DCI.CombineTo(N0.getNode(),
19247                   DAG.getNode(ISD::FP_ROUND, SDLoc(N0), N0.getValueType(),
19248                               ExtLoad, DAG.getIntPtrConstant(1, SDLoc(N0))),
19249                   ExtLoad.getValue(1));
19250     return SDValue(N, 0); // Return N so it doesn't get rechecked!
19251   }
19252 
19253   return SDValue();
19254 }
19255 
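// Expand AArch64ISD::BSP (bitwise select) on scalable vectors into explicit
// logic when SVE2 (or SME), which provides BSL-style instructions, is not
// available:
//   BSP(Mask, In1, In2) -> (Mask & In1) | (~Mask & In2)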
19256 static SDValue performBSPExpandForSVE(SDNode *N, SelectionDAG &DAG,
19257                                       const AArch64Subtarget *Subtarget,
19258                                       bool fixedSVEVectorVT) {
19259   EVT VT = N->getValueType(0);
19260 
19261   // Don't expand for SVE2
19262   if (!VT.isScalableVector() || Subtarget->hasSVE2() || Subtarget->hasSME())
19263     return SDValue();
19264 
19265   // Don't expand for NEON
19266   if (VT.isFixedLengthVector() && !fixedSVEVectorVT)
19267     return SDValue();
19268 
19269   SDLoc DL(N);
19270 
19271   SDValue Mask = N->getOperand(0);
19272   SDValue In1 = N->getOperand(1);
19273   SDValue In2 = N->getOperand(2);
19274 
19275   SDValue InvMask = DAG.getNOT(DL, Mask, VT);
19276   SDValue Sel = DAG.getNode(ISD::AND, DL, VT, Mask, In1);
19277   SDValue SelInv = DAG.getNode(ISD::AND, DL, VT, InvMask, In2);
19278   return DAG.getNode(ISD::OR, DL, VT, Sel, SelInv);
19279 }
19280 
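// Rewrite a DUPLANE128 of an inserted bitcast so the duplication is performed
// on the subvector's original element type and the bitcast is applied to the
// result instead, e.g. (illustrative):
//   duplane128(insert_subvector(undef, bitcast(X), Idx), Idx)
//     -> bitcast(duplane128(insert_subvector(undef, X, Idx), Idx))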
19281 static SDValue performDupLane128Combine(SDNode *N, SelectionDAG &DAG) {
19282   EVT VT = N->getValueType(0);
19283 
19284   SDValue Insert = N->getOperand(0);
19285   if (Insert.getOpcode() != ISD::INSERT_SUBVECTOR)
19286     return SDValue();
19287 
19288   if (!Insert.getOperand(0).isUndef())
19289     return SDValue();
19290 
19291   uint64_t IdxInsert = Insert.getConstantOperandVal(2);
19292   uint64_t IdxDupLane = N->getConstantOperandVal(1);
19293   if (IdxInsert != IdxDupLane)
19294     return SDValue();
19295 
19296   SDValue Bitcast = Insert.getOperand(1);
19297   if (Bitcast.getOpcode() != ISD::BITCAST)
19298     return SDValue();
19299 
19300   SDValue Subvec = Bitcast.getOperand(0);
19301   EVT SubvecVT = Subvec.getValueType();
19302   if (!SubvecVT.is128BitVector())
19303     return SDValue();
19304   EVT NewSubvecVT =
19305       getPackedSVEVectorVT(Subvec.getValueType().getVectorElementType());
19306 
19307   SDLoc DL(N);
19308   SDValue NewInsert =
19309       DAG.getNode(ISD::INSERT_SUBVECTOR, DL, NewSubvecVT,
19310                   DAG.getUNDEF(NewSubvecVT), Subvec, Insert->getOperand(2));
19311   SDValue NewDuplane128 = DAG.getNode(AArch64ISD::DUPLANE128, DL, NewSubvecVT,
19312                                       NewInsert, N->getOperand(1));
19313   return DAG.getNode(ISD::BITCAST, DL, VT, NewDuplane128);
19314 }
19315 
19316 SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
19317                                                  DAGCombinerInfo &DCI) const {
19318   SelectionDAG &DAG = DCI.DAG;
19319   switch (N->getOpcode()) {
19320   default:
19321     LLVM_DEBUG(dbgs() << "Custom combining: skipping\n");
19322     break;
19323   case ISD::ADD:
19324   case ISD::SUB:
19325     return performAddSubCombine(N, DCI, DAG);
19326   case AArch64ISD::ANDS:
19327     return performFlagSettingCombine(N, DCI, ISD::AND);
19328   case AArch64ISD::ADC:
19329     if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ true))
19330       return R;
19331     return foldADCToCINC(N, DAG);
19332   case AArch64ISD::SBC:
19333     return foldOverflowCheck(N, DAG, /* IsAdd */ false);
19334   case AArch64ISD::ADCS:
19335     if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ true))
19336       return R;
19337     return performFlagSettingCombine(N, DCI, AArch64ISD::ADC);
19338   case AArch64ISD::SBCS:
19339     if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ false))
19340       return R;
19341     return performFlagSettingCombine(N, DCI, AArch64ISD::SBC);
19342   case ISD::XOR:
19343     return performXorCombine(N, DAG, DCI, Subtarget);
19344   case ISD::MUL:
19345     return performMulCombine(N, DAG, DCI, Subtarget);
19346   case ISD::SINT_TO_FP:
19347   case ISD::UINT_TO_FP:
19348     return performIntToFpCombine(N, DAG, Subtarget);
19349   case ISD::FP_TO_SINT:
19350   case ISD::FP_TO_UINT:
19351   case ISD::FP_TO_SINT_SAT:
19352   case ISD::FP_TO_UINT_SAT:
19353     return performFpToIntCombine(N, DAG, DCI, Subtarget);
19354   case ISD::FDIV:
19355     return performFDivCombine(N, DAG, DCI, Subtarget);
19356   case ISD::OR:
19357     return performORCombine(N, DCI, Subtarget);
19358   case ISD::AND:
19359     return performANDCombine(N, DCI);
19360   case ISD::INTRINSIC_WO_CHAIN:
19361     return performIntrinsicCombine(N, DCI, Subtarget);
19362   case ISD::ANY_EXTEND:
19363   case ISD::ZERO_EXTEND:
19364   case ISD::SIGN_EXTEND:
19365     return performExtendCombine(N, DCI, DAG);
19366   case ISD::SIGN_EXTEND_INREG:
19367     return performSignExtendInRegCombine(N, DCI, DAG);
19368   case ISD::CONCAT_VECTORS:
19369     return performConcatVectorsCombine(N, DCI, DAG);
19370   case ISD::EXTRACT_SUBVECTOR:
19371     return performExtractSubvectorCombine(N, DCI, DAG);
19372   case ISD::INSERT_SUBVECTOR:
19373     return performInsertSubvectorCombine(N, DCI, DAG);
19374   case ISD::SELECT:
19375     return performSelectCombine(N, DCI);
19376   case ISD::VSELECT:
19377     return performVSelectCombine(N, DCI.DAG);
19378   case ISD::SETCC:
19379     return performSETCCCombine(N, DAG);
19380   case ISD::LOAD:
19381     if (performTBISimplification(N->getOperand(1), DCI, DAG))
19382       return SDValue(N, 0);
19383     break;
19384   case ISD::STORE:
19385     return performSTORECombine(N, DCI, DAG, Subtarget);
19386   case ISD::MGATHER:
19387   case ISD::MSCATTER:
19388     return performMaskedGatherScatterCombine(N, DCI, DAG);
19389   case ISD::VECTOR_SPLICE:
19390     return performSVESpliceCombine(N, DAG);
19391   case ISD::FP_EXTEND:
19392     return performFPExtendCombine(N, DAG, DCI, Subtarget);
19393   case AArch64ISD::BRCOND:
19394     return performBRCONDCombine(N, DCI, DAG);
19395   case AArch64ISD::TBNZ:
19396   case AArch64ISD::TBZ:
19397     return performTBZCombine(N, DCI, DAG);
19398   case AArch64ISD::CSEL:
19399     return performCSELCombine(N, DCI, DAG);
19400   case AArch64ISD::DUP:
19401     return performDUPCombine(N, DCI);
19402   case AArch64ISD::DUPLANE128:
19403     return performDupLane128Combine(N, DAG);
19404   case AArch64ISD::NVCAST:
19405     return performNVCASTCombine(N);
19406   case AArch64ISD::SPLICE:
19407     return performSpliceCombine(N, DAG);
19408   case AArch64ISD::UUNPKLO:
19409   case AArch64ISD::UUNPKHI:
19410     return performUnpackCombine(N, DAG);
19411   case AArch64ISD::UZP1:
19412     return performUzpCombine(N, DAG);
19413   case AArch64ISD::SETCC_MERGE_ZERO:
19414     return performSetccMergeZeroCombine(N, DCI);
19415   case AArch64ISD::GLD1_MERGE_ZERO:
19416   case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
19417   case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
19418   case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
19419   case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
19420   case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
19421   case AArch64ISD::GLD1_IMM_MERGE_ZERO:
19422   case AArch64ISD::GLD1S_MERGE_ZERO:
19423   case AArch64ISD::GLD1S_SCALED_MERGE_ZERO:
19424   case AArch64ISD::GLD1S_UXTW_MERGE_ZERO:
19425   case AArch64ISD::GLD1S_SXTW_MERGE_ZERO:
19426   case AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO:
19427   case AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO:
19428   case AArch64ISD::GLD1S_IMM_MERGE_ZERO:
19429     return performGLD1Combine(N, DAG);
19430   case AArch64ISD::VASHR:
19431   case AArch64ISD::VLSHR:
19432     return performVectorShiftCombine(N, *this, DCI);
19433   case AArch64ISD::SUNPKLO:
19434     return performSunpkloCombine(N, DAG);
19435   case AArch64ISD::BSP:
19436     return performBSPExpandForSVE(
19437         N, DAG, Subtarget, useSVEForFixedLengthVectorVT(N->getValueType(0)));
19438   case ISD::INSERT_VECTOR_ELT:
19439     return performInsertVectorEltCombine(N, DCI);
19440   case ISD::EXTRACT_VECTOR_ELT:
19441     return performExtractVectorEltCombine(N, DCI, Subtarget);
19442   case ISD::VECREDUCE_ADD:
19443     return performVecReduceAddCombine(N, DCI.DAG, Subtarget);
19444   case AArch64ISD::UADDV:
19445     return performUADDVCombine(N, DAG);
19446   case AArch64ISD::SMULL:
19447   case AArch64ISD::UMULL:
19448     return tryCombineLongOpWithDup(Intrinsic::not_intrinsic, N, DCI, DAG);
19449   case ISD::INTRINSIC_VOID:
19450   case ISD::INTRINSIC_W_CHAIN:
19451     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
19452     case Intrinsic::aarch64_sve_prfb_gather_scalar_offset:
19453       return combineSVEPrefetchVecBaseImmOff(N, DAG, 1 /*=ScalarSizeInBytes*/);
19454     case Intrinsic::aarch64_sve_prfh_gather_scalar_offset:
19455       return combineSVEPrefetchVecBaseImmOff(N, DAG, 2 /*=ScalarSizeInBytes*/);
19456     case Intrinsic::aarch64_sve_prfw_gather_scalar_offset:
19457       return combineSVEPrefetchVecBaseImmOff(N, DAG, 4 /*=ScalarSizeInBytes*/);
19458     case Intrinsic::aarch64_sve_prfd_gather_scalar_offset:
19459       return combineSVEPrefetchVecBaseImmOff(N, DAG, 8 /*=ScalarSizeInBytes*/);
19460     case Intrinsic::aarch64_sve_prfb_gather_uxtw_index:
19461     case Intrinsic::aarch64_sve_prfb_gather_sxtw_index:
19462     case Intrinsic::aarch64_sve_prfh_gather_uxtw_index:
19463     case Intrinsic::aarch64_sve_prfh_gather_sxtw_index:
19464     case Intrinsic::aarch64_sve_prfw_gather_uxtw_index:
19465     case Intrinsic::aarch64_sve_prfw_gather_sxtw_index:
19466     case Intrinsic::aarch64_sve_prfd_gather_uxtw_index:
19467     case Intrinsic::aarch64_sve_prfd_gather_sxtw_index:
19468       return legalizeSVEGatherPrefetchOffsVec(N, DAG);
19469     case Intrinsic::aarch64_neon_ld2:
19470     case Intrinsic::aarch64_neon_ld3:
19471     case Intrinsic::aarch64_neon_ld4:
19472     case Intrinsic::aarch64_neon_ld1x2:
19473     case Intrinsic::aarch64_neon_ld1x3:
19474     case Intrinsic::aarch64_neon_ld1x4:
19475     case Intrinsic::aarch64_neon_ld2lane:
19476     case Intrinsic::aarch64_neon_ld3lane:
19477     case Intrinsic::aarch64_neon_ld4lane:
19478     case Intrinsic::aarch64_neon_ld2r:
19479     case Intrinsic::aarch64_neon_ld3r:
19480     case Intrinsic::aarch64_neon_ld4r:
19481     case Intrinsic::aarch64_neon_st2:
19482     case Intrinsic::aarch64_neon_st3:
19483     case Intrinsic::aarch64_neon_st4:
19484     case Intrinsic::aarch64_neon_st1x2:
19485     case Intrinsic::aarch64_neon_st1x3:
19486     case Intrinsic::aarch64_neon_st1x4:
19487     case Intrinsic::aarch64_neon_st2lane:
19488     case Intrinsic::aarch64_neon_st3lane:
19489     case Intrinsic::aarch64_neon_st4lane:
19490       return performNEONPostLDSTCombine(N, DCI, DAG);
19491     case Intrinsic::aarch64_sve_ldnt1:
19492       return performLDNT1Combine(N, DAG);
19493     case Intrinsic::aarch64_sve_ld1rq:
19494       return performLD1ReplicateCombine<AArch64ISD::LD1RQ_MERGE_ZERO>(N, DAG);
19495     case Intrinsic::aarch64_sve_ld1ro:
19496       return performLD1ReplicateCombine<AArch64ISD::LD1RO_MERGE_ZERO>(N, DAG);
19497     case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset:
19498       return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO);
19499     case Intrinsic::aarch64_sve_ldnt1_gather:
19500       return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO);
19501     case Intrinsic::aarch64_sve_ldnt1_gather_index:
19502       return performGatherLoadCombine(N, DAG,
19503                                       AArch64ISD::GLDNT1_INDEX_MERGE_ZERO);
19504     case Intrinsic::aarch64_sve_ldnt1_gather_uxtw:
19505       return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO);
19506     case Intrinsic::aarch64_sve_ld1:
19507       return performLD1Combine(N, DAG, AArch64ISD::LD1_MERGE_ZERO);
19508     case Intrinsic::aarch64_sve_ldnf1:
19509       return performLD1Combine(N, DAG, AArch64ISD::LDNF1_MERGE_ZERO);
19510     case Intrinsic::aarch64_sve_ldff1:
19511       return performLD1Combine(N, DAG, AArch64ISD::LDFF1_MERGE_ZERO);
19512     case Intrinsic::aarch64_sve_st1:
19513       return performST1Combine(N, DAG);
19514     case Intrinsic::aarch64_sve_stnt1:
19515       return performSTNT1Combine(N, DAG);
19516     case Intrinsic::aarch64_sve_stnt1_scatter_scalar_offset:
19517       return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED);
19518     case Intrinsic::aarch64_sve_stnt1_scatter_uxtw:
19519       return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED);
19520     case Intrinsic::aarch64_sve_stnt1_scatter:
19521       return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED);
19522     case Intrinsic::aarch64_sve_stnt1_scatter_index:
19523       return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_INDEX_PRED);
19524     case Intrinsic::aarch64_sve_ld1_gather:
19525       return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_MERGE_ZERO);
19526     case Intrinsic::aarch64_sve_ld1_gather_index:
19527       return performGatherLoadCombine(N, DAG,
19528                                       AArch64ISD::GLD1_SCALED_MERGE_ZERO);
19529     case Intrinsic::aarch64_sve_ld1_gather_sxtw:
19530       return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_SXTW_MERGE_ZERO,
19531                                       /*OnlyPackedOffsets=*/false);
19532     case Intrinsic::aarch64_sve_ld1_gather_uxtw:
19533       return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_UXTW_MERGE_ZERO,
19534                                       /*OnlyPackedOffsets=*/false);
19535     case Intrinsic::aarch64_sve_ld1_gather_sxtw_index:
19536       return performGatherLoadCombine(N, DAG,
19537                                       AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO,
19538                                       /*OnlyPackedOffsets=*/false);
19539     case Intrinsic::aarch64_sve_ld1_gather_uxtw_index:
19540       return performGatherLoadCombine(N, DAG,
19541                                       AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO,
19542                                       /*OnlyPackedOffsets=*/false);
19543     case Intrinsic::aarch64_sve_ld1_gather_scalar_offset:
19544       return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_IMM_MERGE_ZERO);
19545     case Intrinsic::aarch64_sve_ldff1_gather:
19546       return performGatherLoadCombine(N, DAG, AArch64ISD::GLDFF1_MERGE_ZERO);
19547     case Intrinsic::aarch64_sve_ldff1_gather_index:
19548       return performGatherLoadCombine(N, DAG,
19549                                       AArch64ISD::GLDFF1_SCALED_MERGE_ZERO);
19550     case Intrinsic::aarch64_sve_ldff1_gather_sxtw:
19551       return performGatherLoadCombine(N, DAG,
19552                                       AArch64ISD::GLDFF1_SXTW_MERGE_ZERO,
19553                                       /*OnlyPackedOffsets=*/false);
19554     case Intrinsic::aarch64_sve_ldff1_gather_uxtw:
19555       return performGatherLoadCombine(N, DAG,
19556                                       AArch64ISD::GLDFF1_UXTW_MERGE_ZERO,
19557                                       /*OnlyPackedOffsets=*/false);
19558     case Intrinsic::aarch64_sve_ldff1_gather_sxtw_index:
19559       return performGatherLoadCombine(N, DAG,
19560                                       AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO,
19561                                       /*OnlyPackedOffsets=*/false);
19562     case Intrinsic::aarch64_sve_ldff1_gather_uxtw_index:
19563       return performGatherLoadCombine(N, DAG,
19564                                       AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO,
19565                                       /*OnlyPackedOffsets=*/false);
19566     case Intrinsic::aarch64_sve_ldff1_gather_scalar_offset:
19567       return performGatherLoadCombine(N, DAG,
19568                                       AArch64ISD::GLDFF1_IMM_MERGE_ZERO);
19569     case Intrinsic::aarch64_sve_st1_scatter:
19570       return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_PRED);
19571     case Intrinsic::aarch64_sve_st1_scatter_index:
19572       return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_SCALED_PRED);
19573     case Intrinsic::aarch64_sve_st1_scatter_sxtw:
19574       return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_SXTW_PRED,
19575                                         /*OnlyPackedOffsets=*/false);
19576     case Intrinsic::aarch64_sve_st1_scatter_uxtw:
19577       return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_UXTW_PRED,
19578                                         /*OnlyPackedOffsets=*/false);
19579     case Intrinsic::aarch64_sve_st1_scatter_sxtw_index:
19580       return performScatterStoreCombine(N, DAG,
19581                                         AArch64ISD::SST1_SXTW_SCALED_PRED,
19582                                         /*OnlyPackedOffsets=*/false);
19583     case Intrinsic::aarch64_sve_st1_scatter_uxtw_index:
19584       return performScatterStoreCombine(N, DAG,
19585                                         AArch64ISD::SST1_UXTW_SCALED_PRED,
19586                                         /*OnlyPackedOffsets=*/false);
19587     case Intrinsic::aarch64_sve_st1_scatter_scalar_offset:
19588       return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_IMM_PRED);
19589     case Intrinsic::aarch64_sve_tuple_get: {
19590       SDLoc DL(N);
19591       SDValue Chain = N->getOperand(0);
19592       SDValue Src1 = N->getOperand(2);
19593       SDValue Idx = N->getOperand(3);
19594 
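      // Tuples are modelled as one wide vector; extract sub-vector number
      // IdxConst by taking NumLanes lanes starting at lane IdxConst * NumLanes.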
19595       uint64_t IdxConst = cast<ConstantSDNode>(Idx)->getZExtValue();
19596       EVT ResVT = N->getValueType(0);
19597       uint64_t NumLanes = ResVT.getVectorElementCount().getKnownMinValue();
19598       SDValue ExtIdx = DAG.getVectorIdxConstant(IdxConst * NumLanes, DL);
19599       SDValue Val =
19600           DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResVT, Src1, ExtIdx);
19601       return DAG.getMergeValues({Val, Chain}, DL);
19602     }
19603     case Intrinsic::aarch64_sve_tuple_set: {
19604       SDLoc DL(N);
19605       SDValue Chain = N->getOperand(0);
19606       SDValue Tuple = N->getOperand(2);
19607       SDValue Idx = N->getOperand(3);
19608       SDValue Vec = N->getOperand(4);
19609 
19610       EVT TupleVT = Tuple.getValueType();
19611       uint64_t TupleLanes = TupleVT.getVectorElementCount().getKnownMinValue();
19612 
19613       uint64_t IdxConst = cast<ConstantSDNode>(Idx)->getZExtValue();
19614       uint64_t NumLanes =
19615           Vec.getValueType().getVectorElementCount().getKnownMinValue();
19616 
19617       if ((TupleLanes % NumLanes) != 0)
19618         report_fatal_error("invalid tuple vector!");
19619 
19620       uint64_t NumVecs = TupleLanes / NumLanes;
19621 
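      // Rebuild the tuple by concatenating its sub-vectors, substituting Vec
      // for the sub-vector at position IdxConst.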
19622       SmallVector<SDValue, 4> Opnds;
19623       for (unsigned I = 0; I < NumVecs; ++I) {
19624         if (I == IdxConst)
19625           Opnds.push_back(Vec);
19626         else {
19627           SDValue ExtIdx = DAG.getVectorIdxConstant(I * NumLanes, DL);
19628           Opnds.push_back(DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
19629                                       Vec.getValueType(), Tuple, ExtIdx));
19630         }
19631       }
19632       SDValue Concat =
19633           DAG.getNode(ISD::CONCAT_VECTORS, DL, Tuple.getValueType(), Opnds);
19634       return DAG.getMergeValues({Concat, Chain}, DL);
19635     }
19636     case Intrinsic::aarch64_sve_tuple_create2:
19637     case Intrinsic::aarch64_sve_tuple_create3:
19638     case Intrinsic::aarch64_sve_tuple_create4: {
19639       SDLoc DL(N);
19640       SDValue Chain = N->getOperand(0);
19641 
19642       SmallVector<SDValue, 4> Opnds;
19643       for (unsigned I = 2; I < N->getNumOperands(); ++I)
19644         Opnds.push_back(N->getOperand(I));
19645 
19646       EVT VT = Opnds[0].getValueType();
19647       EVT EltVT = VT.getVectorElementType();
19648       EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
19649                                     VT.getVectorElementCount() *
19650                                         (N->getNumOperands() - 2));
19651       SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, DestVT, Opnds);
19652       return DAG.getMergeValues({Concat, Chain}, DL);
19653     }
19654     case Intrinsic::aarch64_sve_ld2:
19655     case Intrinsic::aarch64_sve_ld3:
19656     case Intrinsic::aarch64_sve_ld4: {
19657       SDLoc DL(N);
19658       SDValue Chain = N->getOperand(0);
19659       SDValue Mask = N->getOperand(2);
19660       SDValue BasePtr = N->getOperand(3);
19661       SDValue LoadOps[] = {Chain, Mask, BasePtr};
19662       unsigned IntrinsicID =
19663           cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
19664       SDValue Result =
19665           LowerSVEStructLoad(IntrinsicID, LoadOps, N->getValueType(0), DAG, DL);
19666       return DAG.getMergeValues({Result, Chain}, DL);
19667     }
19668     case Intrinsic::aarch64_rndr:
19669     case Intrinsic::aarch64_rndrrs: {
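      // The random number generators are exposed via system registers: read
      // RNDR/RNDRRS with an MRS and derive the intrinsic's success flag from
      // the NZCV flags produced by that read.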
19670       unsigned IntrinsicID =
19671           cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
19672       auto Register =
19673           (IntrinsicID == Intrinsic::aarch64_rndr ? AArch64SysReg::RNDR
19674                                                   : AArch64SysReg::RNDRRS);
19675       SDLoc DL(N);
19676       SDValue A = DAG.getNode(
19677           AArch64ISD::MRS, DL, DAG.getVTList(MVT::i64, MVT::Glue, MVT::Other),
19678           N->getOperand(0), DAG.getConstant(Register, DL, MVT::i64));
19679       SDValue B = DAG.getNode(
19680           AArch64ISD::CSINC, DL, MVT::i32, DAG.getConstant(0, DL, MVT::i32),
19681           DAG.getConstant(0, DL, MVT::i32),
19682           DAG.getConstant(AArch64CC::NE, DL, MVT::i32), A.getValue(1));
19683       return DAG.getMergeValues(
19684           {A, DAG.getZExtOrTrunc(B, DL, MVT::i1), A.getValue(2)}, DL);
19685     }
19686     default:
19687       break;
19688     }
19689     break;
19690   case ISD::GlobalAddress:
19691     return performGlobalAddressCombine(N, DAG, Subtarget, getTargetMachine());
19692   }
19693   return SDValue();
19694 }
19695 
// Check whether the return value is used only as a return value, as otherwise
// we can't perform a tail call. In particular, we need to check for
// target ISD nodes that are returns and any other "odd" constructs
// that the generic analysis code won't necessarily catch.
19700 bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,
19701                                                SDValue &Chain) const {
19702   if (N->getNumValues() != 1)
19703     return false;
19704   if (!N->hasNUsesOfValue(1, 0))
19705     return false;
19706 
19707   SDValue TCChain = Chain;
19708   SDNode *Copy = *N->use_begin();
19709   if (Copy->getOpcode() == ISD::CopyToReg) {
19710     // If the copy has a glue operand, we conservatively assume it isn't safe to
19711     // perform a tail call.
19712     if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() ==
19713         MVT::Glue)
19714       return false;
19715     TCChain = Copy->getOperand(0);
19716   } else if (Copy->getOpcode() != ISD::FP_EXTEND)
19717     return false;
19718 
19719   bool HasRet = false;
19720   for (SDNode *Node : Copy->uses()) {
19721     if (Node->getOpcode() != AArch64ISD::RET_FLAG)
19722       return false;
19723     HasRet = true;
19724   }
19725 
19726   if (!HasRet)
19727     return false;
19728 
19729   Chain = TCChain;
19730   return true;
19731 }
19732 
// Return whether an instruction can potentially be optimized to a tail
19734 // call. This will cause the optimizers to attempt to move, or duplicate,
19735 // return instructions to help enable tail call optimizations for this
19736 // instruction.
19737 bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
19738   return CI->isTailCall();
19739 }
19740 
19741 bool AArch64TargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base,
19742                                                    SDValue &Offset,
19743                                                    ISD::MemIndexedMode &AM,
19744                                                    bool &IsInc,
19745                                                    SelectionDAG &DAG) const {
19746   if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB)
19747     return false;
19748 
19749   Base = Op->getOperand(0);
19750   // All of the indexed addressing mode instructions take a signed
  // 9-bit immediate offset.
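  // For example (illustrative), (add x, #16) yields Base = x, Offset = #16 and
  // IsInc = true, while (sub x, #16) yields Offset = #16 with IsInc = false.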
19752   if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
19753     int64_t RHSC = RHS->getSExtValue();
19754     if (Op->getOpcode() == ISD::SUB)
19755       RHSC = -(uint64_t)RHSC;
19756     if (!isInt<9>(RHSC))
19757       return false;
19758     IsInc = (Op->getOpcode() == ISD::ADD);
19759     Offset = Op->getOperand(1);
19760     return true;
19761   }
19762   return false;
19763 }
19764 
19765 bool AArch64TargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
19766                                                       SDValue &Offset,
19767                                                       ISD::MemIndexedMode &AM,
19768                                                       SelectionDAG &DAG) const {
19769   EVT VT;
19770   SDValue Ptr;
19771   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
19772     VT = LD->getMemoryVT();
19773     Ptr = LD->getBasePtr();
19774   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
19775     VT = ST->getMemoryVT();
19776     Ptr = ST->getBasePtr();
19777   } else
19778     return false;
19779 
19780   bool IsInc;
19781   if (!getIndexedAddressParts(Ptr.getNode(), Base, Offset, AM, IsInc, DAG))
19782     return false;
19783   AM = IsInc ? ISD::PRE_INC : ISD::PRE_DEC;
19784   return true;
19785 }
19786 
19787 bool AArch64TargetLowering::getPostIndexedAddressParts(
19788     SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset,
19789     ISD::MemIndexedMode &AM, SelectionDAG &DAG) const {
19790   EVT VT;
19791   SDValue Ptr;
19792   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
19793     VT = LD->getMemoryVT();
19794     Ptr = LD->getBasePtr();
19795   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
19796     VT = ST->getMemoryVT();
19797     Ptr = ST->getBasePtr();
19798   } else
19799     return false;
19800 
19801   bool IsInc;
19802   if (!getIndexedAddressParts(Op, Base, Offset, AM, IsInc, DAG))
19803     return false;
19804   // Post-indexing updates the base, so it's not a valid transform
19805   // if that's not the same as the load's pointer.
19806   if (Ptr != Base)
19807     return false;
19808   AM = IsInc ? ISD::POST_INC : ISD::POST_DEC;
19809   return true;
19810 }
19811 
19812 void AArch64TargetLowering::ReplaceBITCASTResults(
19813     SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
19814   SDLoc DL(N);
19815   SDValue Op = N->getOperand(0);
19816   EVT VT = N->getValueType(0);
19817   EVT SrcVT = Op.getValueType();
19818 
19819   if (VT.isScalableVector() && !isTypeLegal(VT) && isTypeLegal(SrcVT)) {
19820     assert(!VT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
19821            "Expected fp->int bitcast!");
19822 
19823     // Bitcasting between unpacked vector types of different element counts is
19824     // not a NOP because the live elements are laid out differently.
19825     //                01234567
19826     // e.g. nxv2i32 = XX??XX??
19827     //      nxv4f16 = X?X?X?X?
19828     if (VT.getVectorElementCount() != SrcVT.getVectorElementCount())
19829       return;
19830 
19831     SDValue CastResult = getSVESafeBitCast(getSVEContainerType(VT), Op, DAG);
19832     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, CastResult));
19833     return;
19834   }
19835 
19836   if (VT != MVT::i16 || (SrcVT != MVT::f16 && SrcVT != MVT::bf16))
19837     return;
19838 
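  // Bitcast f16/bf16 -> i16 by inserting the half value into the hsub
  // subregister of an undef 32-bit register, bitcasting that to i32 and
  // truncating to i16.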
19839   Op = SDValue(
19840       DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f32,
19841                          DAG.getUNDEF(MVT::i32), Op,
19842                          DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
19843       0);
19844   Op = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op);
19845   Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Op));
19846 }
19847 
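// Replace a 256-bit (F)ADD of a vector with a shuffle of itself that swaps
// adjacent lanes, i.e. X + shuffle(X, <1,0,3,2,...>), with an ADDP of the two
// 128-bit halves of X followed by a shuffle that copies each pairwise sum
// into both lanes of its pair.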
19848 static void ReplaceAddWithADDP(SDNode *N, SmallVectorImpl<SDValue> &Results,
19849                                SelectionDAG &DAG,
19850                                const AArch64Subtarget *Subtarget) {
19851   EVT VT = N->getValueType(0);
19852   if (!VT.is256BitVector() ||
19853       (VT.getScalarType().isFloatingPoint() &&
19854        !N->getFlags().hasAllowReassociation()) ||
19855       (VT.getScalarType() == MVT::f16 && !Subtarget->hasFullFP16()))
19856     return;
19857 
19858   SDValue X = N->getOperand(0);
19859   auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N->getOperand(1));
19860   if (!Shuf) {
19861     Shuf = dyn_cast<ShuffleVectorSDNode>(N->getOperand(0));
19862     X = N->getOperand(1);
19863     if (!Shuf)
19864       return;
19865   }
19866 
19867   if (Shuf->getOperand(0) != X || !Shuf->getOperand(1)->isUndef())
19868     return;
19869 
19870   // Check the mask is 1,0,3,2,5,4,...
19871   ArrayRef<int> Mask = Shuf->getMask();
19872   for (int I = 0, E = Mask.size(); I < E; I++)
19873     if (Mask[I] != (I % 2 == 0 ? I + 1 : I - 1))
19874       return;
19875 
19876   SDLoc DL(N);
19877   auto LoHi = DAG.SplitVector(X, DL);
19878   assert(LoHi.first.getValueType() == LoHi.second.getValueType());
19879   SDValue Addp = DAG.getNode(AArch64ISD::ADDP, N, LoHi.first.getValueType(),
19880                              LoHi.first, LoHi.second);
19881 
19882   // Shuffle the elements back into order.
19883   SmallVector<int> NMask;
19884   for (unsigned I = 0, E = VT.getVectorNumElements() / 2; I < E; I++) {
19885     NMask.push_back(I);
19886     NMask.push_back(I);
19887   }
19888   Results.push_back(
19889       DAG.getVectorShuffle(VT, DL,
19890                            DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Addp,
19891                                        DAG.getUNDEF(LoHi.first.getValueType())),
19892                            DAG.getUNDEF(VT), NMask));
19893 }
19894 
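// Replace a wide reduction by splitting the vector operand in half, combining
// the halves element-wise with InterOp, and then reducing the result with
// AcrossOp, e.g. (illustrative) a UADDV over a wide vector becomes
// UADDV(ADD(Lo, Hi)) on the split halves.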
19895 static void ReplaceReductionResults(SDNode *N,
19896                                     SmallVectorImpl<SDValue> &Results,
19897                                     SelectionDAG &DAG, unsigned InterOp,
19898                                     unsigned AcrossOp) {
19899   EVT LoVT, HiVT;
19900   SDValue Lo, Hi;
19901   SDLoc dl(N);
19902   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
19903   std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
19904   SDValue InterVal = DAG.getNode(InterOp, dl, LoVT, Lo, Hi);
19905   SDValue SplitVal = DAG.getNode(AcrossOp, dl, LoVT, InterVal);
19906   Results.push_back(SplitVal);
19907 }
19908 
19909 static std::pair<SDValue, SDValue> splitInt128(SDValue N, SelectionDAG &DAG) {
19910   SDLoc DL(N);
19911   SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, N);
19912   SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64,
19913                            DAG.getNode(ISD::SRL, DL, MVT::i128, N,
19914                                        DAG.getConstant(64, DL, MVT::i64)));
19915   return std::make_pair(Lo, Hi);
19916 }
19917 
19918 void AArch64TargetLowering::ReplaceExtractSubVectorResults(
19919     SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
19920   SDValue In = N->getOperand(0);
19921   EVT InVT = In.getValueType();
19922 
19923   // Common code will handle these just fine.
19924   if (!InVT.isScalableVector() || !InVT.isInteger())
19925     return;
19926 
19927   SDLoc DL(N);
19928   EVT VT = N->getValueType(0);
19929 
19930   // The following checks bail if this is not a halving operation.
19931 
19932   ElementCount ResEC = VT.getVectorElementCount();
19933 
19934   if (InVT.getVectorElementCount() != (ResEC * 2))
19935     return;
19936 
19937   auto *CIndex = dyn_cast<ConstantSDNode>(N->getOperand(1));
19938   if (!CIndex)
19939     return;
19940 
19941   unsigned Index = CIndex->getZExtValue();
19942   if ((Index != 0) && (Index != ResEC.getKnownMinValue()))
19943     return;
19944 
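  // Extract the low or high half of a scalable integer vector whose result
  // type is illegal by unpacking to the wider element type and truncating
  // back, e.g. (illustrative) the low nxv8i8 half of an nxv16i8 is produced
  // as UUNPKLO to nxv8i16 followed by a truncate to nxv8i8.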
19945   unsigned Opcode = (Index == 0) ? AArch64ISD::UUNPKLO : AArch64ISD::UUNPKHI;
19946   EVT ExtendedHalfVT = VT.widenIntegerVectorElementType(*DAG.getContext());
19947 
19948   SDValue Half = DAG.getNode(Opcode, DL, ExtendedHalfVT, N->getOperand(0));
19949   Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Half));
19950 }
19951 
19952 // Create an even/odd pair of X registers holding integer value V.
19953 static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
19954   SDLoc dl(V.getNode());
19955   SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i64);
19956   SDValue VHi = DAG.getAnyExtOrTrunc(
19957       DAG.getNode(ISD::SRL, dl, MVT::i128, V, DAG.getConstant(64, dl, MVT::i64)),
19958       dl, MVT::i64);
19959   if (DAG.getDataLayout().isBigEndian())
    std::swap(VLo, VHi);
19961   SDValue RegClass =
19962       DAG.getTargetConstant(AArch64::XSeqPairsClassRegClassID, dl, MVT::i32);
19963   SDValue SubReg0 = DAG.getTargetConstant(AArch64::sube64, dl, MVT::i32);
19964   SDValue SubReg1 = DAG.getTargetConstant(AArch64::subo64, dl, MVT::i32);
19965   const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
19966   return SDValue(
19967       DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
19968 }
19969 
19970 static void ReplaceCMP_SWAP_128Results(SDNode *N,
19971                                        SmallVectorImpl<SDValue> &Results,
19972                                        SelectionDAG &DAG,
19973                                        const AArch64Subtarget *Subtarget) {
19974   assert(N->getValueType(0) == MVT::i128 &&
19975          "AtomicCmpSwap on types less than 128 should be legal");
19976 
19977   MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
19978   if (Subtarget->hasLSE() || Subtarget->outlineAtomics()) {
19979     // LSE has a 128-bit compare and swap (CASP), but i128 is not a legal type,
19980     // so lower it here, wrapped in REG_SEQUENCE and EXTRACT_SUBREG.
19981     SDValue Ops[] = {
19982         createGPRPairNode(DAG, N->getOperand(2)), // Compare value
19983         createGPRPairNode(DAG, N->getOperand(3)), // Store value
19984         N->getOperand(1), // Ptr
19985         N->getOperand(0), // Chain in
19986     };
19987 
19988     unsigned Opcode;
19989     switch (MemOp->getMergedOrdering()) {
19990     case AtomicOrdering::Monotonic:
19991       Opcode = AArch64::CASPX;
19992       break;
19993     case AtomicOrdering::Acquire:
19994       Opcode = AArch64::CASPAX;
19995       break;
19996     case AtomicOrdering::Release:
19997       Opcode = AArch64::CASPLX;
19998       break;
19999     case AtomicOrdering::AcquireRelease:
20000     case AtomicOrdering::SequentiallyConsistent:
20001       Opcode = AArch64::CASPALX;
20002       break;
20003     default:
20004       llvm_unreachable("Unexpected ordering!");
20005     }
20006 
20007     MachineSDNode *CmpSwap = DAG.getMachineNode(
20008         Opcode, SDLoc(N), DAG.getVTList(MVT::Untyped, MVT::Other), Ops);
20009     DAG.setNodeMemRefs(CmpSwap, {MemOp});
20010 
20011     unsigned SubReg1 = AArch64::sube64, SubReg2 = AArch64::subo64;
20012     if (DAG.getDataLayout().isBigEndian())
20013       std::swap(SubReg1, SubReg2);
20014     SDValue Lo = DAG.getTargetExtractSubreg(SubReg1, SDLoc(N), MVT::i64,
20015                                             SDValue(CmpSwap, 0));
20016     SDValue Hi = DAG.getTargetExtractSubreg(SubReg2, SDLoc(N), MVT::i64,
20017                                             SDValue(CmpSwap, 0));
20018     Results.push_back(
20019         DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128, Lo, Hi));
20020     Results.push_back(SDValue(CmpSwap, 1)); // Chain out
20021     return;
20022   }
20023 
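  // When CASP is not available, expand to a CMP_SWAP_128* pseudo that carries
  // the memory ordering and is later expanded into an LDXP/STXP loop.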
20024   unsigned Opcode;
20025   switch (MemOp->getMergedOrdering()) {
20026   case AtomicOrdering::Monotonic:
20027     Opcode = AArch64::CMP_SWAP_128_MONOTONIC;
20028     break;
20029   case AtomicOrdering::Acquire:
20030     Opcode = AArch64::CMP_SWAP_128_ACQUIRE;
20031     break;
20032   case AtomicOrdering::Release:
20033     Opcode = AArch64::CMP_SWAP_128_RELEASE;
20034     break;
20035   case AtomicOrdering::AcquireRelease:
20036   case AtomicOrdering::SequentiallyConsistent:
20037     Opcode = AArch64::CMP_SWAP_128;
20038     break;
20039   default:
20040     llvm_unreachable("Unexpected ordering!");
20041   }
20042 
20043   auto Desired = splitInt128(N->getOperand(2), DAG);
20044   auto New = splitInt128(N->getOperand(3), DAG);
20045   SDValue Ops[] = {N->getOperand(1), Desired.first, Desired.second,
20046                    New.first,        New.second,    N->getOperand(0)};
20047   SDNode *CmpSwap = DAG.getMachineNode(
20048       Opcode, SDLoc(N), DAG.getVTList(MVT::i64, MVT::i64, MVT::i32, MVT::Other),
20049       Ops);
20050   DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});
20051 
20052   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128,
20053                                 SDValue(CmpSwap, 0), SDValue(CmpSwap, 1)));
20054   Results.push_back(SDValue(CmpSwap, 3));
20055 }
20056 
20057 void AArch64TargetLowering::ReplaceNodeResults(
20058     SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
20059   switch (N->getOpcode()) {
20060   default:
20061     llvm_unreachable("Don't know how to custom expand this");
20062   case ISD::BITCAST:
20063     ReplaceBITCASTResults(N, Results, DAG);
20064     return;
20065   case ISD::VECREDUCE_ADD:
20066   case ISD::VECREDUCE_SMAX:
20067   case ISD::VECREDUCE_SMIN:
20068   case ISD::VECREDUCE_UMAX:
20069   case ISD::VECREDUCE_UMIN:
20070     Results.push_back(LowerVECREDUCE(SDValue(N, 0), DAG));
20071     return;
20072   case ISD::ADD:
20073   case ISD::FADD:
20074     ReplaceAddWithADDP(N, Results, DAG, Subtarget);
20075     return;
20076 
20077   case ISD::CTPOP:
20078   case ISD::PARITY:
20079     if (SDValue Result = LowerCTPOP_PARITY(SDValue(N, 0), DAG))
20080       Results.push_back(Result);
20081     return;
20082   case AArch64ISD::SADDV:
20083     ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::SADDV);
20084     return;
20085   case AArch64ISD::UADDV:
20086     ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::UADDV);
20087     return;
20088   case AArch64ISD::SMINV:
20089     ReplaceReductionResults(N, Results, DAG, ISD::SMIN, AArch64ISD::SMINV);
20090     return;
20091   case AArch64ISD::UMINV:
20092     ReplaceReductionResults(N, Results, DAG, ISD::UMIN, AArch64ISD::UMINV);
20093     return;
20094   case AArch64ISD::SMAXV:
20095     ReplaceReductionResults(N, Results, DAG, ISD::SMAX, AArch64ISD::SMAXV);
20096     return;
20097   case AArch64ISD::UMAXV:
20098     ReplaceReductionResults(N, Results, DAG, ISD::UMAX, AArch64ISD::UMAXV);
20099     return;
20100   case ISD::FP_TO_UINT:
20101   case ISD::FP_TO_SINT:
20102   case ISD::STRICT_FP_TO_SINT:
20103   case ISD::STRICT_FP_TO_UINT:
20104     assert(N->getValueType(0) == MVT::i128 && "unexpected illegal conversion");
20105     // Let normal code take care of it by not adding anything to Results.
20106     return;
20107   case ISD::ATOMIC_CMP_SWAP:
20108     ReplaceCMP_SWAP_128Results(N, Results, DAG, Subtarget);
20109     return;
20110   case ISD::ATOMIC_LOAD:
20111   case ISD::LOAD: {
20112     assert(SDValue(N, 0).getValueType() == MVT::i128 &&
20113            "unexpected load's value type");
20114     MemSDNode *LoadNode = cast<MemSDNode>(N);
20115     if ((!LoadNode->isVolatile() && !LoadNode->isAtomic()) ||
20116         LoadNode->getMemoryVT() != MVT::i128) {
      // Non-volatile, non-atomic loads are optimized later in AArch64's
      // load/store optimizer; only volatile or atomic i128 loads need the
      // custom LDP lowering below.
20119       return;
20120     }
20121 
20122     SDValue Result = DAG.getMemIntrinsicNode(
20123         AArch64ISD::LDP, SDLoc(N),
20124         DAG.getVTList({MVT::i64, MVT::i64, MVT::Other}),
20125         {LoadNode->getChain(), LoadNode->getBasePtr()}, LoadNode->getMemoryVT(),
20126         LoadNode->getMemOperand());
20127 
20128     SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128,
20129                                Result.getValue(0), Result.getValue(1));
20130     Results.append({Pair, Result.getValue(2) /* Chain */});
20131     return;
20132   }
20133   case ISD::EXTRACT_SUBVECTOR:
20134     ReplaceExtractSubVectorResults(N, Results, DAG);
20135     return;
20136   case ISD::INSERT_SUBVECTOR:
20137   case ISD::CONCAT_VECTORS:
    // Custom lowering has been requested for INSERT_SUBVECTOR and
    // CONCAT_VECTORS, but we delegate to common code for result type
    // legalisation.
20141     return;
20142   case ISD::INTRINSIC_WO_CHAIN: {
20143     EVT VT = N->getValueType(0);
20144     assert((VT == MVT::i8 || VT == MVT::i16) &&
20145            "custom lowering for unexpected type");
20146 
20147     ConstantSDNode *CN = cast<ConstantSDNode>(N->getOperand(0));
20148     Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
20149     switch (IntID) {
20150     default:
20151       return;
20152     case Intrinsic::aarch64_sve_clasta_n: {
20153       SDLoc DL(N);
20154       auto Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, N->getOperand(2));
20155       auto V = DAG.getNode(AArch64ISD::CLASTA_N, DL, MVT::i32,
20156                            N->getOperand(1), Op2, N->getOperand(3));
20157       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
20158       return;
20159     }
20160     case Intrinsic::aarch64_sve_clastb_n: {
20161       SDLoc DL(N);
20162       auto Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, N->getOperand(2));
20163       auto V = DAG.getNode(AArch64ISD::CLASTB_N, DL, MVT::i32,
20164                            N->getOperand(1), Op2, N->getOperand(3));
20165       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
20166       return;
20167     }
20168     case Intrinsic::aarch64_sve_lasta: {
20169       SDLoc DL(N);
20170       auto V = DAG.getNode(AArch64ISD::LASTA, DL, MVT::i32,
20171                            N->getOperand(1), N->getOperand(2));
20172       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
20173       return;
20174     }
20175     case Intrinsic::aarch64_sve_lastb: {
20176       SDLoc DL(N);
20177       auto V = DAG.getNode(AArch64ISD::LASTB, DL, MVT::i32,
20178                            N->getOperand(1), N->getOperand(2));
20179       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
20180       return;
20181     }
20182     }
20183   }
20184   }
20185 }
20186 
20187 bool AArch64TargetLowering::useLoadStackGuardNode() const {
20188   if (Subtarget->isTargetAndroid() || Subtarget->isTargetFuchsia())
20189     return TargetLowering::useLoadStackGuardNode();
20190   return true;
20191 }
20192 
20193 unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const {
20194   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
20195   // reciprocal if there are three or more FDIVs.
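  // For example, given x/d, y/d and z/d, compute r = 1/d once and replace the
  // divisions with x*r, y*r and z*r.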
20196   return 3;
20197 }
20198 
20199 TargetLoweringBase::LegalizeTypeAction
20200 AArch64TargetLowering::getPreferredVectorAction(MVT VT) const {
  // During type legalization, we prefer to widen v1i8, v1i16, v1i32 and v1f32
  // to v8i8, v4i16, v2i32 and v2f32 respectively, instead of promoting.
20203   if (VT == MVT::v1i8 || VT == MVT::v1i16 || VT == MVT::v1i32 ||
20204       VT == MVT::v1f32)
20205     return TypeWidenVector;
20206 
20207   return TargetLoweringBase::getPreferredVectorAction(VT);
20208 }
20209 
// With LSE2 (mandatory from Armv8.4-A), 128-bit ldp and stp instructions are
// guaranteed to be single-copy atomic provided the address is 16-byte
// aligned.
20212 bool AArch64TargetLowering::isOpSuitableForLDPSTP(const Instruction *I) const {
20213   if (!Subtarget->hasLSE2())
20214     return false;
20215 
20216   if (auto LI = dyn_cast<LoadInst>(I))
20217     return LI->getType()->getPrimitiveSizeInBits() == 128 &&
20218            LI->getAlign() >= Align(16);
20219 
20220   if (auto SI = dyn_cast<StoreInst>(I))
20221     return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() == 128 &&
20222            SI->getAlign() >= Align(16);
20223 
20224   return false;
20225 }
20226 
20227 bool AArch64TargetLowering::shouldInsertFencesForAtomic(
20228     const Instruction *I) const {
20229   return isOpSuitableForLDPSTP(I);
20230 }
20231 
// Loads and stores narrower than 128 bits are already atomic; wider ones
20233 // are doomed anyway, so defer to the default libcall and blame the OS when
20234 // things go wrong.
20235 TargetLoweringBase::AtomicExpansionKind
20236 AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
20237   unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
20238   if (Size != 128 || isOpSuitableForLDPSTP(SI))
20239     return AtomicExpansionKind::None;
20240   return AtomicExpansionKind::Expand;
20241 }
20242 
// Loads and stores narrower than 128 bits are already atomic; wider ones
20244 // are doomed anyway, so defer to the default libcall and blame the OS when
20245 // things go wrong.
20246 TargetLowering::AtomicExpansionKind
20247 AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
20248   unsigned Size = LI->getType()->getPrimitiveSizeInBits();
20249 
20250   if (Size != 128 || isOpSuitableForLDPSTP(LI))
20251     return AtomicExpansionKind::None;
20252 
20253   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
20254   // implement atomicrmw without spilling. If the target address is also on the
20255   // stack and close enough to the spill slot, this can lead to a situation
20256   // where the monitor always gets cleared and the atomic operation can never
20257   // succeed. So at -O0 lower this operation to a CAS loop.
20258   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
20259     return AtomicExpansionKind::CmpXChg;
20260 
20261   return AtomicExpansionKind::LLSC;
20262 }
20263 
// For the real atomic operations, we have ldxr/stxr up to 128 bits.
20265 TargetLowering::AtomicExpansionKind
20266 AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
20267   if (AI->isFloatingPointOperation())
20268     return AtomicExpansionKind::CmpXChg;
20269 
20270   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
20271   if (Size > 128) return AtomicExpansionKind::None;
20272 
20273   // Nand is not supported in LSE.
20274   // Leave 128 bits to LLSC or CmpXChg.
20275   if (AI->getOperation() != AtomicRMWInst::Nand && Size < 128) {
20276     if (Subtarget->hasLSE())
20277       return AtomicExpansionKind::None;
20278     if (Subtarget->outlineAtomics()) {
      // [U]Min/[U]Max RMW atomics are used in __sync_fetch_ libcalls so far.
20280       // Don't outline them unless
20281       // (1) high level <atomic> support approved:
20282       //   http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p0493r1.pdf
20283       // (2) low level libgcc and compiler-rt support implemented by:
20284       //   min/max outline atomics helpers
20285       if (AI->getOperation() != AtomicRMWInst::Min &&
20286           AI->getOperation() != AtomicRMWInst::Max &&
20287           AI->getOperation() != AtomicRMWInst::UMin &&
20288           AI->getOperation() != AtomicRMWInst::UMax) {
20289         return AtomicExpansionKind::None;
20290       }
20291     }
20292   }
20293 
20294   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
20295   // implement atomicrmw without spilling. If the target address is also on the
20296   // stack and close enough to the spill slot, this can lead to a situation
20297   // where the monitor always gets cleared and the atomic operation can never
20298   // succeed. So at -O0 lower this operation to a CAS loop.
20299   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
20300     return AtomicExpansionKind::CmpXChg;
20301 
20302   return AtomicExpansionKind::LLSC;
20303 }
20304 
20305 TargetLowering::AtomicExpansionKind
20306 AArch64TargetLowering::shouldExpandAtomicCmpXchgInIR(
20307     AtomicCmpXchgInst *AI) const {
20308   // If subtarget has LSE, leave cmpxchg intact for codegen.
20309   if (Subtarget->hasLSE() || Subtarget->outlineAtomics())
20310     return AtomicExpansionKind::None;
20311   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
20312   // implement cmpxchg without spilling. If the address being exchanged is also
20313   // on the stack and close enough to the spill slot, this can lead to a
20314   // situation where the monitor always gets cleared and the atomic operation
20315   // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
20316   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
20317     return AtomicExpansionKind::None;
20318 
20319   // 128-bit atomic cmpxchg is weird; AtomicExpand doesn't know how to expand
20320   // it.
20321   unsigned Size = AI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
20322   if (Size > 64)
20323     return AtomicExpansionKind::None;
20324 
20325   return AtomicExpansionKind::LLSC;
20326 }
20327 
20328 Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder,
20329                                              Type *ValueTy, Value *Addr,
20330                                              AtomicOrdering Ord) const {
20331   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20332   bool IsAcquire = isAcquireOrStronger(Ord);
20333 
  // Since i128 isn't legal and intrinsics don't get type-lowered, the
  // ldxp/ldaxp intrinsics must return {i64, i64} and we have to recombine the
  // two halves into a single i128 here.
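  // Illustrative shape of the IR emitted for an acquire i128 load-linked:
  //   %lohi = call { i64, i64 } @llvm.aarch64.ldaxp(i8* %addr)
  // followed by zext/shl/or to recombine the two halves into an i128.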
20337   if (ValueTy->getPrimitiveSizeInBits() == 128) {
20338     Intrinsic::ID Int =
20339         IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp;
20340     Function *Ldxr = Intrinsic::getDeclaration(M, Int);
20341 
20342     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
20343     Value *LoHi = Builder.CreateCall(Ldxr, Addr, "lohi");
20344 
20345     Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
20346     Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
20347     Lo = Builder.CreateZExt(Lo, ValueTy, "lo64");
20348     Hi = Builder.CreateZExt(Hi, ValueTy, "hi64");
20349     return Builder.CreateOr(
20350         Lo, Builder.CreateShl(Hi, ConstantInt::get(ValueTy, 64)), "val64");
20351   }
20352 
20353   Type *Tys[] = { Addr->getType() };
20354   Intrinsic::ID Int =
20355       IsAcquire ? Intrinsic::aarch64_ldaxr : Intrinsic::aarch64_ldxr;
20356   Function *Ldxr = Intrinsic::getDeclaration(M, Int, Tys);
20357 
20358   const DataLayout &DL = M->getDataLayout();
20359   IntegerType *IntEltTy = Builder.getIntNTy(DL.getTypeSizeInBits(ValueTy));
20360   CallInst *CI = Builder.CreateCall(Ldxr, Addr);
20361   CI->addParamAttr(
20362       0, Attribute::get(Builder.getContext(), Attribute::ElementType, ValueTy));
20363   Value *Trunc = Builder.CreateTrunc(CI, IntEltTy);
20364 
20365   return Builder.CreateBitCast(Trunc, ValueTy);
20366 }
20367 
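// Emit a CLREX to clear the exclusive monitor on the no-store path of a
// cmpxchg expansion, keeping the load-exclusive/store-exclusive pair balanced.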
20368 void AArch64TargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
20369     IRBuilderBase &Builder) const {
20370   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20371   Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::aarch64_clrex));
20372 }
20373 
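// Emit an exclusive store: STXR, or STLXR for release (or stronger) orderings
// (stxp/stlxp for i128 values). The returned i32 status value is zero when
// the store succeeds.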
20374 Value *AArch64TargetLowering::emitStoreConditional(IRBuilderBase &Builder,
20375                                                    Value *Val, Value *Addr,
20376                                                    AtomicOrdering Ord) const {
20377   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20378   bool IsRelease = isReleaseOrStronger(Ord);
20379 
20380   // Since the intrinsics must have legal type, the i128 intrinsics take two
20381   // parameters: "i64, i64". We must marshal Val into the appropriate form
20382   // before the call.
20383   if (Val->getType()->getPrimitiveSizeInBits() == 128) {
20384     Intrinsic::ID Int =
20385         IsRelease ? Intrinsic::aarch64_stlxp : Intrinsic::aarch64_stxp;
20386     Function *Stxr = Intrinsic::getDeclaration(M, Int);
20387     Type *Int64Ty = Type::getInt64Ty(M->getContext());
20388 
20389     Value *Lo = Builder.CreateTrunc(Val, Int64Ty, "lo");
20390     Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 64), Int64Ty, "hi");
20391     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
20392     return Builder.CreateCall(Stxr, {Lo, Hi, Addr});
20393   }
20394 
20395   Intrinsic::ID Int =
20396       IsRelease ? Intrinsic::aarch64_stlxr : Intrinsic::aarch64_stxr;
20397   Type *Tys[] = { Addr->getType() };
20398   Function *Stxr = Intrinsic::getDeclaration(M, Int, Tys);
20399 
20400   const DataLayout &DL = M->getDataLayout();
  IntegerType *IntValTy =
      Builder.getIntNTy(DL.getTypeSizeInBits(Val->getType()));
20402   Val = Builder.CreateBitCast(Val, IntValTy);
20403 
20404   CallInst *CI = Builder.CreateCall(
20405       Stxr, {Builder.CreateZExtOrBitCast(
20406                  Val, Stxr->getFunctionType()->getParamType(0)),
20407              Addr});
20408   CI->addParamAttr(1, Attribute::get(Builder.getContext(),
20409                                      Attribute::ElementType, Val->getType()));
20410   return CI;
20411 }
20412 
20413 bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
20414     Type *Ty, CallingConv::ID CallConv, bool isVarArg,
20415     const DataLayout &DL) const {
20416   if (!Ty->isArrayTy()) {
20417     const TypeSize &TySize = Ty->getPrimitiveSizeInBits();
20418     return TySize.isScalable() && TySize.getKnownMinSize() > 128;
20419   }
20420 
  // All non-aggregate members of the type must have the same type.
20422   SmallVector<EVT> ValueVTs;
20423   ComputeValueVTs(*this, DL, Ty, ValueVTs);
20424   return is_splat(ValueVTs);
20425 }
20426 
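// Prefer to keep (and/or (setcc), (setcc)) rather than normalizing it into a
// chain of selects; on AArch64 the combined condition can typically be
// selected to CCMP/CSEL sequences.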
20427 bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,
20428                                                             EVT) const {
20429   return false;
20430 }
20431 
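// Build a pointer at a fixed byte offset from the thread pointer, used to
// address platform-reserved TLS slots (e.g. the stack guard or the SafeStack
// pointer below).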
20432 static Value *UseTlsOffset(IRBuilderBase &IRB, unsigned Offset) {
20433   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
20434   Function *ThreadPointerFunc =
20435       Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
20436   return IRB.CreatePointerCast(
20437       IRB.CreateConstGEP1_32(IRB.getInt8Ty(), IRB.CreateCall(ThreadPointerFunc),
20438                              Offset),
20439       IRB.getInt8PtrTy()->getPointerTo(0));
20440 }
20441 
20442 Value *AArch64TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
20443   // Android provides a fixed TLS slot for the stack cookie. See the definition
20444   // of TLS_SLOT_STACK_GUARD in
20445   // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
20446   if (Subtarget->isTargetAndroid())
20447     return UseTlsOffset(IRB, 0x28);
20448 
20449   // Fuchsia is similar.
20450   // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
20451   if (Subtarget->isTargetFuchsia())
20452     return UseTlsOffset(IRB, -0x10);
20453 
20454   return TargetLowering::getIRStackGuard(IRB);
20455 }
20456 
20457 void AArch64TargetLowering::insertSSPDeclarations(Module &M) const {
  // The MSVC CRT provides functionality for stack protection.
20459   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) {
20460     // MSVC CRT has a global variable holding security cookie.
20461     M.getOrInsertGlobal("__security_cookie",
20462                         Type::getInt8PtrTy(M.getContext()));
20463 
20464     // MSVC CRT has a function to validate security cookie.
20465     FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
20466         "__security_check_cookie", Type::getVoidTy(M.getContext()),
20467         Type::getInt8PtrTy(M.getContext()));
20468     if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
20469       F->setCallingConv(CallingConv::Win64);
20470       F->addParamAttr(0, Attribute::AttrKind::InReg);
20471     }
20472     return;
20473   }
20474   TargetLowering::insertSSPDeclarations(M);
20475 }
20476 
20477 Value *AArch64TargetLowering::getSDagStackGuard(const Module &M) const {
20478   // MSVC CRT has a global variable holding security cookie.
20479   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
20480     return M.getGlobalVariable("__security_cookie");
20481   return TargetLowering::getSDagStackGuard(M);
20482 }
20483 
20484 Function *AArch64TargetLowering::getSSPStackGuardCheck(const Module &M) const {
20485   // MSVC CRT has a function to validate security cookie.
20486   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
20487     return M.getFunction("__security_check_cookie");
20488   return TargetLowering::getSSPStackGuardCheck(M);
20489 }
20490 
20491 Value *
20492 AArch64TargetLowering::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
20493   // Android provides a fixed TLS slot for the SafeStack pointer. See the
20494   // definition of TLS_SLOT_SAFESTACK in
20495   // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
20496   if (Subtarget->isTargetAndroid())
20497     return UseTlsOffset(IRB, 0x48);
20498 
20499   // Fuchsia is similar.
20500   // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
20501   if (Subtarget->isTargetFuchsia())
20502     return UseTlsOffset(IRB, -0x8);
20503 
20504   return TargetLowering::getSafeStackPointerLocation(IRB);
20505 }
20506 
20507 bool AArch64TargetLowering::isMaskAndCmp0FoldingBeneficial(
20508     const Instruction &AndI) const {
  // Only sink the 'and' mask to the cmp's use block if it is masking a single
  // bit, since that makes it likely the and/cmp/br will be folded into a
  // single tbz instruction. It may be beneficial to sink in other cases, but
  // we would have to check that the cmp would not get folded into the br to
  // form a cbz for those to be beneficial.
20514   ConstantInt* Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
20515   if (!Mask)
20516     return false;
20517   return Mask->getValue().isPowerOf2();
20518 }
20519 
20520 bool AArch64TargetLowering::
20521     shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
20522         SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
20523         unsigned OldShiftOpcode, unsigned NewShiftOpcode,
20524         SelectionDAG &DAG) const {
  // If the baseline implementation recommends against performing the fold by
  // default, honor that.
20526   if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
20527           X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
20528     return false;
20529   // Else, if this is a vector shift, prefer 'shl'.
20530   return X.getValueType().isScalarInteger() || NewShiftOpcode == ISD::SHL;
20531 }
20532 
20533 bool AArch64TargetLowering::shouldExpandShift(SelectionDAG &DAG,
20534                                               SDNode *N) const {
20535   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
20536       !Subtarget->isTargetWindows() && !Subtarget->isTargetDarwin())
20537     return false;
20538   return true;
20539 }
20540 
20541 void AArch64TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  // Update IsSplitCSR in AArch64FunctionInfo.
20543   AArch64FunctionInfo *AFI = Entry->getParent()->getInfo<AArch64FunctionInfo>();
20544   AFI->setIsSplitCSR(true);
20545 }
20546 
20547 void AArch64TargetLowering::insertCopiesSplitCSR(
20548     MachineBasicBlock *Entry,
20549     const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
20550   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
20551   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
20552   if (!IStart)
20553     return;
20554 
20555   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20556   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
20557   MachineBasicBlock::iterator MBBI = Entry->begin();
20558   for (const MCPhysReg *I = IStart; *I; ++I) {
20559     const TargetRegisterClass *RC = nullptr;
20560     if (AArch64::GPR64RegClass.contains(*I))
20561       RC = &AArch64::GPR64RegClass;
20562     else if (AArch64::FPR64RegClass.contains(*I))
20563       RC = &AArch64::FPR64RegClass;
20564     else
20565       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
20566 
20567     Register NewVR = MRI->createVirtualRegister(RC);
20568     // Create copy from CSR to a virtual register.
20569     // FIXME: this currently does not emit CFI pseudo-instructions, it works
20570     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
20571     // nounwind. If we want to generalize this later, we may need to emit
20572     // CFI pseudo-instructions.
20573     assert(Entry->getParent()->getFunction().hasFnAttribute(
20574                Attribute::NoUnwind) &&
20575            "Function should be nounwind in insertCopiesSplitCSR!");
20576     Entry->addLiveIn(*I);
20577     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
20578         .addReg(*I);
20579 
20580     // Insert the copy-back instructions right before the terminator.
20581     for (auto *Exit : Exits)
20582       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
20583               TII->get(TargetOpcode::COPY), *I)
20584           .addReg(NewVR);
20585   }
20586 }
20587 
20588 bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
20589   // Integer division on AArch64 is expensive. However, when aggressively
20590   // optimizing for code size, we prefer to use a div instruction, as it is
20591   // usually smaller than the alternative sequence.
20592   // The exception to this is vector division. Since AArch64 doesn't have vector
20593   // integer division, leaving the division as-is is a loss even in terms of
20594   // size, because it will have to be scalarized, while the alternative code
20595   // sequence can be performed in vector form.
20596   bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
20597   return OptSize && !VT.isVector();
20598 }
20599 
20600 bool AArch64TargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
20601   // We want inc-of-add for scalars and sub-of-not for vectors.
20602   return VT.isScalarInteger();
20603 }
20604 
20605 bool AArch64TargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
20606                                                  EVT VT) const {
  // v8f16 without fp16 needs to be extended to v8f32, which is more difficult
  // to legalize.
20609   if (FPVT == MVT::v8f16 && !Subtarget->hasFullFP16())
20610     return false;
20611   return TargetLowering::shouldConvertFpToSat(Op, FPVT, VT);
20612 }
20613 
20614 bool AArch64TargetLowering::enableAggressiveFMAFusion(EVT VT) const {
20615   return Subtarget->hasAggressiveFMA() && VT.isFloatingPoint();
20616 }
20617 
20618 unsigned
20619 AArch64TargetLowering::getVaListSizeInBits(const DataLayout &DL) const {
20620   if (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())
20621     return getPointerTy(DL).getSizeInBits();
20622 
20623   return 3 * getPointerTy(DL).getSizeInBits() + 2 * 32;
20624 }
20625 
20626 void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
20627   MachineFrameInfo &MFI = MF.getFrameInfo();
20628   // If we have any vulnerable SVE stack objects then the stack protector
20629   // needs to be placed at the top of the SVE stack area, as the SVE locals
20630   // are placed above the other locals, so we allocate it as if it were a
20631   // scalable vector.
20632   // FIXME: It may be worthwhile having a specific interface for this rather
20633   // than doing it here in finalizeLowering.
20634   if (MFI.hasStackProtectorIndex()) {
20635     for (unsigned int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
20636       if (MFI.getStackID(i) == TargetStackID::ScalableVector &&
20637           MFI.getObjectSSPLayout(i) != MachineFrameInfo::SSPLK_None) {
20638         MFI.setStackID(MFI.getStackProtectorIndex(),
20639                        TargetStackID::ScalableVector);
20640         MFI.setObjectAlignment(MFI.getStackProtectorIndex(), Align(16));
20641         break;
20642       }
20643     }
20644   }
20645   MFI.computeMaxCallFrameSize(MF);
20646   TargetLoweringBase::finalizeLowering(MF);
20647 }
20648 
20649 // Unlike X86, we let frame lowering assign offsets to all catch objects.
20650 bool AArch64TargetLowering::needsFixedCatchObjects() const {
20651   return false;
20652 }
20653 
20654 bool AArch64TargetLowering::shouldLocalize(
20655     const MachineInstr &MI, const TargetTransformInfo *TTI) const {
20656   switch (MI.getOpcode()) {
20657   case TargetOpcode::G_GLOBAL_VALUE: {
    // On Darwin, TLS global vars get selected into function calls, which
    // we don't want localized, as they can get moved into the middle of
    // another call sequence.
20661     const GlobalValue &GV = *MI.getOperand(1).getGlobal();
20662     if (GV.isThreadLocal() && Subtarget->isTargetMachO())
20663       return false;
20664     break;
20665   }
20666   // If we legalized G_GLOBAL_VALUE into ADRP + G_ADD_LOW, mark both as being
20667   // localizable.
20668   case AArch64::ADRP:
20669   case AArch64::G_ADD_LOW:
20670     return true;
20671   default:
20672     break;
20673   }
20674   return TargetLoweringBase::shouldLocalize(MI, TTI);
20675 }
20676 
20677 bool AArch64TargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
20678   if (isa<ScalableVectorType>(Inst.getType()))
20679     return true;
20680 
20681   for (unsigned i = 0; i < Inst.getNumOperands(); ++i)
20682     if (isa<ScalableVectorType>(Inst.getOperand(i)->getType()))
20683       return true;
20684 
20685   if (const AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
20686     if (isa<ScalableVectorType>(AI->getAllocatedType()))
20687       return true;
20688   }
20689 
20690   return false;
20691 }
20692 
20693 // Return the largest legal scalable vector type that matches VT's element type.
20694 static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT) {
20695   assert(VT.isFixedLengthVector() &&
20696          DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
20697          "Expected legal fixed length vector!");
20698   switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
20699   default:
20700     llvm_unreachable("unexpected element type for SVE container");
20701   case MVT::i8:
20702     return EVT(MVT::nxv16i8);
20703   case MVT::i16:
20704     return EVT(MVT::nxv8i16);
20705   case MVT::i32:
20706     return EVT(MVT::nxv4i32);
20707   case MVT::i64:
20708     return EVT(MVT::nxv2i64);
20709   case MVT::f16:
20710     return EVT(MVT::nxv8f16);
20711   case MVT::f32:
20712     return EVT(MVT::nxv4f32);
20713   case MVT::f64:
20714     return EVT(MVT::nxv2f64);
20715   }
20716 }
20717 
20718 // Return a PTRUE with active lanes corresponding to the extent of VT.
20719 static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
20720                                                 EVT VT) {
20721   assert(VT.isFixedLengthVector() &&
20722          DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
20723          "Expected legal fixed length vector!");
20724 
20725   Optional<unsigned> PgPattern =
20726       getSVEPredPatternFromNumElements(VT.getVectorNumElements());
20727   assert(PgPattern && "Unexpected element count for SVE predicate");
20728 
20729   // For vectors that are exactly getMaxSVEVectorSizeInBits big, we can use
20730   // AArch64SVEPredPattern::all, which can enable the use of unpredicated
20731   // variants of instructions when available.
20732   const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
20733   unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
20734   unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
20735   if (MaxSVESize && MinSVESize == MaxSVESize &&
20736       MaxSVESize == VT.getSizeInBits())
20737     PgPattern = AArch64SVEPredPattern::all;
20738 
20739   MVT MaskVT;
20740   switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
20741   default:
20742     llvm_unreachable("unexpected element type for SVE predicate");
20743   case MVT::i8:
20744     MaskVT = MVT::nxv16i1;
20745     break;
20746   case MVT::i16:
20747   case MVT::f16:
20748     MaskVT = MVT::nxv8i1;
20749     break;
20750   case MVT::i32:
20751   case MVT::f32:
20752     MaskVT = MVT::nxv4i1;
20753     break;
20754   case MVT::i64:
20755   case MVT::f64:
20756     MaskVT = MVT::nxv2i1;
20757     break;
20758   }
20759 
20760   return getPTrue(DAG, DL, MaskVT, *PgPattern);
20761 }
20762 
20763 static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
20764                                              EVT VT) {
20765   assert(VT.isScalableVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
20766          "Expected legal scalable vector!");
20767   auto PredTy = VT.changeVectorElementType(MVT::i1);
20768   return getPTrue(DAG, DL, PredTy, AArch64SVEPredPattern::all);
20769 }
20770 
20771 static SDValue getPredicateForVector(SelectionDAG &DAG, SDLoc &DL, EVT VT) {
20772   if (VT.isFixedLengthVector())
20773     return getPredicateForFixedLengthVector(DAG, DL, VT);
20774 
20775   return getPredicateForScalableVector(DAG, DL, VT);
20776 }
20777 
20778 // Grow V to consume an entire SVE register.
20779 static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {
20780   assert(VT.isScalableVector() &&
20781          "Expected to convert into a scalable vector!");
20782   assert(V.getValueType().isFixedLengthVector() &&
20783          "Expected a fixed length vector operand!");
20784   SDLoc DL(V);
20785   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
20786   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
20787 }
20788 
20789 // Shrink V so it's just big enough to maintain a VT's worth of data.
20790 static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {
20791   assert(VT.isFixedLengthVector() &&
20792          "Expected to convert into a fixed length vector!");
20793   assert(V.getValueType().isScalableVector() &&
20794          "Expected a scalable vector operand!");
20795   SDLoc DL(V);
20796   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
20797   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
20798 }
20799 
20800 // Convert all fixed length vector loads larger than NEON to masked_loads.
20801 SDValue AArch64TargetLowering::LowerFixedLengthVectorLoadToSVE(
20802     SDValue Op, SelectionDAG &DAG) const {
20803   auto Load = cast<LoadSDNode>(Op);
20804 
20805   SDLoc DL(Op);
20806   EVT VT = Op.getValueType();
20807   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
20808   EVT LoadVT = ContainerVT;
20809   EVT MemVT = Load->getMemoryVT();
20810 
20811   auto Pg = getPredicateForFixedLengthVector(DAG, DL, VT);
20812 
20813   if (VT.isFloatingPoint() && Load->getExtensionType() == ISD::EXTLOAD) {
20814     LoadVT = ContainerVT.changeTypeToInteger();
20815     MemVT = MemVT.changeTypeToInteger();
20816   }
20817 
20818   SDValue NewLoad = DAG.getMaskedLoad(
20819       LoadVT, DL, Load->getChain(), Load->getBasePtr(), Load->getOffset(), Pg,
20820       DAG.getUNDEF(LoadVT), MemVT, Load->getMemOperand(),
20821       Load->getAddressingMode(), Load->getExtensionType());
20822 
20823   SDValue Result = NewLoad;
20824   if (VT.isFloatingPoint() && Load->getExtensionType() == ISD::EXTLOAD) {
20825     EVT ExtendVT = ContainerVT.changeVectorElementType(
20826         Load->getMemoryVT().getVectorElementType());
20827 
20828     Result = getSVESafeBitCast(ExtendVT, Result, DAG);
20829     Result = DAG.getNode(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU, DL, ContainerVT,
20830                          Pg, Result, DAG.getUNDEF(ContainerVT));
20831   }
20832 
20833   Result = convertFromScalableVector(DAG, VT, Result);
20834   SDValue MergedValues[2] = {Result, NewLoad.getValue(1)};
20835   return DAG.getMergeValues(MergedValues, DL);
20836 }
20837 
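// Convert a fixed length vector mask into an SVE predicate. An all-ones mask
// maps directly onto the PTRUE for the fixed length type; otherwise the
// widened mask is compared against zero under that predicate.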
20838 static SDValue convertFixedMaskToScalableVector(SDValue Mask,
20839                                                 SelectionDAG &DAG) {
20840   SDLoc DL(Mask);
20841   EVT InVT = Mask.getValueType();
20842   EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
20843 
20844   auto Pg = getPredicateForFixedLengthVector(DAG, DL, InVT);
20845 
20846   if (ISD::isBuildVectorAllOnes(Mask.getNode()))
20847     return Pg;
20848 
20849   auto Op1 = convertToScalableVector(DAG, ContainerVT, Mask);
20850   auto Op2 = DAG.getConstant(0, DL, ContainerVT);
20851 
20852   return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, Pg.getValueType(),
20853                      {Pg, Op1, Op2, DAG.getCondCode(ISD::SETNE)});
20854 }
20855 
// Convert fixed length masked vector loads larger than NEON to SVE masked
// loads.
20857 SDValue AArch64TargetLowering::LowerFixedLengthVectorMLoadToSVE(
20858     SDValue Op, SelectionDAG &DAG) const {
20859   auto Load = cast<MaskedLoadSDNode>(Op);
20860 
20861   SDLoc DL(Op);
20862   EVT VT = Op.getValueType();
20863   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
20864 
20865   SDValue Mask = convertFixedMaskToScalableVector(Load->getMask(), DAG);
20866 
20867   SDValue PassThru;
20868   bool IsPassThruZeroOrUndef = false;
20869 
20870   if (Load->getPassThru()->isUndef()) {
20871     PassThru = DAG.getUNDEF(ContainerVT);
20872     IsPassThruZeroOrUndef = true;
20873   } else {
20874     if (ContainerVT.isInteger())
20875       PassThru = DAG.getConstant(0, DL, ContainerVT);
20876     else
20877       PassThru = DAG.getConstantFP(0, DL, ContainerVT);
20878     if (isZerosVector(Load->getPassThru().getNode()))
20879       IsPassThruZeroOrUndef = true;
20880   }
20881 
20882   SDValue NewLoad = DAG.getMaskedLoad(
20883       ContainerVT, DL, Load->getChain(), Load->getBasePtr(), Load->getOffset(),
20884       Mask, PassThru, Load->getMemoryVT(), Load->getMemOperand(),
20885       Load->getAddressingMode(), Load->getExtensionType());
20886 
20887   SDValue Result = NewLoad;
20888   if (!IsPassThruZeroOrUndef) {
20889     SDValue OldPassThru =
20890         convertToScalableVector(DAG, ContainerVT, Load->getPassThru());
20891     Result = DAG.getSelect(DL, ContainerVT, Mask, Result, OldPassThru);
20892   }
20893 
20894   Result = convertFromScalableVector(DAG, VT, Result);
20895   SDValue MergedValues[2] = {Result, NewLoad.getValue(1)};
20896   return DAG.getMergeValues(MergedValues, DL);
20897 }
20898 
20899 // Convert all fixed length vector stores larger than NEON to masked_stores.
20900 SDValue AArch64TargetLowering::LowerFixedLengthVectorStoreToSVE(
20901     SDValue Op, SelectionDAG &DAG) const {
20902   auto Store = cast<StoreSDNode>(Op);
20903 
20904   SDLoc DL(Op);
20905   EVT VT = Store->getValue().getValueType();
20906   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
20907   EVT MemVT = Store->getMemoryVT();
20908 
20909   auto Pg = getPredicateForFixedLengthVector(DAG, DL, VT);
20910   auto NewValue = convertToScalableVector(DAG, ContainerVT, Store->getValue());
20911 
20912   if (VT.isFloatingPoint() && Store->isTruncatingStore()) {
20913     EVT TruncVT = ContainerVT.changeVectorElementType(
20914         Store->getMemoryVT().getVectorElementType());
20915     MemVT = MemVT.changeTypeToInteger();
20916     NewValue = DAG.getNode(AArch64ISD::FP_ROUND_MERGE_PASSTHRU, DL, TruncVT, Pg,
20917                            NewValue, DAG.getTargetConstant(0, DL, MVT::i64),
20918                            DAG.getUNDEF(TruncVT));
20919     NewValue =
20920         getSVESafeBitCast(ContainerVT.changeTypeToInteger(), NewValue, DAG);
20921   }
20922 
20923   return DAG.getMaskedStore(Store->getChain(), DL, NewValue,
20924                             Store->getBasePtr(), Store->getOffset(), Pg, MemVT,
20925                             Store->getMemOperand(), Store->getAddressingMode(),
20926                             Store->isTruncatingStore());
20927 }
20928 
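// Convert fixed length masked vector stores larger than NEON to SVE masked
// stores.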
20929 SDValue AArch64TargetLowering::LowerFixedLengthVectorMStoreToSVE(
20930     SDValue Op, SelectionDAG &DAG) const {
20931   auto *Store = cast<MaskedStoreSDNode>(Op);
20932 
20933   SDLoc DL(Op);
20934   EVT VT = Store->getValue().getValueType();
20935   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
20936 
20937   auto NewValue = convertToScalableVector(DAG, ContainerVT, Store->getValue());
20938   SDValue Mask = convertFixedMaskToScalableVector(Store->getMask(), DAG);
20939 
20940   return DAG.getMaskedStore(
20941       Store->getChain(), DL, NewValue, Store->getBasePtr(), Store->getOffset(),
20942       Mask, Store->getMemoryVT(), Store->getMemOperand(),
20943       Store->getAddressingMode(), Store->isTruncatingStore());
20944 }
20945 
20946 SDValue AArch64TargetLowering::LowerFixedLengthVectorIntDivideToSVE(
20947     SDValue Op, SelectionDAG &DAG) const {
20948   SDLoc dl(Op);
20949   EVT VT = Op.getValueType();
20950   EVT EltVT = VT.getVectorElementType();
20951 
20952   bool Signed = Op.getOpcode() == ISD::SDIV;
20953   unsigned PredOpcode = Signed ? AArch64ISD::SDIV_PRED : AArch64ISD::UDIV_PRED;
20954 
20955   bool Negated;
20956   uint64_t SplatVal;
20957   if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated)) {
20958     EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
20959     SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
20960     SDValue Op2 = DAG.getTargetConstant(Log2_64(SplatVal), dl, MVT::i32);
20961 
20962     SDValue Pg = getPredicateForFixedLengthVector(DAG, dl, VT);
    SDValue Res =
        DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, dl, ContainerVT, Pg, Op1, Op2);
20964     if (Negated)
20965       Res = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), Res);
20966 
20967     return convertFromScalableVector(DAG, VT, Res);
20968   }
20969 
20970   // Scalable vector i32/i64 DIV is supported.
20971   if (EltVT == MVT::i32 || EltVT == MVT::i64)
20972     return LowerToPredicatedOp(Op, DAG, PredOpcode);
20973 
20974   // Scalable vector i8/i16 DIV is not supported. Promote it to i32.
20975   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
20976   EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
20977   EVT FixedWidenedVT = HalfVT.widenIntegerVectorElementType(*DAG.getContext());
20978   EVT ScalableWidenedVT = getContainerForFixedLengthVector(DAG, FixedWidenedVT);
20979 
20980   // If this is not a full vector, extend, div, and truncate it.
20981   EVT WidenedVT = VT.widenIntegerVectorElementType(*DAG.getContext());
20982   if (DAG.getTargetLoweringInfo().isTypeLegal(WidenedVT)) {
20983     unsigned ExtendOpcode = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
20984     SDValue Op0 = DAG.getNode(ExtendOpcode, dl, WidenedVT, Op.getOperand(0));
20985     SDValue Op1 = DAG.getNode(ExtendOpcode, dl, WidenedVT, Op.getOperand(1));
20986     SDValue Div = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0, Op1);
20987     return DAG.getNode(ISD::TRUNCATE, dl, VT, Div);
20988   }
20989 
20990   // Convert the operands to scalable vectors.
20991   SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
20992   SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(1));
20993 
20994   // Extend the scalable operands.
20995   unsigned UnpkLo = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
20996   unsigned UnpkHi = Signed ? AArch64ISD::SUNPKHI : AArch64ISD::UUNPKHI;
20997   SDValue Op0Lo = DAG.getNode(UnpkLo, dl, ScalableWidenedVT, Op0);
20998   SDValue Op1Lo = DAG.getNode(UnpkLo, dl, ScalableWidenedVT, Op1);
20999   SDValue Op0Hi = DAG.getNode(UnpkHi, dl, ScalableWidenedVT, Op0);
21000   SDValue Op1Hi = DAG.getNode(UnpkHi, dl, ScalableWidenedVT, Op1);
21001 
21002   // Convert back to fixed vectors so the DIV can be further lowered.
21003   Op0Lo = convertFromScalableVector(DAG, FixedWidenedVT, Op0Lo);
21004   Op1Lo = convertFromScalableVector(DAG, FixedWidenedVT, Op1Lo);
21005   Op0Hi = convertFromScalableVector(DAG, FixedWidenedVT, Op0Hi);
21006   Op1Hi = convertFromScalableVector(DAG, FixedWidenedVT, Op1Hi);
21007   SDValue ResultLo = DAG.getNode(Op.getOpcode(), dl, FixedWidenedVT,
21008                                  Op0Lo, Op1Lo);
21009   SDValue ResultHi = DAG.getNode(Op.getOpcode(), dl, FixedWidenedVT,
21010                                  Op0Hi, Op1Hi);
21011 
21012   // Convert again to scalable vectors to truncate.
21013   ResultLo = convertToScalableVector(DAG, ScalableWidenedVT, ResultLo);
21014   ResultHi = convertToScalableVector(DAG, ScalableWidenedVT, ResultHi);
21015   SDValue ScalableResult = DAG.getNode(AArch64ISD::UZP1, dl, ContainerVT,
21016                                        ResultLo, ResultHi);
21017 
21018   return convertFromScalableVector(DAG, VT, ScalableResult);
21019 }
21020 
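// Lower a fixed length vector extend by repeatedly unpacking the low half
// with [SU]UNPKLO until the destination element width is reached.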
21021 SDValue AArch64TargetLowering::LowerFixedLengthVectorIntExtendToSVE(
21022     SDValue Op, SelectionDAG &DAG) const {
21023   EVT VT = Op.getValueType();
21024   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21025 
21026   SDLoc DL(Op);
21027   SDValue Val = Op.getOperand(0);
21028   EVT ContainerVT = getContainerForFixedLengthVector(DAG, Val.getValueType());
21029   Val = convertToScalableVector(DAG, ContainerVT, Val);
21030 
21031   bool Signed = Op.getOpcode() == ISD::SIGN_EXTEND;
21032   unsigned ExtendOpc = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
21033 
21034   // Repeatedly unpack Val until the result is of the desired element type.
21035   switch (ContainerVT.getSimpleVT().SimpleTy) {
21036   default:
21037     llvm_unreachable("unimplemented container type");
21038   case MVT::nxv16i8:
21039     Val = DAG.getNode(ExtendOpc, DL, MVT::nxv8i16, Val);
21040     if (VT.getVectorElementType() == MVT::i16)
21041       break;
21042     LLVM_FALLTHROUGH;
21043   case MVT::nxv8i16:
21044     Val = DAG.getNode(ExtendOpc, DL, MVT::nxv4i32, Val);
21045     if (VT.getVectorElementType() == MVT::i32)
21046       break;
21047     LLVM_FALLTHROUGH;
21048   case MVT::nxv4i32:
21049     Val = DAG.getNode(ExtendOpc, DL, MVT::nxv2i64, Val);
21050     assert(VT.getVectorElementType() == MVT::i64 && "Unexpected element type!");
21051     break;
21052   }
21053 
21054   return convertFromScalableVector(DAG, VT, Val);
21055 }
21056 
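// Lower a fixed length vector truncate by bitcasting to the next narrower
// element type and using UZP1 to keep the low half of each element, repeating
// until the destination element width is reached.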
21057 SDValue AArch64TargetLowering::LowerFixedLengthVectorTruncateToSVE(
21058     SDValue Op, SelectionDAG &DAG) const {
21059   EVT VT = Op.getValueType();
21060   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21061 
21062   SDLoc DL(Op);
21063   SDValue Val = Op.getOperand(0);
21064   EVT ContainerVT = getContainerForFixedLengthVector(DAG, Val.getValueType());
21065   Val = convertToScalableVector(DAG, ContainerVT, Val);
21066 
21067   // Repeatedly truncate Val until the result is of the desired element type.
21068   switch (ContainerVT.getSimpleVT().SimpleTy) {
21069   default:
21070     llvm_unreachable("unimplemented container type");
21071   case MVT::nxv2i64:
21072     Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv4i32, Val);
21073     Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv4i32, Val, Val);
21074     if (VT.getVectorElementType() == MVT::i32)
21075       break;
21076     LLVM_FALLTHROUGH;
21077   case MVT::nxv4i32:
21078     Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv8i16, Val);
21079     Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv8i16, Val, Val);
21080     if (VT.getVectorElementType() == MVT::i16)
21081       break;
21082     LLVM_FALLTHROUGH;
21083   case MVT::nxv8i16:
21084     Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv16i8, Val);
21085     Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv16i8, Val, Val);
21086     assert(VT.getVectorElementType() == MVT::i8 && "Unexpected element type!");
21087     break;
21088   }
21089 
21090   return convertFromScalableVector(DAG, VT, Val);
21091 }
21092 
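// Lower a fixed length EXTRACT_VECTOR_ELT by extracting directly from the
// scalable container; the index is unchanged because the fixed length data
// occupies the low lanes of the SVE register.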
21093 SDValue AArch64TargetLowering::LowerFixedLengthExtractVectorElt(
21094     SDValue Op, SelectionDAG &DAG) const {
21095   EVT VT = Op.getValueType();
21096   EVT InVT = Op.getOperand(0).getValueType();
21097   assert(InVT.isFixedLengthVector() && "Expected fixed length vector type!");
21098 
21099   SDLoc DL(Op);
21100   EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21101   SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(0));
21102 
21103   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op0, Op.getOperand(1));
21104 }
21105 
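// Lower a fixed length INSERT_VECTOR_ELT by inserting into the scalable
// container and converting the result back to a fixed length vector.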
21106 SDValue AArch64TargetLowering::LowerFixedLengthInsertVectorElt(
21107     SDValue Op, SelectionDAG &DAG) const {
21108   EVT VT = Op.getValueType();
21109   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21110 
21111   SDLoc DL(Op);
21112   EVT InVT = Op.getOperand(0).getValueType();
21113   EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21114   SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(0));
21115 
21116   auto ScalableRes = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT, Op0,
21117                                  Op.getOperand(1), Op.getOperand(2));
21118 
21119   return convertFromScalableVector(DAG, VT, ScalableRes);
21120 }
21121 
21122 // Convert vector operation 'Op' to an equivalent predicated operation whereby
21123 // the original operation's type is used to construct a suitable predicate.
21124 // NOTE: The results for inactive lanes are undefined.
21125 SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
21126                                                    SelectionDAG &DAG,
21127                                                    unsigned NewOp) const {
21128   EVT VT = Op.getValueType();
21129   SDLoc DL(Op);
21130   auto Pg = getPredicateForVector(DAG, DL, VT);
21131 
21132   if (VT.isFixedLengthVector()) {
21133     assert(isTypeLegal(VT) && "Expected only legal fixed-width types");
21134     EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21135 
21136     // Create list of operands by converting existing ones to scalable types.
21137     SmallVector<SDValue, 4> Operands = {Pg};
21138     for (const SDValue &V : Op->op_values()) {
21139       if (isa<CondCodeSDNode>(V)) {
21140         Operands.push_back(V);
21141         continue;
21142       }
21143 
21144       if (const VTSDNode *VTNode = dyn_cast<VTSDNode>(V)) {
21145         EVT VTArg = VTNode->getVT().getVectorElementType();
21146         EVT NewVTArg = ContainerVT.changeVectorElementType(VTArg);
21147         Operands.push_back(DAG.getValueType(NewVTArg));
21148         continue;
21149       }
21150 
21151       assert(isTypeLegal(V.getValueType()) &&
21152              "Expected only legal fixed-width types");
21153       Operands.push_back(convertToScalableVector(DAG, ContainerVT, V));
21154     }
21155 
21156     if (isMergePassthruOpcode(NewOp))
21157       Operands.push_back(DAG.getUNDEF(ContainerVT));
21158 
21159     auto ScalableRes = DAG.getNode(NewOp, DL, ContainerVT, Operands);
21160     return convertFromScalableVector(DAG, VT, ScalableRes);
21161   }
21162 
21163   assert(VT.isScalableVector() && "Only expect to lower scalable vector op!");
21164 
21165   SmallVector<SDValue, 4> Operands = {Pg};
21166   for (const SDValue &V : Op->op_values()) {
21167     assert((!V.getValueType().isVector() ||
21168             V.getValueType().isScalableVector()) &&
21169            "Only scalable vectors are supported!");
21170     Operands.push_back(V);
21171   }
21172 
21173   if (isMergePassthruOpcode(NewOp))
21174     Operands.push_back(DAG.getUNDEF(VT));
21175 
21176   return DAG.getNode(NewOp, DL, VT, Operands, Op->getFlags());
21177 }
21178 
21179 // If a fixed length vector operation has no side effects when applied to
21180 // undefined elements, we can safely use scalable vectors to perform the same
21181 // operation without needing to worry about predication.
21182 SDValue AArch64TargetLowering::LowerToScalableOp(SDValue Op,
21183                                                  SelectionDAG &DAG) const {
21184   EVT VT = Op.getValueType();
21185   assert(useSVEForFixedLengthVectorVT(VT) &&
21186          "Only expected to lower fixed length vector operation!");
21187   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21188 
21189   // Create list of operands by converting existing ones to scalable types.
21190   SmallVector<SDValue, 4> Ops;
21191   for (const SDValue &V : Op->op_values()) {
21192     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
21193 
21194     // Pass through non-vector operands.
21195     if (!V.getValueType().isVector()) {
21196       Ops.push_back(V);
21197       continue;
21198     }
21199 
21200     // "cast" fixed length vector to a scalable vector.
21201     assert(useSVEForFixedLengthVectorVT(V.getValueType()) &&
21202            "Only fixed length vectors are supported!");
21203     Ops.push_back(convertToScalableVector(DAG, ContainerVT, V));
21204   }
21205 
21206   auto ScalableRes = DAG.getNode(Op.getOpcode(), SDLoc(Op), ContainerVT, Ops);
21207   return convertFromScalableVector(DAG, VT, ScalableRes);
21208 }
21209 
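// Lower an ordered floating-point add reduction (VECREDUCE_SEQ_FADD) using
// SVE's FADDA, which folds the vector into the initial accumulator strictly
// in lane order.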
21210 SDValue AArch64TargetLowering::LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp,
21211     SelectionDAG &DAG) const {
21212   SDLoc DL(ScalarOp);
21213   SDValue AccOp = ScalarOp.getOperand(0);
21214   SDValue VecOp = ScalarOp.getOperand(1);
21215   EVT SrcVT = VecOp.getValueType();
21216   EVT ResVT = SrcVT.getVectorElementType();
21217 
21218   EVT ContainerVT = SrcVT;
21219   if (SrcVT.isFixedLengthVector()) {
21220     ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
21221     VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
21222   }
21223 
21224   SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
21225   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
21226 
  // Convert the accumulator operand to a scalable vector.
21228   AccOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT,
21229                       DAG.getUNDEF(ContainerVT), AccOp, Zero);
21230 
21231   // Perform reduction.
21232   SDValue Rdx = DAG.getNode(AArch64ISD::FADDA_PRED, DL, ContainerVT,
21233                             Pg, AccOp, VecOp);
21234 
21235   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Rdx, Zero);
21236 }
21237 
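// Lower reductions of i1 vectors directly on SVE predicates: OR and AND are
// answered with PTEST, and XOR is computed by counting the active lanes with
// CNTP.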
21238 SDValue AArch64TargetLowering::LowerPredReductionToSVE(SDValue ReduceOp,
21239                                                        SelectionDAG &DAG) const {
21240   SDLoc DL(ReduceOp);
21241   SDValue Op = ReduceOp.getOperand(0);
21242   EVT OpVT = Op.getValueType();
21243   EVT VT = ReduceOp.getValueType();
21244 
21245   if (!OpVT.isScalableVector() || OpVT.getVectorElementType() != MVT::i1)
21246     return SDValue();
21247 
21248   SDValue Pg = getPredicateForVector(DAG, DL, OpVT);
21249 
21250   switch (ReduceOp.getOpcode()) {
21251   default:
21252     return SDValue();
21253   case ISD::VECREDUCE_OR:
21254     if (isAllActivePredicate(DAG, Pg) && OpVT == MVT::nxv16i1)
21255       // The predicate can be 'Op' because
21256       // vecreduce_or(Op & <all true>) <=> vecreduce_or(Op).
21257       return getPTest(DAG, VT, Op, Op, AArch64CC::ANY_ACTIVE);
21258     else
21259       return getPTest(DAG, VT, Pg, Op, AArch64CC::ANY_ACTIVE);
21260   case ISD::VECREDUCE_AND: {
21261     Op = DAG.getNode(ISD::XOR, DL, OpVT, Op, Pg);
21262     return getPTest(DAG, VT, Pg, Op, AArch64CC::NONE_ACTIVE);
21263   }
21264   case ISD::VECREDUCE_XOR: {
21265     SDValue ID =
21266         DAG.getTargetConstant(Intrinsic::aarch64_sve_cntp, DL, MVT::i64);
21267     if (OpVT == MVT::nxv1i1) {
21268       // Emulate a CNTP on .Q using .D and a different governing predicate.
21269       Pg = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv2i1, Pg);
21270       Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv2i1, Op);
21271     }
21272     SDValue Cntp =
21273         DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64, ID, Pg, Op);
21274     return DAG.getAnyExtOrTrunc(Cntp, DL, VT);
21275   }
21276   }
21277 
21278   return SDValue();
21279 }
21280 
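// Lower a vector reduction to the corresponding predicated SVE reduction node
// and extract the scalar result from lane 0, converting fixed length operands
// to their scalable container first.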
21281 SDValue AArch64TargetLowering::LowerReductionToSVE(unsigned Opcode,
21282                                                    SDValue ScalarOp,
21283                                                    SelectionDAG &DAG) const {
21284   SDLoc DL(ScalarOp);
21285   SDValue VecOp = ScalarOp.getOperand(0);
21286   EVT SrcVT = VecOp.getValueType();
21287 
21288   if (useSVEForFixedLengthVectorVT(
21289           SrcVT,
21290           /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors())) {
21291     EVT ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
21292     VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
21293   }
21294 
21295   // UADDV always returns an i64 result.
21296   EVT ResVT = (Opcode == AArch64ISD::UADDV_PRED) ? MVT::i64 :
21297                                                    SrcVT.getVectorElementType();
21298   EVT RdxVT = SrcVT;
21299   if (SrcVT.isFixedLengthVector() || Opcode == AArch64ISD::UADDV_PRED)
21300     RdxVT = getPackedSVEVectorVT(ResVT);
21301 
21302   SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
21303   SDValue Rdx = DAG.getNode(Opcode, DL, RdxVT, Pg, VecOp);
21304   SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT,
21305                             Rdx, DAG.getConstant(0, DL, MVT::i64));
21306 
21307   // The VEC_REDUCE nodes expect an element size result.
21308   if (ResVT != ScalarOp.getValueType())
21309     Res = DAG.getAnyExtOrTrunc(Res, DL, ScalarOp.getValueType());
21310 
21311   return Res;
21312 }
21313 
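// Lower a fixed length VSELECT by truncating the mask to an SVE predicate and
// selecting between the scalable container operands.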
21314 SDValue
21315 AArch64TargetLowering::LowerFixedLengthVectorSelectToSVE(SDValue Op,
21316     SelectionDAG &DAG) const {
21317   EVT VT = Op.getValueType();
21318   SDLoc DL(Op);
21319 
21320   EVT InVT = Op.getOperand(1).getValueType();
21321   EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21322   SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(1));
21323   SDValue Op2 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(2));
21324 
  // Convert the mask to a predicate (NOTE: we don't need to worry about
  // inactive lanes since VSELECT is safe when given undefined elements).
21327   EVT MaskVT = Op.getOperand(0).getValueType();
21328   EVT MaskContainerVT = getContainerForFixedLengthVector(DAG, MaskVT);
21329   auto Mask = convertToScalableVector(DAG, MaskContainerVT, Op.getOperand(0));
21330   Mask = DAG.getNode(ISD::TRUNCATE, DL,
21331                      MaskContainerVT.changeVectorElementType(MVT::i1), Mask);
21332 
21333   auto ScalableRes = DAG.getNode(ISD::VSELECT, DL, ContainerVT,
21334                                 Mask, Op1, Op2);
21335 
21336   return convertFromScalableVector(DAG, VT, ScalableRes);
21337 }
21338 
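// Lower a fixed length SETCC via SVE's predicated compare, then extend the
// predicate result back to an integer vector of the expected width.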
21339 SDValue AArch64TargetLowering::LowerFixedLengthVectorSetccToSVE(
21340     SDValue Op, SelectionDAG &DAG) const {
21341   SDLoc DL(Op);
21342   EVT InVT = Op.getOperand(0).getValueType();
21343   EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21344 
21345   assert(useSVEForFixedLengthVectorVT(InVT) &&
21346          "Only expected to lower fixed length vector operation!");
21347   assert(Op.getValueType() == InVT.changeTypeToInteger() &&
21348          "Expected integer result of the same bit length as the inputs!");
21349 
21350   auto Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
21351   auto Op2 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(1));
21352   auto Pg = getPredicateForFixedLengthVector(DAG, DL, InVT);
21353 
21354   EVT CmpVT = Pg.getValueType();
21355   auto Cmp = DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, CmpVT,
21356                          {Pg, Op1, Op2, Op.getOperand(2)});
21357 
21358   EVT PromoteVT = ContainerVT.changeTypeToInteger();
21359   auto Promote = DAG.getBoolExtOrTrunc(Cmp, DL, PromoteVT, InVT);
21360   return convertFromScalableVector(DAG, Op.getValueType(), Promote);
21361 }
21362 
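// Lower a fixed length BITCAST by bitcasting between the operands' scalable
// container types.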
21363 SDValue
21364 AArch64TargetLowering::LowerFixedLengthBitcastToSVE(SDValue Op,
21365                                                     SelectionDAG &DAG) const {
21366   SDLoc DL(Op);
21367   auto SrcOp = Op.getOperand(0);
21368   EVT VT = Op.getValueType();
21369   EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
21370   EVT ContainerSrcVT =
21371       getContainerForFixedLengthVector(DAG, SrcOp.getValueType());
21372 
21373   SrcOp = convertToScalableVector(DAG, ContainerSrcVT, SrcOp);
21374   Op = DAG.getNode(ISD::BITCAST, DL, ContainerDstVT, SrcOp);
21375   return convertFromScalableVector(DAG, VT, Op);
21376 }
21377 
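// Lower a fixed length CONCAT_VECTORS of two operands using SVE's SPLICE;
// wider concatenations are first split into a tree of pairwise concats.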
21378 SDValue AArch64TargetLowering::LowerFixedLengthConcatVectorsToSVE(
21379     SDValue Op, SelectionDAG &DAG) const {
21380   SDLoc DL(Op);
21381   unsigned NumOperands = Op->getNumOperands();
21382 
21383   assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
21384          "Unexpected number of operands in CONCAT_VECTORS");
21385 
21386   auto SrcOp1 = Op.getOperand(0);
21387   auto SrcOp2 = Op.getOperand(1);
21388   EVT VT = Op.getValueType();
21389   EVT SrcVT = SrcOp1.getValueType();
21390 
21391   if (NumOperands > 2) {
21392     SmallVector<SDValue, 4> Ops;
21393     EVT PairVT = SrcVT.getDoubleNumVectorElementsVT(*DAG.getContext());
21394     for (unsigned I = 0; I < NumOperands; I += 2)
21395       Ops.push_back(DAG.getNode(ISD::CONCAT_VECTORS, DL, PairVT,
21396                                 Op->getOperand(I), Op->getOperand(I + 1)));
21397 
21398     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Ops);
21399   }
21400 
21401   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21402 
21403   SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, SrcVT);
21404   SrcOp1 = convertToScalableVector(DAG, ContainerVT, SrcOp1);
21405   SrcOp2 = convertToScalableVector(DAG, ContainerVT, SrcOp2);
21406 
21407   Op = DAG.getNode(AArch64ISD::SPLICE, DL, ContainerVT, Pg, SrcOp1, SrcOp2);
21408 
21409   return convertFromScalableVector(DAG, VT, Op);
21410 }
21411 
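// Lower a fixed length FP_EXTEND by widening the integer representation to
// the destination element width and performing a predicated floating-point
// extend on the scalable container.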
21412 SDValue
21413 AArch64TargetLowering::LowerFixedLengthFPExtendToSVE(SDValue Op,
21414                                                      SelectionDAG &DAG) const {
21415   EVT VT = Op.getValueType();
21416   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21417 
21418   SDLoc DL(Op);
21419   SDValue Val = Op.getOperand(0);
21420   SDValue Pg = getPredicateForVector(DAG, DL, VT);
21421   EVT SrcVT = Val.getValueType();
21422   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21423   EVT ExtendVT = ContainerVT.changeVectorElementType(
21424       SrcVT.getVectorElementType());
21425 
21426   Val = DAG.getNode(ISD::BITCAST, DL, SrcVT.changeTypeToInteger(), Val);
21427   Val = DAG.getNode(ISD::ANY_EXTEND, DL, VT.changeTypeToInteger(), Val);
21428 
21429   Val = convertToScalableVector(DAG, ContainerVT.changeTypeToInteger(), Val);
21430   Val = getSVESafeBitCast(ExtendVT, Val, DAG);
21431   Val = DAG.getNode(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU, DL, ContainerVT,
21432                     Pg, Val, DAG.getUNDEF(ContainerVT));
21433 
21434   return convertFromScalableVector(DAG, VT, Val);
21435 }
21436 
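// Lower a fixed length FP_ROUND as a predicated floating-point convert on the
// scalable container, then truncate the integer representation down to the
// destination element width.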
21437 SDValue
21438 AArch64TargetLowering::LowerFixedLengthFPRoundToSVE(SDValue Op,
21439                                                     SelectionDAG &DAG) const {
21440   EVT VT = Op.getValueType();
21441   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21442 
21443   SDLoc DL(Op);
21444   SDValue Val = Op.getOperand(0);
21445   EVT SrcVT = Val.getValueType();
21446   EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
21447   EVT RoundVT = ContainerSrcVT.changeVectorElementType(
21448       VT.getVectorElementType());
21449   SDValue Pg = getPredicateForVector(DAG, DL, RoundVT);
21450 
21451   Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21452   Val = DAG.getNode(AArch64ISD::FP_ROUND_MERGE_PASSTHRU, DL, RoundVT, Pg, Val,
21453                     Op.getOperand(1), DAG.getUNDEF(RoundVT));
21454   Val = getSVESafeBitCast(ContainerSrcVT.changeTypeToInteger(), Val, DAG);
21455   Val = convertFromScalableVector(DAG, SrcVT.changeTypeToInteger(), Val);
21456 
21457   Val = DAG.getNode(ISD::TRUNCATE, DL, VT.changeTypeToInteger(), Val);
21458   return DAG.getNode(ISD::BITCAST, DL, VT, Val);
21459 }
21460 
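// Lower fixed length [SU]INT_TO_FP. When the destination elements are at
// least as wide as the source, extend first and convert in the wider type;
// otherwise convert in the source-sized container and narrow the result.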
21461 SDValue
21462 AArch64TargetLowering::LowerFixedLengthIntToFPToSVE(SDValue Op,
21463                                                     SelectionDAG &DAG) const {
21464   EVT VT = Op.getValueType();
21465   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21466 
21467   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP;
21468   unsigned Opcode = IsSigned ? AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU
21469                              : AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU;
21470 
21471   SDLoc DL(Op);
21472   SDValue Val = Op.getOperand(0);
21473   EVT SrcVT = Val.getValueType();
21474   EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
21475   EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
21476 
21477   if (ContainerSrcVT.getVectorElementType().getSizeInBits() <=
21478       ContainerDstVT.getVectorElementType().getSizeInBits()) {
21479     SDValue Pg = getPredicateForVector(DAG, DL, VT);
21480 
21481     Val = DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
21482                       VT.changeTypeToInteger(), Val);
21483 
21484     Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21485     Val = getSVESafeBitCast(ContainerDstVT.changeTypeToInteger(), Val, DAG);
    // Safe to use a larger than specified operand because we just extended
    // the data, so the upper bits hold the correctly extended value.
21488     Val = DAG.getNode(Opcode, DL, ContainerDstVT, Pg, Val,
21489                       DAG.getUNDEF(ContainerDstVT));
21490     return convertFromScalableVector(DAG, VT, Val);
21491   } else {
21492     EVT CvtVT = ContainerSrcVT.changeVectorElementType(
21493         ContainerDstVT.getVectorElementType());
21494     SDValue Pg = getPredicateForVector(DAG, DL, CvtVT);
21495 
21496     Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21497     Val = DAG.getNode(Opcode, DL, CvtVT, Pg, Val, DAG.getUNDEF(CvtVT));
21498     Val = getSVESafeBitCast(ContainerSrcVT, Val, DAG);
21499     Val = convertFromScalableVector(DAG, SrcVT, Val);
21500 
21501     Val = DAG.getNode(ISD::TRUNCATE, DL, VT.changeTypeToInteger(), Val);
21502     return DAG.getNode(ISD::BITCAST, DL, VT, Val);
21503   }
21504 }
21505 
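// Lower fixed length FP_TO_[SU]INT, mirroring the int-to-fp lowering above:
// widen the source when the destination elements are wider, otherwise convert
// first and then truncate the result.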
21506 SDValue
21507 AArch64TargetLowering::LowerFixedLengthFPToIntToSVE(SDValue Op,
21508                                                     SelectionDAG &DAG) const {
21509   EVT VT = Op.getValueType();
21510   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21511 
21512   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
21513   unsigned Opcode = IsSigned ? AArch64ISD::FCVTZS_MERGE_PASSTHRU
21514                              : AArch64ISD::FCVTZU_MERGE_PASSTHRU;
21515 
21516   SDLoc DL(Op);
21517   SDValue Val = Op.getOperand(0);
21518   EVT SrcVT = Val.getValueType();
21519   EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
21520   EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
21521 
21522   if (ContainerSrcVT.getVectorElementType().getSizeInBits() <=
21523       ContainerDstVT.getVectorElementType().getSizeInBits()) {
21524     EVT CvtVT = ContainerDstVT.changeVectorElementType(
21525       ContainerSrcVT.getVectorElementType());
21526     SDValue Pg = getPredicateForVector(DAG, DL, VT);
21527 
21528     Val = DAG.getNode(ISD::BITCAST, DL, SrcVT.changeTypeToInteger(), Val);
21529     Val = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Val);
21530 
21531     Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21532     Val = getSVESafeBitCast(CvtVT, Val, DAG);
21533     Val = DAG.getNode(Opcode, DL, ContainerDstVT, Pg, Val,
21534                       DAG.getUNDEF(ContainerDstVT));
21535     return convertFromScalableVector(DAG, VT, Val);
21536   } else {
21537     EVT CvtVT = ContainerSrcVT.changeTypeToInteger();
21538     SDValue Pg = getPredicateForVector(DAG, DL, CvtVT);
21539 
21540     // Safe to use a larger than specified result since an fp_to_int where the
21541     // result doesn't fit into the destination is undefined.
21542     Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21543     Val = DAG.getNode(Opcode, DL, CvtVT, Pg, Val, DAG.getUNDEF(CvtVT));
21544     Val = convertFromScalableVector(DAG, SrcVT.changeTypeToInteger(), Val);
21545 
21546     return DAG.getNode(ISD::TRUNCATE, DL, VT, Val);
21547   }
21548 }
21549 
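// Lower fixed length VECTOR_SHUFFLEs whose masks correspond to SVE operations
// such as INSR, REV, ZIP and TRN on the scalable container type.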
21550 SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE(
21551     SDValue Op, SelectionDAG &DAG) const {
21552   EVT VT = Op.getValueType();
21553   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21554 
21555   auto *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
21556   auto ShuffleMask = SVN->getMask();
21557 
21558   SDLoc DL(Op);
21559   SDValue Op1 = Op.getOperand(0);
21560   SDValue Op2 = Op.getOperand(1);
21561 
21562   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21563   Op1 = convertToScalableVector(DAG, ContainerVT, Op1);
21564   Op2 = convertToScalableVector(DAG, ContainerVT, Op2);
21565 
21566   bool ReverseEXT = false;
21567   unsigned Imm;
21568   if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm) &&
21569       Imm == VT.getVectorNumElements() - 1) {
21570     if (ReverseEXT)
21571       std::swap(Op1, Op2);
21572 
21573     EVT ScalarTy = VT.getVectorElementType();
21574     if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
21575       ScalarTy = MVT::i32;
21576     SDValue Scalar = DAG.getNode(
21577         ISD::EXTRACT_VECTOR_ELT, DL, ScalarTy, Op1,
21578         DAG.getConstant(VT.getVectorNumElements() - 1, DL, MVT::i64));
21579     Op = DAG.getNode(AArch64ISD::INSR, DL, ContainerVT, Op2, Scalar);
21580     return convertFromScalableVector(DAG, VT, Op);
21581   }
21582 
21583   for (unsigned LaneSize : {64U, 32U, 16U}) {
21584     if (isREVMask(ShuffleMask, VT, LaneSize)) {
21585       EVT NewVT =
21586           getPackedSVEVectorVT(EVT::getIntegerVT(*DAG.getContext(), LaneSize));
21587       unsigned RevOp;
21588       unsigned EltSz = VT.getScalarSizeInBits();
21589       if (EltSz == 8)
21590         RevOp = AArch64ISD::BSWAP_MERGE_PASSTHRU;
21591       else if (EltSz == 16)
21592         RevOp = AArch64ISD::REVH_MERGE_PASSTHRU;
21593       else
21594         RevOp = AArch64ISD::REVW_MERGE_PASSTHRU;
21595 
21596       Op = DAG.getNode(ISD::BITCAST, DL, NewVT, Op1);
21597       Op = LowerToPredicatedOp(Op, DAG, RevOp);
21598       Op = DAG.getNode(ISD::BITCAST, DL, ContainerVT, Op);
21599       return convertFromScalableVector(DAG, VT, Op);
21600     }
21601   }
21602 
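  // ZIP1 and TRN1/TRN2 only reference elements relative to the start of each
  // operand, so they are safe regardless of the actual SVE register size.
  // ZIP2 and the UZP forms depend on where the upper half of the register
  // lies and are only handled further below once the register size is known
  // exactly.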
21603   unsigned WhichResult;
21604   if (isZIPMask(ShuffleMask, VT, WhichResult) && WhichResult == 0)
21605     return convertFromScalableVector(
21606         DAG, VT, DAG.getNode(AArch64ISD::ZIP1, DL, ContainerVT, Op1, Op2));
21607 
21608   if (isTRNMask(ShuffleMask, VT, WhichResult)) {
21609     unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
21610     return convertFromScalableVector(
21611         DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op2));
21612   }
21613 
21614   if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult) && WhichResult == 0)
21615     return convertFromScalableVector(
21616         DAG, VT, DAG.getNode(AArch64ISD::ZIP1, DL, ContainerVT, Op1, Op1));
21617 
21618   if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
21619     unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
21620     return convertFromScalableVector(
21621         DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op1));
21622   }
21623 
  // Functions like isZIPMask return true when an ISD::VECTOR_SHUFFLE's mask
21625   // represents the same logical operation as performed by a ZIP instruction. In
21626   // isolation these functions do not mean the ISD::VECTOR_SHUFFLE is exactly
21627   // equivalent to an AArch64 instruction. There's the extra component of
21628   // ISD::VECTOR_SHUFFLE's value type to consider. Prior to SVE these functions
  // only operated on 64/128-bit vector types that have a direct mapping to a
21630   // target register and so an exact mapping is implied.
21631   // However, when using SVE for fixed length vectors, most legal vector types
21632   // are actually sub-vectors of a larger SVE register. When mapping
21633   // ISD::VECTOR_SHUFFLE to an SVE instruction care must be taken to consider
21634   // how the mask's indices translate. Specifically, when the mapping requires
21635   // an exact meaning for a specific vector index (e.g. Index X is the last
21636   // vector element in the register) then such mappings are often only safe when
  // the exact SVE register size is known. The main exception to this is when
21638   // indices are logically relative to the first element of either
21639   // ISD::VECTOR_SHUFFLE operand because these relative indices don't change
21640   // when converting from fixed-length to scalable vector types (i.e. the start
21641   // of a fixed length vector is always the start of a scalable vector).
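  // For example, lowering a v4i32 shuffle to ZIP2 assumes that elements 2 and
  // 3 of each operand occupy the top half of the register, which is only true
  // when the SVE register is exactly 128 bits wide.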
21642   unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits();
21643   unsigned MaxSVESize = Subtarget->getMaxSVEVectorSizeInBits();
21644   if (MinSVESize == MaxSVESize && MaxSVESize == VT.getSizeInBits()) {
21645     if (ShuffleVectorInst::isReverseMask(ShuffleMask) && Op2.isUndef()) {
21646       Op = DAG.getNode(ISD::VECTOR_REVERSE, DL, ContainerVT, Op1);
21647       return convertFromScalableVector(DAG, VT, Op);
21648     }
21649 
21650     if (isZIPMask(ShuffleMask, VT, WhichResult) && WhichResult != 0)
21651       return convertFromScalableVector(
21652           DAG, VT, DAG.getNode(AArch64ISD::ZIP2, DL, ContainerVT, Op1, Op2));
21653 
21654     if (isUZPMask(ShuffleMask, VT, WhichResult)) {
21655       unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
21656       return convertFromScalableVector(
21657           DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op2));
21658     }
21659 
21660     if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult) && WhichResult != 0)
21661       return convertFromScalableVector(
21662           DAG, VT, DAG.getNode(AArch64ISD::ZIP2, DL, ContainerVT, Op1, Op1));
21663 
21664     if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
21665       unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
21666       return convertFromScalableVector(
21667           DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op1));
21668     }
21669   }
21670 
21671   return SDValue();
21672 }
21673 
21674 SDValue AArch64TargetLowering::getSVESafeBitCast(EVT VT, SDValue Op,
21675                                                  SelectionDAG &DAG) const {
21676   SDLoc DL(Op);
21677   EVT InVT = Op.getValueType();
21678 
21679   assert(VT.isScalableVector() && isTypeLegal(VT) &&
21680          InVT.isScalableVector() && isTypeLegal(InVT) &&
21681          "Only expect to cast between legal scalable vector types!");
21682   assert(VT.getVectorElementType() != MVT::i1 &&
21683          InVT.getVectorElementType() != MVT::i1 &&
21684          "For predicate bitcasts, use getSVEPredicateBitCast");
21685 
21686   if (InVT == VT)
21687     return Op;
21688 
21689   EVT PackedVT = getPackedSVEVectorVT(VT.getVectorElementType());
21690   EVT PackedInVT = getPackedSVEVectorVT(InVT.getVectorElementType());
21691 
  // Safe bitcasting between unpacked vector types of different element counts
  // is currently unsupported because the code below lacks the logic needed to
  // ensure the result's elements live where they're supposed to within an SVE
  // register.
21696   //                01234567
21697   // e.g. nxv2i32 = XX??XX??
21698   //      nxv4f16 = X?X?X?X?
21699   assert((VT.getVectorElementCount() == InVT.getVectorElementCount() ||
21700           VT == PackedVT || InVT == PackedInVT) &&
21701          "Unexpected bitcast!");
21702 
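  // The cast below goes via the packed equivalents of both types. For
  // example, casting nxv4i32 to the unpacked nxv4f16 bitcasts to the packed
  // nxv8f16 and then reinterprets that as nxv4f16, leaving each f16 element
  // in the low half of its original 32-bit lane.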
21703   // Pack input if required.
21704   if (InVT != PackedInVT)
21705     Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, PackedInVT, Op);
21706 
21707   Op = DAG.getNode(ISD::BITCAST, DL, PackedVT, Op);
21708 
21709   // Unpack result if required.
21710   if (VT != PackedVT)
21711     Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Op);
21712 
21713   return Op;
21714 }
21715 
21716 bool AArch64TargetLowering::isAllActivePredicate(SelectionDAG &DAG,
21717                                                  SDValue N) const {
21718   return ::isAllActivePredicate(DAG, N);
21719 }
21720 
21721 EVT AArch64TargetLowering::getPromotedVTForPredicate(EVT VT) const {
21722   return ::getPromotedVTForPredicate(VT);
21723 }
21724 
21725 bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
21726     SDValue Op, const APInt &OriginalDemandedBits,
21727     const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
21728     unsigned Depth) const {
21729 
21730   unsigned Opc = Op.getOpcode();
21731   switch (Opc) {
21732   case AArch64ISD::VSHL: {
21733     // Match (VSHL (VLSHR Val X) X)
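    // A right shift followed by a left shift of the same amount clears the
    // low X bits of Val. If none of those bits are demanded, the whole
    // sequence can be replaced by Val itself (e.g. for 32-bit lanes with X=8,
    // only bits [31:8] may be demanded).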
21734     SDValue ShiftL = Op;
21735     SDValue ShiftR = Op->getOperand(0);
21736     if (ShiftR->getOpcode() != AArch64ISD::VLSHR)
21737       return false;
21738 
21739     if (!ShiftL.hasOneUse() || !ShiftR.hasOneUse())
21740       return false;
21741 
21742     unsigned ShiftLBits = ShiftL->getConstantOperandVal(1);
21743     unsigned ShiftRBits = ShiftR->getConstantOperandVal(1);
21744 
    // Other cases can be handled as well, but this is not implemented.
21747     if (ShiftRBits != ShiftLBits)
21748       return false;
21749 
21750     unsigned ScalarSize = Op.getScalarValueSizeInBits();
21751     assert(ScalarSize > ShiftLBits && "Invalid shift imm");
21752 
21753     APInt ZeroBits = APInt::getLowBitsSet(ScalarSize, ShiftLBits);
21754     APInt UnusedBits = ~OriginalDemandedBits;
21755 
21756     if ((ZeroBits & UnusedBits) != ZeroBits)
21757       return false;
21758 
21759     // All bits that are zeroed by (VSHL (VLSHR Val X) X) are not
21760     // used - simplify to just Val.
21761     return TLO.CombineTo(Op, ShiftR->getOperand(0));
21762   }
21763   }
21764 
21765   return TargetLowering::SimplifyDemandedBitsForTargetNode(
21766       Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
21767 }
21768 
21769 bool AArch64TargetLowering::isTargetCanonicalConstantNode(SDValue Op) const {
21770   return Op.getOpcode() == AArch64ISD::DUP ||
21771          (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
21772           Op.getOperand(0).getOpcode() == AArch64ISD::DUP) ||
21773          TargetLowering::isTargetCanonicalConstantNode(Op);
21774 }
21775 
21776 bool AArch64TargetLowering::isConstantUnsignedBitfieldExtractLegal(
21777     unsigned Opc, LLT Ty1, LLT Ty2) const {
21778   return Ty1 == Ty2 && (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64));
21779 }
21780