//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation  ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "AArch64ISelLowering.h"
#include "AArch64CallingConvention.h"
#include "AArch64ExpandImm.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64PerfectShuffle.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "aarch64-lower"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumShiftInserts, "Number of vector shift inserts");
STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");

// FIXME: The necessary dtprel relocations don't seem to be supported
// well in the GNU bfd and gold linkers at the moment. Therefore, by
// default, for now, fall back to GeneralDynamic code generation.
cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration(
    "aarch64-elf-ldtls-generation", cl::Hidden,
    cl::desc("Allow AArch64 Local Dynamic TLS code generation"),
    cl::init(false));

static cl::opt<bool>
EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden,
                         cl::desc("Enable AArch64 logical imm instruction "
                                  "optimization"),
                         cl::init(true));

// Temporary option added for the purpose of testing functionality added
// to DAGCombiner.cpp in D92230. It is expected that this can be removed
// in the future once both implementations are based on MGATHER rather
// than the GLD1 nodes added for the SVE gather load intrinsics.
static cl::opt<bool>
EnableCombineMGatherIntrinsics("aarch64-enable-mgather-combine", cl::Hidden,
                                cl::desc("Combine extends of AArch64 masked "
                                         "gather intrinsics"),
                                cl::init(true));

/// Value type used for condition codes.
static const MVT MVT_CC = MVT::i32;

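/// Returns the "packed" SVE vector type for the element type \p VT, i.e. the
/// scalable vector whose lanes of type \p VT fill one full SVE register block
/// (e.g. i32 -> nxv4i32).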
static inline EVT getPackedSVEVectorVT(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for vector");
  case MVT::i8:
    return MVT::nxv16i8;
  case MVT::i16:
    return MVT::nxv8i16;
  case MVT::i32:
    return MVT::nxv4i32;
  case MVT::i64:
    return MVT::nxv2i64;
  case MVT::f16:
    return MVT::nxv8f16;
  case MVT::f32:
    return MVT::nxv4f32;
  case MVT::f64:
    return MVT::nxv2f64;
  case MVT::bf16:
    return MVT::nxv8bf16;
  }
}

// NOTE: Currently there's only a need to return integer vector types. If this
// changes then just add an extra "type" parameter.
static inline EVT getPackedSVEVectorVT(ElementCount EC) {
  switch (EC.getKnownMinValue()) {
  default:
    llvm_unreachable("unexpected element count for vector");
  case 16:
    return MVT::nxv16i8;
  case 8:
    return MVT::nxv8i16;
  case 4:
    return MVT::nxv4i32;
  case 2:
    return MVT::nxv2i64;
  }
}

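/// Returns the packed integer vector type with the same number of lanes as
/// the scalable predicate type \p VT, i.e. the widest element type for which
/// that many lanes still fill one SVE register (e.g. nxv4i1 -> nxv4i32).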
static inline EVT getPromotedVTForPredicate(EVT VT) {
  assert(VT.isScalableVector() && (VT.getVectorElementType() == MVT::i1) &&
         "Expected scalable predicate vector type!");
  switch (VT.getVectorMinNumElements()) {
  default:
    llvm_unreachable("unexpected element count for vector");
  case 2:
    return MVT::nxv2i64;
  case 4:
    return MVT::nxv4i32;
  case 8:
    return MVT::nxv8i16;
  case 16:
    return MVT::nxv16i8;
  }
}

/// Returns true if VT's elements occupy the lowest bit positions of its
/// associated register class without any intervening space.
///
/// For example, nxv2f16, nxv4f16 and nxv8f16 are legal types that belong to the
/// same register class, but only nxv8f16 can be treated as a packed vector.
static inline bool isPackedVectorType(EVT VT, SelectionDAG &DAG) {
  assert(VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
         "Expected legal vector type!");
  return VT.isFixedLengthVector() ||
         VT.getSizeInBits().getKnownMinSize() == AArch64::SVEBitsPerBlock;
}

// Returns true for ####_MERGE_PASSTHRU opcodes, whose operands have a leading
// predicate and end with a passthru value matching the result type.
static bool isMergePassthruOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64ISD::BITREVERSE_MERGE_PASSTHRU:
  case AArch64ISD::BSWAP_MERGE_PASSTHRU:
  case AArch64ISD::REVH_MERGE_PASSTHRU:
  case AArch64ISD::REVW_MERGE_PASSTHRU:
  case AArch64ISD::REVD_MERGE_PASSTHRU:
  case AArch64ISD::CTLZ_MERGE_PASSTHRU:
  case AArch64ISD::CTPOP_MERGE_PASSTHRU:
  case AArch64ISD::DUP_MERGE_PASSTHRU:
  case AArch64ISD::ABS_MERGE_PASSTHRU:
  case AArch64ISD::NEG_MERGE_PASSTHRU:
  case AArch64ISD::FNEG_MERGE_PASSTHRU:
  case AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU:
  case AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU:
  case AArch64ISD::FCEIL_MERGE_PASSTHRU:
  case AArch64ISD::FFLOOR_MERGE_PASSTHRU:
  case AArch64ISD::FNEARBYINT_MERGE_PASSTHRU:
  case AArch64ISD::FRINT_MERGE_PASSTHRU:
  case AArch64ISD::FROUND_MERGE_PASSTHRU:
  case AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU:
  case AArch64ISD::FTRUNC_MERGE_PASSTHRU:
  case AArch64ISD::FP_ROUND_MERGE_PASSTHRU:
  case AArch64ISD::FP_EXTEND_MERGE_PASSTHRU:
  case AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU:
  case AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU:
  case AArch64ISD::FCVTZU_MERGE_PASSTHRU:
  case AArch64ISD::FCVTZS_MERGE_PASSTHRU:
  case AArch64ISD::FSQRT_MERGE_PASSTHRU:
  case AArch64ISD::FRECPX_MERGE_PASSTHRU:
  case AArch64ISD::FABS_MERGE_PASSTHRU:
    return true;
  }
}

// Returns true if inactive lanes are known to be zeroed by construction.
static bool isZeroingInactiveLanes(SDValue Op) {
  switch (Op.getOpcode()) {
  default:
    // We guarantee i1 splat_vectors to zero the other lanes by
    // implementing it with ptrue and possibly a punpklo for nxv1i1.
    if (ISD::isConstantSplatVectorAllOnes(Op.getNode()))
      return true;
    return false;
  case AArch64ISD::PTRUE:
  case AArch64ISD::SETCC_MERGE_ZERO:
    return true;
  case ISD::INTRINSIC_WO_CHAIN:
    switch (Op.getConstantOperandVal(0)) {
    default:
      return false;
    case Intrinsic::aarch64_sve_ptrue:
    case Intrinsic::aarch64_sve_pnext:
    case Intrinsic::aarch64_sve_cmpeq:
    case Intrinsic::aarch64_sve_cmpne:
    case Intrinsic::aarch64_sve_cmpge:
    case Intrinsic::aarch64_sve_cmpgt:
    case Intrinsic::aarch64_sve_cmphs:
    case Intrinsic::aarch64_sve_cmphi:
    case Intrinsic::aarch64_sve_cmpeq_wide:
    case Intrinsic::aarch64_sve_cmpne_wide:
    case Intrinsic::aarch64_sve_cmpge_wide:
    case Intrinsic::aarch64_sve_cmpgt_wide:
    case Intrinsic::aarch64_sve_cmplt_wide:
    case Intrinsic::aarch64_sve_cmple_wide:
    case Intrinsic::aarch64_sve_cmphs_wide:
    case Intrinsic::aarch64_sve_cmphi_wide:
    case Intrinsic::aarch64_sve_cmplo_wide:
    case Intrinsic::aarch64_sve_cmpls_wide:
    case Intrinsic::aarch64_sve_fcmpeq:
    case Intrinsic::aarch64_sve_fcmpne:
    case Intrinsic::aarch64_sve_fcmpge:
    case Intrinsic::aarch64_sve_fcmpgt:
    case Intrinsic::aarch64_sve_fcmpuo:
      return true;
    }
  }
}

AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
                                             const AArch64Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // AArch64 doesn't have comparisons which set GPRs or setcc instructions, so
  // we have to make something up. Arbitrarily, choose ZeroOrOne.
  setBooleanContents(ZeroOrOneBooleanContent);
  // When comparing vectors the result sets the different elements in the
  // vector to all-one or all-zero.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &AArch64::GPR32allRegClass);
  addRegisterClass(MVT::i64, &AArch64::GPR64allRegClass);

  if (Subtarget->hasLS64()) {
    addRegisterClass(MVT::i64x8, &AArch64::GPR64x8ClassRegClass);
    setOperationAction(ISD::LOAD, MVT::i64x8, Custom);
    setOperationAction(ISD::STORE, MVT::i64x8, Custom);
  }

  if (Subtarget->hasFPARMv8()) {
    addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::bf16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
  }

  if (Subtarget->hasNEON()) {
    addRegisterClass(MVT::v16i8, &AArch64::FPR8RegClass);
    addRegisterClass(MVT::v8i16, &AArch64::FPR16RegClass);
    // Someone set us up the NEON.
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);
    addDRTypeForNEON(MVT::v1f64);
    addDRTypeForNEON(MVT::v4f16);
    if (Subtarget->hasBF16())
      addDRTypeForNEON(MVT::v4bf16);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);
    addQRTypeForNEON(MVT::v8f16);
    if (Subtarget->hasBF16())
      addQRTypeForNEON(MVT::v8bf16);
  }

  if (Subtarget->hasSVE() || Subtarget->hasSME()) {
    // Add legal sve predicate types
    addRegisterClass(MVT::nxv1i1, &AArch64::PPRRegClass);
    addRegisterClass(MVT::nxv2i1, &AArch64::PPRRegClass);
    addRegisterClass(MVT::nxv4i1, &AArch64::PPRRegClass);
    addRegisterClass(MVT::nxv8i1, &AArch64::PPRRegClass);
    addRegisterClass(MVT::nxv16i1, &AArch64::PPRRegClass);

    // Add legal sve data types
    addRegisterClass(MVT::nxv16i8, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv8i16, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv4i32, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv2i64, &AArch64::ZPRRegClass);

    addRegisterClass(MVT::nxv2f16, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv4f16, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv8f16, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv2f32, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv4f32, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv2f64, &AArch64::ZPRRegClass);

    if (Subtarget->hasBF16()) {
      addRegisterClass(MVT::nxv2bf16, &AArch64::ZPRRegClass);
      addRegisterClass(MVT::nxv4bf16, &AArch64::ZPRRegClass);
      addRegisterClass(MVT::nxv8bf16, &AArch64::ZPRRegClass);
    }

    if (Subtarget->useSVEForFixedLengthVectors()) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
        if (useSVEForFixedLengthVectorVT(VT))
          addRegisterClass(VT, &AArch64::ZPRRegClass);

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
        if (useSVEForFixedLengthVectorVT(VT))
          addRegisterClass(VT, &AArch64::ZPRRegClass);
    }
  }

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget->getRegisterInfo());

  // Provide all sorts of operation actions
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::f16, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom);
  setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom);
  setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom);
  setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom);
  setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom);
  setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f16, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f16, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

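  // 128-bit shifts are expanded into two 64-bit halves; the *_PARTS nodes
  // that pass shifted bits between the halves need custom lowering.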
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);

  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Custom lowering hooks are needed for XOR
  // to fold it into CSINC/CSINV.
  setOperationAction(ISD::XOR, MVT::i32, Custom);
  setOperationAction(ISD::XOR, MVT::i64, Custom);

  // Virtually no operation on f128 is legal, but LLVM can't expand them when
  // there's a valid register class, so we need custom operations in most cases.
  setOperationAction(ISD::FABS, MVT::f128, Expand);
  setOperationAction(ISD::FADD, MVT::f128, LibCall);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FDIV, MVT::f128, LibCall);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FMUL, MVT::f128, LibCall);
  setOperationAction(ISD::FNEG, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FRINT, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSQRT, MVT::f128, Expand);
  setOperationAction(ISD::FSUB, MVT::f128, LibCall);
  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Custom);
  setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom);
  setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);
  setOperationAction(ISD::SELECT, MVT::f128, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
  // FIXME: f128 FMINIMUM and FMAXIMUM (including STRICT versions) currently
  // aren't handled.

  // Lowering for many of the conversions is actually specified by the non-f128
  // type. The LowerXXX function will be trivial when f128 isn't involved.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);

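  // Saturating fp-to-int conversions get custom lowering; AArch64's FCVTZS/
  // FCVTZU instructions already saturate on overflow, so they can usually be
  // used directly.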
  setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);

  // Variable arguments.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Variable-sized objects.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (Subtarget->isTargetWindows())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);

  // Constant pool entries
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);

  // BlockAddress
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);

  // AArch64 lacks both left-rotate and popcount instructions.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
  }

  // AArch64 doesn't have i32 MULH{S|U}.
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);

  // AArch64 doesn't have {U|S}MUL_LOHI.
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);

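  // Scalar CTPOP has no GPR instruction; it is custom lowered, typically by
  // moving the value to a SIMD register and using CNT plus an ADDV reduction.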
  setOperationAction(ISD::CTPOP, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i64, Custom);
  setOperationAction(ISD::CTPOP, MVT::i128, Custom);

  setOperationAction(ISD::PARITY, MVT::i64, Custom);
  setOperationAction(ISD::PARITY, MVT::i128, Custom);

  setOperationAction(ISD::ABS, MVT::i32, Custom);
  setOperationAction(ISD::ABS, MVT::i64, Custom);

  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Custom lower Add/Sub/Mul with overflow.
  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::SADDO, MVT::i64, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i64, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i64, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i64, Custom);
  setOperationAction(ISD::SMULO, MVT::i32, Custom);
  setOperationAction(ISD::SMULO, MVT::i64, Custom);
  setOperationAction(ISD::UMULO, MVT::i32, Custom);
  setOperationAction(ISD::UMULO, MVT::i64, Custom);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Custom);
  setOperationAction(ISD::ADDCARRY, MVT::i64, Custom);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Custom);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Custom);
  setOperationAction(ISD::SADDO_CARRY, MVT::i32, Custom);
  setOperationAction(ISD::SADDO_CARRY, MVT::i64, Custom);
  setOperationAction(ISD::SSUBO_CARRY, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO_CARRY, MVT::i64, Custom);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  if (Subtarget->hasFullFP16())
    setOperationAction(ISD::FCOPYSIGN, MVT::f16, Custom);
  else
    setOperationAction(ISD::FCOPYSIGN, MVT::f16, Promote);

  for (auto Op : {ISD::FREM,        ISD::FPOW,         ISD::FPOWI,
                  ISD::FCOS,        ISD::FSIN,         ISD::FSINCOS,
                  ISD::FEXP,        ISD::FEXP2,        ISD::FLOG,
                  ISD::FLOG2,       ISD::FLOG10,       ISD::STRICT_FREM,
                  ISD::STRICT_FPOW, ISD::STRICT_FPOWI, ISD::STRICT_FCOS,
                  ISD::STRICT_FSIN, ISD::STRICT_FEXP,  ISD::STRICT_FEXP2,
                  ISD::STRICT_FLOG, ISD::STRICT_FLOG2, ISD::STRICT_FLOG10}) {
    setOperationAction(Op, MVT::f16, Promote);
    setOperationAction(Op, MVT::v4f16, Expand);
    setOperationAction(Op, MVT::v8f16, Expand);
  }

  if (!Subtarget->hasFullFP16()) {
    for (auto Op :
         {ISD::SELECT,         ISD::SELECT_CC,      ISD::SETCC,
          ISD::BR_CC,          ISD::FADD,           ISD::FSUB,
          ISD::FMUL,           ISD::FDIV,           ISD::FMA,
          ISD::FNEG,           ISD::FABS,           ISD::FCEIL,
          ISD::FSQRT,          ISD::FFLOOR,         ISD::FNEARBYINT,
          ISD::FRINT,          ISD::FROUND,         ISD::FROUNDEVEN,
          ISD::FTRUNC,         ISD::FMINNUM,        ISD::FMAXNUM,
          ISD::FMINIMUM,       ISD::FMAXIMUM,       ISD::STRICT_FADD,
          ISD::STRICT_FSUB,    ISD::STRICT_FMUL,    ISD::STRICT_FDIV,
          ISD::STRICT_FMA,     ISD::STRICT_FCEIL,   ISD::STRICT_FFLOOR,
          ISD::STRICT_FSQRT,   ISD::STRICT_FRINT,   ISD::STRICT_FNEARBYINT,
          ISD::STRICT_FROUND,  ISD::STRICT_FTRUNC,  ISD::STRICT_FROUNDEVEN,
          ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM, ISD::STRICT_FMINIMUM,
          ISD::STRICT_FMAXIMUM})
      setOperationAction(Op, MVT::f16, Promote);

    // Round-to-integer operations need custom lowering for fp16, as Promote
    // doesn't work because the result type is integer.
    for (auto Op : {ISD::STRICT_LROUND, ISD::STRICT_LLROUND, ISD::STRICT_LRINT,
                    ISD::STRICT_LLRINT})
      setOperationAction(Op, MVT::f16, Custom);

    // Promote v4f16 to v4f32 when that is known to be safe.
    setOperationAction(ISD::FADD,        MVT::v4f16, Promote);
    setOperationAction(ISD::FSUB,        MVT::v4f16, Promote);
    setOperationAction(ISD::FMUL,        MVT::v4f16, Promote);
    setOperationAction(ISD::FDIV,        MVT::v4f16, Promote);
    AddPromotedToType(ISD::FADD,         MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FSUB,         MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FMUL,         MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FDIV,         MVT::v4f16, MVT::v4f32);

    setOperationAction(ISD::FABS,        MVT::v4f16, Expand);
    setOperationAction(ISD::FNEG,        MVT::v4f16, Expand);
    setOperationAction(ISD::FROUND,      MVT::v4f16, Expand);
    setOperationAction(ISD::FROUNDEVEN,  MVT::v4f16, Expand);
    setOperationAction(ISD::FMA,         MVT::v4f16, Expand);
    setOperationAction(ISD::SETCC,       MVT::v4f16, Expand);
    setOperationAction(ISD::BR_CC,       MVT::v4f16, Expand);
    setOperationAction(ISD::SELECT,      MVT::v4f16, Expand);
    setOperationAction(ISD::SELECT_CC,   MVT::v4f16, Expand);
    setOperationAction(ISD::FTRUNC,      MVT::v4f16, Expand);
    setOperationAction(ISD::FCOPYSIGN,   MVT::v4f16, Expand);
    setOperationAction(ISD::FFLOOR,      MVT::v4f16, Expand);
    setOperationAction(ISD::FCEIL,       MVT::v4f16, Expand);
    setOperationAction(ISD::FRINT,       MVT::v4f16, Expand);
    setOperationAction(ISD::FNEARBYINT,  MVT::v4f16, Expand);
    setOperationAction(ISD::FSQRT,       MVT::v4f16, Expand);

    setOperationAction(ISD::FABS,        MVT::v8f16, Expand);
    setOperationAction(ISD::FADD,        MVT::v8f16, Expand);
    setOperationAction(ISD::FCEIL,       MVT::v8f16, Expand);
    setOperationAction(ISD::FCOPYSIGN,   MVT::v8f16, Expand);
    setOperationAction(ISD::FDIV,        MVT::v8f16, Expand);
    setOperationAction(ISD::FFLOOR,      MVT::v8f16, Expand);
    setOperationAction(ISD::FMA,         MVT::v8f16, Expand);
    setOperationAction(ISD::FMUL,        MVT::v8f16, Expand);
    setOperationAction(ISD::FNEARBYINT,  MVT::v8f16, Expand);
    setOperationAction(ISD::FNEG,        MVT::v8f16, Expand);
    setOperationAction(ISD::FROUND,      MVT::v8f16, Expand);
    setOperationAction(ISD::FROUNDEVEN,  MVT::v8f16, Expand);
    setOperationAction(ISD::FRINT,       MVT::v8f16, Expand);
    setOperationAction(ISD::FSQRT,       MVT::v8f16, Expand);
    setOperationAction(ISD::FSUB,        MVT::v8f16, Expand);
    setOperationAction(ISD::FTRUNC,      MVT::v8f16, Expand);
    setOperationAction(ISD::SETCC,       MVT::v8f16, Expand);
    setOperationAction(ISD::BR_CC,       MVT::v8f16, Expand);
    setOperationAction(ISD::SELECT,      MVT::v8f16, Expand);
    setOperationAction(ISD::SELECT_CC,   MVT::v8f16, Expand);
    setOperationAction(ISD::FP_EXTEND,   MVT::v8f16, Expand);
  }

  // AArch64 has implementations of a lot of rounding-like FP operations.
  for (auto Op :
       {ISD::FFLOOR,          ISD::FNEARBYINT,      ISD::FCEIL,
        ISD::FRINT,           ISD::FTRUNC,          ISD::FROUND,
        ISD::FROUNDEVEN,      ISD::FMINNUM,         ISD::FMAXNUM,
        ISD::FMINIMUM,        ISD::FMAXIMUM,        ISD::LROUND,
        ISD::LLROUND,         ISD::LRINT,           ISD::LLRINT,
        ISD::STRICT_FFLOOR,   ISD::STRICT_FCEIL,    ISD::STRICT_FNEARBYINT,
        ISD::STRICT_FRINT,    ISD::STRICT_FTRUNC,   ISD::STRICT_FROUNDEVEN,
        ISD::STRICT_FROUND,   ISD::STRICT_FMINNUM,  ISD::STRICT_FMAXNUM,
        ISD::STRICT_FMINIMUM, ISD::STRICT_FMAXIMUM, ISD::STRICT_LROUND,
        ISD::STRICT_LLROUND,  ISD::STRICT_LRINT,    ISD::STRICT_LLRINT}) {
    for (MVT Ty : {MVT::f32, MVT::f64})
      setOperationAction(Op, Ty, Legal);
    if (Subtarget->hasFullFP16())
      setOperationAction(Op, MVT::f16, Legal);
  }

  // Basic strict FP operations are legal
  for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
                  ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT}) {
    for (MVT Ty : {MVT::f32, MVT::f64})
      setOperationAction(Op, Ty, Legal);
    if (Subtarget->hasFullFP16())
      setOperationAction(Op, MVT::f16, Legal);
  }

  // Strict conversion to a larger type is legal
  for (auto VT : {MVT::f32, MVT::f64})
    setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);

  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);

  // Generate outline atomics library calls only if LSE was not specified for
  // the subtarget.
  if (Subtarget->outlineAtomics() && !Subtarget->hasLSE()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, LibCall);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, LibCall);
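// Register the runtime names of the outline atomic helpers, e.g.
// "__aarch64_cas4_acq", for each access size and memory-ordering variant.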
#define LCALLNAMES(A, B, N)                                                    \
  setLibcallName(A##N##_RELAX, #B #N "_relax");                                \
  setLibcallName(A##N##_ACQ, #B #N "_acq");                                    \
  setLibcallName(A##N##_REL, #B #N "_rel");                                    \
  setLibcallName(A##N##_ACQ_REL, #B #N "_acq_rel");
#define LCALLNAME4(A, B)                                                       \
  LCALLNAMES(A, B, 1)                                                          \
  LCALLNAMES(A, B, 2) LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8)
#define LCALLNAME5(A, B)                                                       \
  LCALLNAMES(A, B, 1)                                                          \
  LCALLNAMES(A, B, 2)                                                          \
  LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8) LCALLNAMES(A, B, 16)
    LCALLNAME5(RTLIB::OUTLINE_ATOMIC_CAS, __aarch64_cas)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_SWP, __aarch64_swp)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDADD, __aarch64_ldadd)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDSET, __aarch64_ldset)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDCLR, __aarch64_ldclr)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDEOR, __aarch64_ldeor)
#undef LCALLNAMES
#undef LCALLNAME4
#undef LCALLNAME5
  }

  // 128-bit loads and stores can be done without expanding
  setOperationAction(ISD::LOAD, MVT::i128, Custom);
  setOperationAction(ISD::STORE, MVT::i128, Custom);

  // Aligned 128-bit loads and stores are single-copy atomic according to the
  // v8.4a spec.
  if (Subtarget->hasLSE2()) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);
  }

  // 256 bit non-temporal stores can be lowered to STNP. Do this as part of the
  // custom lowering, as there are no un-paired non-temporal stores and
  // legalization will break up 256 bit inputs.
  setOperationAction(ISD::STORE, MVT::v32i8, Custom);
  setOperationAction(ISD::STORE, MVT::v16i16, Custom);
  setOperationAction(ISD::STORE, MVT::v16f16, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8f32, Custom);
  setOperationAction(ISD::STORE, MVT::v4f64, Custom);
  setOperationAction(ISD::STORE, MVT::v4i64, Custom);

  // Lower READCYCLECOUNTER using an mrs from PMCCNTR_EL0.
  // This requires the Performance Monitors extension.
  if (Subtarget->hasPerfMon())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);

  if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
      getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
    // Issue __sincos_stret if available.
    setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
    setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
  } else {
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  }

  if (Subtarget->getTargetTriple().isOSMSVCRT()) {
    // MSVCRT doesn't have powi; fall back to pow
    setLibcallName(RTLIB::POWI_F32, nullptr);
    setLibcallName(RTLIB::POWI_F64, nullptr);
  }

  // Make floating-point constants legal for the large code model, so they don't
  // become loads from the constant pool.
  if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) {
    setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
    setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  }

  // AArch64 does not have floating-point extending loads, i1 sign-extending
  // load, floating-point truncating stores, or v2i32->v2i16 truncating store.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);
  }
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f128, MVT::f80, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);

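  // i16 is not a legal scalar type, so bitcasts between the 16-bit scalar
  // types (i16 <-> f16/bf16) need custom lowering.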
  setOperationAction(ISD::BITCAST, MVT::i16, Custom);
  setOperationAction(ISD::BITCAST, MVT::f16, Custom);
  setOperationAction(ISD::BITCAST, MVT::bf16, Custom);

  // Indexed loads and stores are supported.
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    setIndexedLoadAction(im, MVT::i8, Legal);
    setIndexedLoadAction(im, MVT::i16, Legal);
    setIndexedLoadAction(im, MVT::i32, Legal);
    setIndexedLoadAction(im, MVT::i64, Legal);
    setIndexedLoadAction(im, MVT::f64, Legal);
    setIndexedLoadAction(im, MVT::f32, Legal);
    setIndexedLoadAction(im, MVT::f16, Legal);
    setIndexedLoadAction(im, MVT::bf16, Legal);
    setIndexedStoreAction(im, MVT::i8, Legal);
    setIndexedStoreAction(im, MVT::i16, Legal);
    setIndexedStoreAction(im, MVT::i32, Legal);
    setIndexedStoreAction(im, MVT::i64, Legal);
    setIndexedStoreAction(im, MVT::f64, Legal);
    setIndexedStoreAction(im, MVT::f32, Legal);
    setIndexedStoreAction(im, MVT::f16, Legal);
    setIndexedStoreAction(im, MVT::bf16, Legal);
  }

  // Trap.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  setOperationAction(ISD::UBSANTRAP, MVT::Other, Legal);

  // We combine OR nodes for bitfield operations.
  setTargetDAGCombine(ISD::OR);
  // Try to create BICs for vector ANDs.
  setTargetDAGCombine(ISD::AND);

  // Vector add and sub nodes may conceal a high-half opportunity.
  // Also, try to fold ADD into CSINC/CSINV.
  setTargetDAGCombine({ISD::ADD, ISD::ABS, ISD::SUB, ISD::XOR, ISD::SINT_TO_FP,
                       ISD::UINT_TO_FP});

  setTargetDAGCombine({ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FP_TO_SINT_SAT,
                       ISD::FP_TO_UINT_SAT, ISD::FDIV});

  // Try to combine setcc with csel.
  setTargetDAGCombine(ISD::SETCC);

  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);

  setTargetDAGCombine({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND,
                       ISD::VECTOR_SPLICE, ISD::SIGN_EXTEND_INREG,
                       ISD::CONCAT_VECTORS, ISD::EXTRACT_SUBVECTOR,
                       ISD::INSERT_SUBVECTOR, ISD::STORE, ISD::BUILD_VECTOR});
  if (Subtarget->supportsAddressTopByteIgnored())
    setTargetDAGCombine(ISD::LOAD);

  setTargetDAGCombine(ISD::MSTORE);

  setTargetDAGCombine(ISD::MUL);

  setTargetDAGCombine({ISD::SELECT, ISD::VSELECT});

  setTargetDAGCombine({ISD::INTRINSIC_VOID, ISD::INTRINSIC_W_CHAIN,
                       ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
                       ISD::VECREDUCE_ADD, ISD::STEP_VECTOR});

  setTargetDAGCombine({ISD::MGATHER, ISD::MSCATTER});

  setTargetDAGCombine(ISD::FP_EXTEND);

  setTargetDAGCombine(ISD::GlobalAddress);

  // In case of strict alignment, avoid an excessive number of byte wide stores.
  MaxStoresPerMemsetOptSize = 8;
  MaxStoresPerMemset =
      Subtarget->requiresStrictAlign() ? MaxStoresPerMemsetOptSize : 32;

  MaxGluedStoresPerMemcpy = 4;
  MaxStoresPerMemcpyOptSize = 4;
  MaxStoresPerMemcpy =
      Subtarget->requiresStrictAlign() ? MaxStoresPerMemcpyOptSize : 16;

  MaxStoresPerMemmoveOptSize = 4;
  MaxStoresPerMemmove = 4;

  MaxLoadsPerMemcmpOptSize = 4;
  MaxLoadsPerMemcmp =
      Subtarget->requiresStrictAlign() ? MaxLoadsPerMemcmpOptSize : 8;

  setStackPointerRegisterToSaveRestore(AArch64::SP);

  setSchedulingPreference(Sched::Hybrid);

  EnableExtLdPromotion = true;

  // Set required alignment.
  setMinFunctionAlignment(Align(4));
  // Set preferred alignments.
  setPrefLoopAlignment(Align(1ULL << STI.getPrefLoopLogAlignment()));
  setMaxBytesForAlignment(STI.getMaxBytesForLoopAlignment());
  setPrefFunctionAlignment(Align(1ULL << STI.getPrefFunctionLogAlignment()));

  // Only change the limit for entries in a jump table if specified by
  // the sub target, but not at the command line.
  unsigned MaxJT = STI.getMaximumJumpTableSize();
  if (MaxJT && getMaximumJumpTableSize() == UINT_MAX)
    setMaximumJumpTableSize(MaxJT);

  setHasExtractBitsInsn(true);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (Subtarget->hasNEON()) {
    // FIXME: v1f64 shouldn't be legal if we can avoid it, because it leads to
    // silliness like this:
    for (auto Op :
         {ISD::SELECT,         ISD::SELECT_CC,      ISD::SETCC,
          ISD::BR_CC,          ISD::FADD,           ISD::FSUB,
          ISD::FMUL,           ISD::FDIV,           ISD::FMA,
          ISD::FNEG,           ISD::FABS,           ISD::FCEIL,
          ISD::FSQRT,          ISD::FFLOOR,         ISD::FNEARBYINT,
          ISD::FRINT,          ISD::FROUND,         ISD::FROUNDEVEN,
          ISD::FTRUNC,         ISD::FMINNUM,        ISD::FMAXNUM,
          ISD::FMINIMUM,       ISD::FMAXIMUM,       ISD::STRICT_FADD,
          ISD::STRICT_FSUB,    ISD::STRICT_FMUL,    ISD::STRICT_FDIV,
          ISD::STRICT_FMA,     ISD::STRICT_FCEIL,   ISD::STRICT_FFLOOR,
          ISD::STRICT_FSQRT,   ISD::STRICT_FRINT,   ISD::STRICT_FNEARBYINT,
          ISD::STRICT_FROUND,  ISD::STRICT_FTRUNC,  ISD::STRICT_FROUNDEVEN,
          ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM, ISD::STRICT_FMINIMUM,
          ISD::STRICT_FMAXIMUM})
      setOperationAction(Op, MVT::v1f64, Expand);

    for (auto Op :
         {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, ISD::UINT_TO_FP,
          ISD::FP_ROUND, ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT, ISD::MUL,
          ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT,
          ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP, ISD::STRICT_FP_ROUND})
      setOperationAction(Op, MVT::v1i64, Expand);

    // AArch64 doesn't have direct vector ->f32 conversion instructions for
    // elements smaller than i32, so promote the input to i32 first.
    setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i8, MVT::v4i32);
    setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i8, MVT::v4i32);

    // Similarly, there is no direct i32 -> f64 vector conversion instruction,
    // nor a direct i32 -> f16 vector conversion. Set these to Custom so the
    // conversion happens in two steps: v4i32 -> v4f32 -> v4f16
    for (auto Op : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
                    ISD::STRICT_UINT_TO_FP})
      for (auto VT : {MVT::v2i32, MVT::v2i64, MVT::v4i32})
        setOperationAction(Op, VT, Custom);

    if (Subtarget->hasFullFP16()) {
      setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v8i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v16i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
    } else {
      // When AArch64 doesn't have full fp16 support, promote the input
      // to i32 first.
      setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i8, MVT::v8i32);
      setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i8, MVT::v8i32);
      setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v16i8, MVT::v16i32);
      setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v16i8, MVT::v16i32);
      setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i16, MVT::v4i32);
      setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i16, MVT::v4i32);
      setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i16, MVT::v8i32);
      setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i16, MVT::v8i32);
    }

    setOperationAction(ISD::CTLZ,       MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ,       MVT::v2i64, Expand);
    setOperationAction(ISD::BITREVERSE, MVT::v8i8, Legal);
    setOperationAction(ISD::BITREVERSE, MVT::v16i8, Legal);
    setOperationAction(ISD::BITREVERSE, MVT::v2i32, Custom);
    setOperationAction(ISD::BITREVERSE, MVT::v4i32, Custom);
    setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom);
    setOperationAction(ISD::BITREVERSE, MVT::v2i64, Custom);
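    // NEON has no 64-bit element integer min/max instructions, so these are
    // custom lowered (e.g. to a compare-and-select sequence).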
    for (auto VT : {MVT::v1i64, MVT::v2i64}) {
      setOperationAction(ISD::UMAX, VT, Custom);
      setOperationAction(ISD::SMAX, VT, Custom);
      setOperationAction(ISD::UMIN, VT, Custom);
      setOperationAction(ISD::SMIN, VT, Custom);
    }

    // AArch64 doesn't have MUL.2d:
    setOperationAction(ISD::MUL, MVT::v2i64, Expand);
    // Custom handling for some quad-vector types to detect MULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);

    // Saturates
    for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32,
                    MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
    }

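    // Halving adds (SHADD/UHADD), rounding halving adds (SRHADD/URHADD) and
    // absolute differences (SABD/UABD) map directly onto these generic nodes.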
    for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16,
                   MVT::v4i32}) {
      setOperationAction(ISD::AVGFLOORS, VT, Legal);
      setOperationAction(ISD::AVGFLOORU, VT, Legal);
      setOperationAction(ISD::AVGCEILS, VT, Legal);
      setOperationAction(ISD::AVGCEILU, VT, Legal);
      setOperationAction(ISD::ABDS, VT, Legal);
      setOperationAction(ISD::ABDU, VT, Legal);
    }

    // Vector reductions
    for (MVT VT : { MVT::v4f16, MVT::v2f32,
                    MVT::v8f16, MVT::v4f32, MVT::v2f64 }) {
      if (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16()) {
        setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
        setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);

        setOperationAction(ISD::VECREDUCE_FADD, VT, Legal);
      }
    }
    for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32,
                    MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
    }
    setOperationAction(ISD::VECREDUCE_ADD, MVT::v2i64, Custom);

    setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Legal);
    setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
    // Likewise, narrowing and extending vector loads/stores aren't handled
    // directly.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

      if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32) {
        setOperationAction(ISD::MULHS, VT, Legal);
        setOperationAction(ISD::MULHU, VT, Legal);
      } else {
        setOperationAction(ISD::MULHS, VT, Expand);
        setOperationAction(ISD::MULHU, VT, Expand);
      }
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);

      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // AArch64 has implementations of a lot of rounding-like FP operations.
    for (auto Op :
         {ISD::FFLOOR, ISD::FNEARBYINT, ISD::FCEIL, ISD::FRINT, ISD::FTRUNC,
          ISD::FROUND, ISD::FROUNDEVEN, ISD::STRICT_FFLOOR,
          ISD::STRICT_FNEARBYINT, ISD::STRICT_FCEIL, ISD::STRICT_FRINT,
          ISD::STRICT_FTRUNC, ISD::STRICT_FROUND, ISD::STRICT_FROUNDEVEN}) {
      for (MVT Ty : {MVT::v2f32, MVT::v4f32, MVT::v2f64})
        setOperationAction(Op, Ty, Legal);
      if (Subtarget->hasFullFP16())
        for (MVT Ty : {MVT::v4f16, MVT::v8f16})
          setOperationAction(Op, Ty, Legal);
    }

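    // v4i8 is not a legal type; the extending loads and truncating stores
    // that use it are handled with custom lowering.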
    setTruncStoreAction(MVT::v4i16, MVT::v4i8, Custom);

    setLoadExtAction(ISD::EXTLOAD,  MVT::v4i16, MVT::v4i8, Custom);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Custom);
    setLoadExtAction(ISD::EXTLOAD,  MVT::v4i32, MVT::v4i8, Custom);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Custom);

    // ADDP custom lowering
    for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
      setOperationAction(ISD::ADD, VT, Custom);
    // FADDP custom lowering
    for (MVT VT : { MVT::v16f16, MVT::v8f32, MVT::v4f64 })
      setOperationAction(ISD::FADD, VT, Custom);
  }

  if (Subtarget->hasSME()) {
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  }

  // FIXME: Move lowering for more nodes here if those are common between
  // SVE and SME.
  if (Subtarget->hasSVE() || Subtarget->hasSME()) {
    for (auto VT :
         {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1, MVT::nxv1i1}) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }
  }

  if (Subtarget->hasSVE()) {
    for (auto VT : {MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64}) {
      setOperationAction(ISD::BITREVERSE, VT, Custom);
      setOperationAction(ISD::BSWAP, VT, Custom);
      setOperationAction(ISD::CTLZ, VT, Custom);
      setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::UINT_TO_FP, VT, Custom);
      setOperationAction(ISD::SINT_TO_FP, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT, VT, Custom);
      setOperationAction(ISD::FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::MGATHER, VT, Custom);
      setOperationAction(ISD::MSCATTER, VT, Custom);
      setOperationAction(ISD::MLOAD, VT, Custom);
      setOperationAction(ISD::MUL, VT, Custom);
      setOperationAction(ISD::MULHS, VT, Custom);
      setOperationAction(ISD::MULHU, VT, Custom);
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::SDIV, VT, Custom);
      setOperationAction(ISD::UDIV, VT, Custom);
      setOperationAction(ISD::SMIN, VT, Custom);
      setOperationAction(ISD::UMIN, VT, Custom);
      setOperationAction(ISD::SMAX, VT, Custom);
      setOperationAction(ISD::UMAX, VT, Custom);
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      setOperationAction(ISD::ABS, VT, Custom);
      setOperationAction(ISD::ABDS, VT, Custom);
      setOperationAction(ISD::ABDU, VT, Custom);
      setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
      setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);

      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SELECT_CC, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
    }

    // Illegal unpacked integer vector types.
    for (auto VT : {MVT::nxv8i8, MVT::nxv4i16, MVT::nxv2i32}) {
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
    }

    // Legalize unpacked bitcasts to REINTERPRET_CAST.
    for (auto VT : {MVT::nxv2i16, MVT::nxv4i16, MVT::nxv2i32, MVT::nxv2bf16,
                    MVT::nxv4bf16, MVT::nxv2f16, MVT::nxv4f16, MVT::nxv2f32})
      setOperationAction(ISD::BITCAST, VT, Custom);

    for (auto VT :
         { MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64, MVT::nxv4i8,
           MVT::nxv4i16, MVT::nxv4i32, MVT::nxv8i8, MVT::nxv8i16 })
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Legal);

    for (auto VT :
         {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1, MVT::nxv1i1}) {
      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::TRUNCATE, VT, Custom);
      setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
      setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

      setOperationAction(ISD::SELECT_CC, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);

      // There are no legal MVT::nxv16f## based types.
      if (VT != MVT::nxv16i1) {
        setOperationAction(ISD::SINT_TO_FP, VT, Custom);
        setOperationAction(ISD::UINT_TO_FP, VT, Custom);
      }
    }

    // NEON doesn't support masked loads/stores/gathers/scatters, but SVE does
    for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v1f64,
                    MVT::v2f64, MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
                    MVT::v2i32, MVT::v4i32, MVT::v1i64, MVT::v2i64}) {
      setOperationAction(ISD::MLOAD, VT, Custom);
      setOperationAction(ISD::MSTORE, VT, Custom);
      setOperationAction(ISD::MGATHER, VT, Custom);
      setOperationAction(ISD::MSCATTER, VT, Custom);
    }

    // First, expand all scalable vector extending loads and truncating
    // stores, for both integer and floating-point scalable vector types.
1254     for (MVT VT : MVT::scalable_vector_valuetypes()) {
1255       for (MVT InnerVT : MVT::scalable_vector_valuetypes()) {
1256         setTruncStoreAction(VT, InnerVT, Expand);
1257         setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
1258         setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
1259         setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
1260       }
1261     }
1262 
1263     // Then, selectively enable those which we directly support.
1264     setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i8, Legal);
1265     setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i16, Legal);
1266     setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i32, Legal);
1267     setTruncStoreAction(MVT::nxv4i32, MVT::nxv4i8, Legal);
1268     setTruncStoreAction(MVT::nxv4i32, MVT::nxv4i16, Legal);
1269     setTruncStoreAction(MVT::nxv8i16, MVT::nxv8i8, Legal);
1270     for (auto Op : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) {
1271       setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i8, Legal);
1272       setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i16, Legal);
1273       setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i32, Legal);
1274       setLoadExtAction(Op, MVT::nxv4i32, MVT::nxv4i8, Legal);
1275       setLoadExtAction(Op, MVT::nxv4i32, MVT::nxv4i16, Legal);
1276       setLoadExtAction(Op, MVT::nxv8i16, MVT::nxv8i8, Legal);
1277     }
1278 
    // SVE supports truncating stores of 64-bit and 128-bit vectors.
1280     setTruncStoreAction(MVT::v2i64, MVT::v2i8, Custom);
1281     setTruncStoreAction(MVT::v2i64, MVT::v2i16, Custom);
1282     setTruncStoreAction(MVT::v2i64, MVT::v2i32, Custom);
1283     setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
1284     setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
1285 
1286     for (auto VT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32,
1287                     MVT::nxv4f32, MVT::nxv2f64}) {
1288       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1289       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1290       setOperationAction(ISD::MGATHER, VT, Custom);
1291       setOperationAction(ISD::MSCATTER, VT, Custom);
1292       setOperationAction(ISD::MLOAD, VT, Custom);
1293       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
1294       setOperationAction(ISD::SELECT, VT, Custom);
1295       setOperationAction(ISD::FADD, VT, Custom);
1296       setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1297       setOperationAction(ISD::FDIV, VT, Custom);
1298       setOperationAction(ISD::FMA, VT, Custom);
1299       setOperationAction(ISD::FMAXIMUM, VT, Custom);
1300       setOperationAction(ISD::FMAXNUM, VT, Custom);
1301       setOperationAction(ISD::FMINIMUM, VT, Custom);
1302       setOperationAction(ISD::FMINNUM, VT, Custom);
1303       setOperationAction(ISD::FMUL, VT, Custom);
1304       setOperationAction(ISD::FNEG, VT, Custom);
1305       setOperationAction(ISD::FSUB, VT, Custom);
1306       setOperationAction(ISD::FCEIL, VT, Custom);
1307       setOperationAction(ISD::FFLOOR, VT, Custom);
1308       setOperationAction(ISD::FNEARBYINT, VT, Custom);
1309       setOperationAction(ISD::FRINT, VT, Custom);
1310       setOperationAction(ISD::FROUND, VT, Custom);
1311       setOperationAction(ISD::FROUNDEVEN, VT, Custom);
1312       setOperationAction(ISD::FTRUNC, VT, Custom);
1313       setOperationAction(ISD::FSQRT, VT, Custom);
1314       setOperationAction(ISD::FABS, VT, Custom);
1315       setOperationAction(ISD::FP_EXTEND, VT, Custom);
1316       setOperationAction(ISD::FP_ROUND, VT, Custom);
1317       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1318       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
1319       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
1320       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1321       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
1322 
1323       setOperationAction(ISD::SELECT_CC, VT, Expand);
1324       setOperationAction(ISD::FREM, VT, Expand);
1325       setOperationAction(ISD::FPOW, VT, Expand);
1326       setOperationAction(ISD::FPOWI, VT, Expand);
1327       setOperationAction(ISD::FCOS, VT, Expand);
1328       setOperationAction(ISD::FSIN, VT, Expand);
1329       setOperationAction(ISD::FSINCOS, VT, Expand);
1330       setOperationAction(ISD::FEXP, VT, Expand);
1331       setOperationAction(ISD::FEXP2, VT, Expand);
1332       setOperationAction(ISD::FLOG, VT, Expand);
1333       setOperationAction(ISD::FLOG2, VT, Expand);
1334       setOperationAction(ISD::FLOG10, VT, Expand);
1335 
1336       setCondCodeAction(ISD::SETO, VT, Expand);
1337       setCondCodeAction(ISD::SETOLT, VT, Expand);
1338       setCondCodeAction(ISD::SETLT, VT, Expand);
1339       setCondCodeAction(ISD::SETOLE, VT, Expand);
1340       setCondCodeAction(ISD::SETLE, VT, Expand);
1341       setCondCodeAction(ISD::SETULT, VT, Expand);
1342       setCondCodeAction(ISD::SETULE, VT, Expand);
1343       setCondCodeAction(ISD::SETUGE, VT, Expand);
1344       setCondCodeAction(ISD::SETUGT, VT, Expand);
1345       setCondCodeAction(ISD::SETUEQ, VT, Expand);
1346       setCondCodeAction(ISD::SETONE, VT, Expand);
1347     }
1348 
1349     for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16}) {
1350       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1351       setOperationAction(ISD::MGATHER, VT, Custom);
1352       setOperationAction(ISD::MSCATTER, VT, Custom);
1353       setOperationAction(ISD::MLOAD, VT, Custom);
1354       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1355       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
1356     }
1357 
1358     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
1359     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
1360 
1361     // NEON doesn't support integer divides, but SVE does
1362     for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32,
1363                     MVT::v4i32, MVT::v1i64, MVT::v2i64}) {
1364       setOperationAction(ISD::SDIV, VT, Custom);
1365       setOperationAction(ISD::UDIV, VT, Custom);
1366     }
1367 
1368     // NEON doesn't support 64-bit vector integer muls, but SVE does.
1369     setOperationAction(ISD::MUL, MVT::v1i64, Custom);
1370     setOperationAction(ISD::MUL, MVT::v2i64, Custom);
1371 
1372     // NOTE: Currently this has to happen after computeRegisterProperties rather
1373     // than the preferred option of combining it with the addRegisterClass call.
1374     if (Subtarget->useSVEForFixedLengthVectors()) {
1375       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
1376         if (useSVEForFixedLengthVectorVT(VT))
1377           addTypeForFixedLengthSVE(VT);
1378       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
1379         if (useSVEForFixedLengthVectorVT(VT))
1380           addTypeForFixedLengthSVE(VT);
1381 
      // A 64-bit result can imply an input that is wider than a NEON vector.
1383       for (auto VT : {MVT::v8i8, MVT::v4i16})
1384         setOperationAction(ISD::TRUNCATE, VT, Custom);
1385       setOperationAction(ISD::FP_ROUND, MVT::v4f16, Custom);
1386 
      // A 128-bit result implies an input that is wider than a NEON vector.
1388       for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
1389         setOperationAction(ISD::TRUNCATE, VT, Custom);
1390       for (auto VT : {MVT::v8f16, MVT::v4f32})
1391         setOperationAction(ISD::FP_ROUND, VT, Custom);
1392 
1393       // These operations are not supported on NEON but SVE can do them.
1394       setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom);
1395       setOperationAction(ISD::CTLZ, MVT::v1i64, Custom);
1396       setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
1397       setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);
1398       setOperationAction(ISD::MULHS, MVT::v1i64, Custom);
1399       setOperationAction(ISD::MULHS, MVT::v2i64, Custom);
1400       setOperationAction(ISD::MULHU, MVT::v1i64, Custom);
1401       setOperationAction(ISD::MULHU, MVT::v2i64, Custom);
1402       setOperationAction(ISD::SMAX, MVT::v1i64, Custom);
1403       setOperationAction(ISD::SMAX, MVT::v2i64, Custom);
1404       setOperationAction(ISD::SMIN, MVT::v1i64, Custom);
1405       setOperationAction(ISD::SMIN, MVT::v2i64, Custom);
1406       setOperationAction(ISD::UMAX, MVT::v1i64, Custom);
1407       setOperationAction(ISD::UMAX, MVT::v2i64, Custom);
1408       setOperationAction(ISD::UMIN, MVT::v1i64, Custom);
1409       setOperationAction(ISD::UMIN, MVT::v2i64, Custom);
1410       setOperationAction(ISD::VECREDUCE_SMAX, MVT::v2i64, Custom);
1411       setOperationAction(ISD::VECREDUCE_SMIN, MVT::v2i64, Custom);
1412       setOperationAction(ISD::VECREDUCE_UMAX, MVT::v2i64, Custom);
1413       setOperationAction(ISD::VECREDUCE_UMIN, MVT::v2i64, Custom);
1414 
1415       // Int operations with no NEON support.
1416       for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
1417                       MVT::v2i32, MVT::v4i32, MVT::v2i64}) {
1418         setOperationAction(ISD::BITREVERSE, VT, Custom);
1419         setOperationAction(ISD::CTTZ, VT, Custom);
1420         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
1421         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
1422         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
1423       }
1424 
1425       // FP operations with no NEON support.
1426       for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32,
1427                       MVT::v1f64, MVT::v2f64})
1428         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1429 
1430       // Use SVE for vectors with more than 2 elements.
1431       for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v4f32})
1432         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1433     }
1434 
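    // Predicate vector splices have no direct instruction; promote them to
    // splices of the correspondingly sized integer vectors.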
1435     setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv2i1, MVT::nxv2i64);
1436     setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv4i1, MVT::nxv4i32);
1437     setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv8i1, MVT::nxv8i16);
1438     setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv16i1, MVT::nxv16i8);
1439 
1440     setOperationAction(ISD::VSCALE, MVT::i32, Custom);
1441   }
1442 
1443   if (Subtarget->hasMOPS() && Subtarget->hasMTE()) {
1444     // Only required for llvm.aarch64.mops.memset.tag
1445     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
1446   }
1447 
1448   PredictableSelectIsExpensive = Subtarget->predictableSelectIsExpensive();
1449 
1450   IsStrictFPEnabled = true;
1451 }
1452 
1453 void AArch64TargetLowering::addTypeForNEON(MVT VT) {
1454   assert(VT.isVector() && "VT should be a vector type");
1455 
1456   if (VT.isFloatingPoint()) {
1457     MVT PromoteTo = EVT(VT).changeVectorElementTypeToInteger().getSimpleVT();
1458     setOperationPromotedToType(ISD::LOAD, VT, PromoteTo);
1459     setOperationPromotedToType(ISD::STORE, VT, PromoteTo);
1460   }
1461 
1462   // Mark vector float intrinsics as expand.
1463   if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64) {
1464     setOperationAction(ISD::FSIN, VT, Expand);
1465     setOperationAction(ISD::FCOS, VT, Expand);
1466     setOperationAction(ISD::FPOW, VT, Expand);
1467     setOperationAction(ISD::FLOG, VT, Expand);
1468     setOperationAction(ISD::FLOG2, VT, Expand);
1469     setOperationAction(ISD::FLOG10, VT, Expand);
1470     setOperationAction(ISD::FEXP, VT, Expand);
1471     setOperationAction(ISD::FEXP2, VT, Expand);
1472   }
1473 
1474   // But we do support custom-lowering for FCOPYSIGN.
1475   if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
1476       ((VT == MVT::v4f16 || VT == MVT::v8f16) && Subtarget->hasFullFP16()))
1477     setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1478 
1479   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1480   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1481   setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1482   setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1483   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1484   setOperationAction(ISD::SRA, VT, Custom);
1485   setOperationAction(ISD::SRL, VT, Custom);
1486   setOperationAction(ISD::SHL, VT, Custom);
1487   setOperationAction(ISD::OR, VT, Custom);
1488   setOperationAction(ISD::SETCC, VT, Custom);
1489   setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
1490 
1491   setOperationAction(ISD::SELECT, VT, Expand);
1492   setOperationAction(ISD::SELECT_CC, VT, Expand);
1493   setOperationAction(ISD::VSELECT, VT, Expand);
1494   for (MVT InnerVT : MVT::all_valuetypes())
1495     setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
1496 
  // The CNT instruction only supports the B element size, so custom-lower
  // CTPOP for wider element types and widen the result with UADDLP.
1498   if (VT != MVT::v8i8 && VT != MVT::v16i8)
1499     setOperationAction(ISD::CTPOP, VT, Custom);
1500 
1501   setOperationAction(ISD::UDIV, VT, Expand);
1502   setOperationAction(ISD::SDIV, VT, Expand);
1503   setOperationAction(ISD::UREM, VT, Expand);
1504   setOperationAction(ISD::SREM, VT, Expand);
1505   setOperationAction(ISD::FREM, VT, Expand);
1506 
1507   for (unsigned Opcode :
1508        {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FP_TO_SINT_SAT,
1509         ISD::FP_TO_UINT_SAT, ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT})
1510     setOperationAction(Opcode, VT, Custom);
1511 
1512   if (!VT.isFloatingPoint())
1513     setOperationAction(ISD::ABS, VT, Legal);
1514 
1515   // [SU][MIN|MAX] are available for all NEON types apart from i64.
1516   if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64)
1517     for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
1518       setOperationAction(Opcode, VT, Legal);
1519 
  // F[MIN|MAX][NUM|NAN] and simple strict operations are available for all FP
  // NEON types, except bf16 and, without full FP16 support, f16.
1522   if (VT.isFloatingPoint() &&
1523       VT.getVectorElementType() != MVT::bf16 &&
1524       (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16()))
1525     for (unsigned Opcode :
1526          {ISD::FMINIMUM, ISD::FMAXIMUM, ISD::FMINNUM, ISD::FMAXNUM,
1527           ISD::STRICT_FMINIMUM, ISD::STRICT_FMAXIMUM, ISD::STRICT_FMINNUM,
1528           ISD::STRICT_FMAXNUM, ISD::STRICT_FADD, ISD::STRICT_FSUB,
1529           ISD::STRICT_FMUL, ISD::STRICT_FDIV, ISD::STRICT_FMA,
1530           ISD::STRICT_FSQRT})
1531       setOperationAction(Opcode, VT, Legal);
1532 
1533   // Strict fp extend and trunc are legal
1534   if (VT.isFloatingPoint() && VT.getScalarSizeInBits() != 16)
1535     setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
1536   if (VT.isFloatingPoint() && VT.getScalarSizeInBits() != 64)
1537     setOperationAction(ISD::STRICT_FP_ROUND, VT, Legal);
1538 
  // FIXME: We could potentially make use of the vector comparison instructions
  // for STRICT_FSETCC and STRICT_FSETCCS, but there are a number of
  // complications:
1542   //  * FCMPEQ/NE are quiet comparisons, the rest are signalling comparisons,
1543   //    so we would need to expand when the condition code doesn't match the
1544   //    kind of comparison.
1545   //  * Some kinds of comparison require more than one FCMXY instruction so
1546   //    would need to be expanded instead.
1547   //  * The lowering of the non-strict versions involves target-specific ISD
1548   //    nodes so we would likely need to add strict versions of all of them and
1549   //    handle them appropriately.
1550   setOperationAction(ISD::STRICT_FSETCC, VT, Expand);
1551   setOperationAction(ISD::STRICT_FSETCCS, VT, Expand);
1552 
1553   if (Subtarget->isLittleEndian()) {
1554     for (unsigned im = (unsigned)ISD::PRE_INC;
1555          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
1556       setIndexedLoadAction(im, VT, Legal);
1557       setIndexedStoreAction(im, VT, Legal);
1558     }
1559   }
1560 }
1561 
1562 bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
1563                                                           EVT OpVT) const {
1564   // Only SVE has a 1:1 mapping from intrinsic -> instruction (whilelo).
1565   if (!Subtarget->hasSVE())
1566     return true;
1567 
1568   // We can only support legal predicate result types. We can use the SVE
1569   // whilelo instruction for generating fixed-width predicates too.
1570   if (ResVT != MVT::nxv2i1 && ResVT != MVT::nxv4i1 && ResVT != MVT::nxv8i1 &&
1571       ResVT != MVT::nxv16i1 && ResVT != MVT::v2i1 && ResVT != MVT::v4i1 &&
1572       ResVT != MVT::v8i1 && ResVT != MVT::v16i1)
1573     return true;
1574 
1575   // The whilelo instruction only works with i32 or i64 scalar inputs.
1576   if (OpVT != MVT::i32 && OpVT != MVT::i64)
1577     return true;
1578 
1579   return false;
1580 }
1581 
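// Configure a fixed-length vector type that will be lowered via SVE: expand
// everything by default, then mark the operations that can be mapped onto
// scalable-vector equivalents as Custom.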
1582 void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
1583   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
1584 
1585   // By default everything must be expanded.
1586   for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
1587     setOperationAction(Op, VT, Expand);
1588 
1589   // We use EXTRACT_SUBVECTOR to "cast" a scalable vector to a fixed length one.
1590   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1591 
1592   if (VT.isFloatingPoint()) {
1593     setCondCodeAction(ISD::SETO, VT, Expand);
1594     setCondCodeAction(ISD::SETOLT, VT, Expand);
1595     setCondCodeAction(ISD::SETLT, VT, Expand);
1596     setCondCodeAction(ISD::SETOLE, VT, Expand);
1597     setCondCodeAction(ISD::SETLE, VT, Expand);
1598     setCondCodeAction(ISD::SETULT, VT, Expand);
1599     setCondCodeAction(ISD::SETULE, VT, Expand);
1600     setCondCodeAction(ISD::SETUGE, VT, Expand);
1601     setCondCodeAction(ISD::SETUGT, VT, Expand);
1602     setCondCodeAction(ISD::SETUEQ, VT, Expand);
1603     setCondCodeAction(ISD::SETONE, VT, Expand);
1604   }
1605 
1606   // Mark integer truncating stores/extending loads as having custom lowering
1607   if (VT.isInteger()) {
1608     MVT InnerVT = VT.changeVectorElementType(MVT::i8);
1609     while (InnerVT != VT) {
1610       setTruncStoreAction(VT, InnerVT, Custom);
1611       setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Custom);
1612       setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Custom);
1613       InnerVT = InnerVT.changeVectorElementType(
1614           MVT::getIntegerVT(2 * InnerVT.getScalarSizeInBits()));
1615     }
1616   }
1617 
1618   // Mark floating-point truncating stores/extending loads as having custom
1619   // lowering
1620   if (VT.isFloatingPoint()) {
1621     MVT InnerVT = VT.changeVectorElementType(MVT::f16);
1622     while (InnerVT != VT) {
1623       setTruncStoreAction(VT, InnerVT, Custom);
1624       setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Custom);
1625       InnerVT = InnerVT.changeVectorElementType(
1626           MVT::getFloatingPointVT(2 * InnerVT.getScalarSizeInBits()));
1627     }
1628   }
1629 
1630   // Lower fixed length vector operations to scalable equivalents.
1631   setOperationAction(ISD::ABS, VT, Custom);
1632   setOperationAction(ISD::ADD, VT, Custom);
1633   setOperationAction(ISD::AND, VT, Custom);
1634   setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1635   setOperationAction(ISD::BITCAST, VT, Custom);
1636   setOperationAction(ISD::BITREVERSE, VT, Custom);
1637   setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1638   setOperationAction(ISD::BSWAP, VT, Custom);
1639   setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1640   setOperationAction(ISD::CTLZ, VT, Custom);
1641   setOperationAction(ISD::CTPOP, VT, Custom);
1642   setOperationAction(ISD::CTTZ, VT, Custom);
1643   setOperationAction(ISD::FABS, VT, Custom);
1644   setOperationAction(ISD::FADD, VT, Custom);
1645   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1646   setOperationAction(ISD::FCEIL, VT, Custom);
1647   setOperationAction(ISD::FDIV, VT, Custom);
1648   setOperationAction(ISD::FFLOOR, VT, Custom);
1649   setOperationAction(ISD::FMA, VT, Custom);
1650   setOperationAction(ISD::FMAXIMUM, VT, Custom);
1651   setOperationAction(ISD::FMAXNUM, VT, Custom);
1652   setOperationAction(ISD::FMINIMUM, VT, Custom);
1653   setOperationAction(ISD::FMINNUM, VT, Custom);
1654   setOperationAction(ISD::FMUL, VT, Custom);
1655   setOperationAction(ISD::FNEARBYINT, VT, Custom);
1656   setOperationAction(ISD::FNEG, VT, Custom);
1657   setOperationAction(ISD::FP_EXTEND, VT, Custom);
1658   setOperationAction(ISD::FP_ROUND, VT, Custom);
1659   setOperationAction(ISD::FP_TO_SINT, VT, Custom);
1660   setOperationAction(ISD::FP_TO_UINT, VT, Custom);
1661   setOperationAction(ISD::FRINT, VT, Custom);
1662   setOperationAction(ISD::FROUND, VT, Custom);
1663   setOperationAction(ISD::FROUNDEVEN, VT, Custom);
1664   setOperationAction(ISD::FSQRT, VT, Custom);
1665   setOperationAction(ISD::FSUB, VT, Custom);
1666   setOperationAction(ISD::FTRUNC, VT, Custom);
1667   setOperationAction(ISD::LOAD, VT, Custom);
1668   setOperationAction(ISD::MGATHER, VT, Custom);
1669   setOperationAction(ISD::MLOAD, VT, Custom);
1670   setOperationAction(ISD::MSCATTER, VT, Custom);
1671   setOperationAction(ISD::MSTORE, VT, Custom);
1672   setOperationAction(ISD::MUL, VT, Custom);
1673   setOperationAction(ISD::MULHS, VT, Custom);
1674   setOperationAction(ISD::MULHU, VT, Custom);
1675   setOperationAction(ISD::OR, VT, Custom);
1676   setOperationAction(ISD::SDIV, VT, Custom);
1677   setOperationAction(ISD::SELECT, VT, Custom);
1678   setOperationAction(ISD::SETCC, VT, Custom);
1679   setOperationAction(ISD::SHL, VT, Custom);
1680   setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1681   setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1682   setOperationAction(ISD::SINT_TO_FP, VT, Custom);
1683   setOperationAction(ISD::SMAX, VT, Custom);
1684   setOperationAction(ISD::SMIN, VT, Custom);
1685   setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
1686   setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
1687   setOperationAction(ISD::SRA, VT, Custom);
1688   setOperationAction(ISD::SRL, VT, Custom);
1689   setOperationAction(ISD::STORE, VT, Custom);
1690   setOperationAction(ISD::SUB, VT, Custom);
1691   setOperationAction(ISD::TRUNCATE, VT, Custom);
1692   setOperationAction(ISD::UDIV, VT, Custom);
1693   setOperationAction(ISD::UINT_TO_FP, VT, Custom);
1694   setOperationAction(ISD::UMAX, VT, Custom);
1695   setOperationAction(ISD::UMIN, VT, Custom);
1696   setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
1697   setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
1698   setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1699   setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1700   setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
1701   setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
1702   setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
1703   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1704   setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
1705   setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
1706   setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
1707   setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
1708   setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
1709   setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1710   setOperationAction(ISD::VSELECT, VT, Custom);
1711   setOperationAction(ISD::XOR, VT, Custom);
1712   setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1713 }
1714 
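// Register a 64-bit ("D register") NEON type with FPR64 and set up its
// operation actions.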
1715 void AArch64TargetLowering::addDRTypeForNEON(MVT VT) {
1716   addRegisterClass(VT, &AArch64::FPR64RegClass);
1717   addTypeForNEON(VT);
1718 }
1719 
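// Register a 128-bit ("Q register") NEON type with FPR128 and set up its
// operation actions.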
1720 void AArch64TargetLowering::addQRTypeForNEON(MVT VT) {
1721   addRegisterClass(VT, &AArch64::FPR128RegClass);
1722   addTypeForNEON(VT);
1723 }
1724 
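// Scalar comparisons produce an i32 result. Scalable vector comparisons
// produce a predicate vector of i1, and fixed-length vector comparisons
// produce an integer vector matching the operand's element count and width.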
1725 EVT AArch64TargetLowering::getSetCCResultType(const DataLayout &,
1726                                               LLVMContext &C, EVT VT) const {
1727   if (!VT.isVector())
1728     return MVT::i32;
1729   if (VT.isScalableVector())
1730     return EVT::getVectorVT(C, MVT::i1, VT.getVectorElementCount());
1731   return VT.changeVectorElementTypeToInteger();
1732 }
1733 
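/// Try to rewrite the logical immediate \p Imm so that, over the demanded
/// bits, its value is unchanged but the full value becomes either all-zeros,
/// all-ones, or encodable as an AArch64 bitmask immediate. Returns true if
/// \p Op was successfully combined to use the new immediate.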
1734 static bool optimizeLogicalImm(SDValue Op, unsigned Size, uint64_t Imm,
1735                                const APInt &Demanded,
1736                                TargetLowering::TargetLoweringOpt &TLO,
1737                                unsigned NewOpc) {
1738   uint64_t OldImm = Imm, NewImm, Enc;
1739   uint64_t Mask = ((uint64_t)(-1LL) >> (64 - Size)), OrigMask = Mask;
1740 
1741   // Return if the immediate is already all zeros, all ones, a bimm32 or a
1742   // bimm64.
1743   if (Imm == 0 || Imm == Mask ||
1744       AArch64_AM::isLogicalImmediate(Imm & Mask, Size))
1745     return false;
1746 
1747   unsigned EltSize = Size;
1748   uint64_t DemandedBits = Demanded.getZExtValue();
1749 
1750   // Clear bits that are not demanded.
1751   Imm &= DemandedBits;
1752 
1753   while (true) {
    // The goal here is to set the non-demanded bits in a way that minimizes
    // the number of transitions between 0 and 1. To achieve this goal,
1756     // we set the non-demanded bits to the value of the preceding demanded bits.
1757     // For example, if we have an immediate 0bx10xx0x1 ('x' indicates a
1758     // non-demanded bit), we copy bit0 (1) to the least significant 'x',
1759     // bit2 (0) to 'xx', and bit6 (1) to the most significant 'x'.
1760     // The final result is 0b11000011.
1761     uint64_t NonDemandedBits = ~DemandedBits;
1762     uint64_t InvertedImm = ~Imm & DemandedBits;
1763     uint64_t RotatedImm =
1764         ((InvertedImm << 1) | (InvertedImm >> (EltSize - 1) & 1)) &
1765         NonDemandedBits;
1766     uint64_t Sum = RotatedImm + NonDemandedBits;
1767     bool Carry = NonDemandedBits & ~Sum & (1ULL << (EltSize - 1));
1768     uint64_t Ones = (Sum + Carry) & NonDemandedBits;
1769     NewImm = (Imm | Ones) & Mask;
1770 
1771     // If NewImm or its bitwise NOT is a shifted mask, it is a bitmask immediate
1772     // or all-ones or all-zeros, in which case we can stop searching. Otherwise,
1773     // we halve the element size and continue the search.
1774     if (isShiftedMask_64(NewImm) || isShiftedMask_64(~(NewImm | ~Mask)))
1775       break;
1776 
    // We cannot shrink the element size any further if it is 2 bits.
1778     if (EltSize == 2)
1779       return false;
1780 
1781     EltSize /= 2;
1782     Mask >>= EltSize;
1783     uint64_t Hi = Imm >> EltSize, DemandedBitsHi = DemandedBits >> EltSize;
1784 
    // Return if there is a mismatch in any of the demanded bits of Imm and Hi.
1786     if (((Imm ^ Hi) & (DemandedBits & DemandedBitsHi) & Mask) != 0)
1787       return false;
1788 
1789     // Merge the upper and lower halves of Imm and DemandedBits.
1790     Imm |= Hi;
1791     DemandedBits |= DemandedBitsHi;
1792   }
1793 
1794   ++NumOptimizedImms;
1795 
1796   // Replicate the element across the register width.
1797   while (EltSize < Size) {
1798     NewImm |= NewImm << EltSize;
1799     EltSize *= 2;
1800   }
1801 
1802   (void)OldImm;
1803   assert(((OldImm ^ NewImm) & Demanded.getZExtValue()) == 0 &&
1804          "demanded bits should never be altered");
1805   assert(OldImm != NewImm && "the new imm shouldn't be equal to the old imm");
1806 
1807   // Create the new constant immediate node.
1808   EVT VT = Op.getValueType();
1809   SDLoc DL(Op);
1810   SDValue New;
1811 
1812   // If the new constant immediate is all-zeros or all-ones, let the target
1813   // independent DAG combine optimize this node.
1814   if (NewImm == 0 || NewImm == OrigMask) {
1815     New = TLO.DAG.getNode(Op.getOpcode(), DL, VT, Op.getOperand(0),
1816                           TLO.DAG.getConstant(NewImm, DL, VT));
1817   // Otherwise, create a machine node so that target independent DAG combine
1818   // doesn't undo this optimization.
1819   } else {
1820     Enc = AArch64_AM::encodeLogicalImmediate(NewImm, Size);
1821     SDValue EncConst = TLO.DAG.getTargetConstant(Enc, DL, VT);
1822     New = SDValue(
1823         TLO.DAG.getMachineNode(NewOpc, DL, VT, Op.getOperand(0), EncConst), 0);
1824   }
1825 
1826   return TLO.CombineTo(Op, New);
1827 }
1828 
1829 bool AArch64TargetLowering::targetShrinkDemandedConstant(
1830     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
1831     TargetLoweringOpt &TLO) const {
1832   // Delay this optimization to as late as possible.
1833   if (!TLO.LegalOps)
1834     return false;
1835 
1836   if (!EnableOptimizeLogicalImm)
1837     return false;
1838 
1839   EVT VT = Op.getValueType();
1840   if (VT.isVector())
1841     return false;
1842 
1843   unsigned Size = VT.getSizeInBits();
1844   assert((Size == 32 || Size == 64) &&
1845          "i32 or i64 is expected after legalization.");
1846 
1847   // Exit early if we demand all bits.
1848   if (DemandedBits.countPopulation() == Size)
1849     return false;
1850 
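  // Only bitwise AND/OR/XOR of a constant can be shrunk; map the node to the
  // corresponding immediate-form machine opcode.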
1851   unsigned NewOpc;
1852   switch (Op.getOpcode()) {
1853   default:
1854     return false;
1855   case ISD::AND:
1856     NewOpc = Size == 32 ? AArch64::ANDWri : AArch64::ANDXri;
1857     break;
1858   case ISD::OR:
1859     NewOpc = Size == 32 ? AArch64::ORRWri : AArch64::ORRXri;
1860     break;
1861   case ISD::XOR:
1862     NewOpc = Size == 32 ? AArch64::EORWri : AArch64::EORXri;
1863     break;
1864   }
1865   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
1866   if (!C)
1867     return false;
1868   uint64_t Imm = C->getZExtValue();
1869   return optimizeLogicalImm(Op, Size, Imm, DemandedBits, TLO, NewOpc);
1870 }
1871 
/// computeKnownBitsForTargetNode - Determine which of the bits specified in
/// Mask are known to be either zero or one and return them in Known.
1874 void AArch64TargetLowering::computeKnownBitsForTargetNode(
1875     const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
1876     const SelectionDAG &DAG, unsigned Depth) const {
1877   switch (Op.getOpcode()) {
1878   default:
1879     break;
1880   case AArch64ISD::DUP: {
1881     SDValue SrcOp = Op.getOperand(0);
1882     Known = DAG.computeKnownBits(SrcOp, Depth + 1);
1883     if (SrcOp.getValueSizeInBits() != Op.getScalarValueSizeInBits()) {
1884       assert(SrcOp.getValueSizeInBits() > Op.getScalarValueSizeInBits() &&
1885              "Expected DUP implicit truncation");
1886       Known = Known.trunc(Op.getScalarValueSizeInBits());
1887     }
1888     break;
1889   }
1890   case AArch64ISD::CSEL: {
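    // CSEL selects between its two operands, so only bits known in both
    // operands are known in the result.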
1891     KnownBits Known2;
1892     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1893     Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
1894     Known = KnownBits::commonBits(Known, Known2);
1895     break;
1896   }
1897   case AArch64ISD::BICi: {
1898     // Compute the bit cleared value.
1899     uint64_t Mask =
1900         ~(Op->getConstantOperandVal(1) << Op->getConstantOperandVal(2));
1901     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1902     Known &= KnownBits::makeConstant(APInt(Known.getBitWidth(), Mask));
1903     break;
1904   }
1905   case AArch64ISD::VLSHR: {
1906     KnownBits Known2;
1907     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1908     Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
1909     Known = KnownBits::lshr(Known, Known2);
1910     break;
1911   }
1912   case AArch64ISD::VASHR: {
1913     KnownBits Known2;
1914     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1915     Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
1916     Known = KnownBits::ashr(Known, Known2);
1917     break;
1918   }
1919   case AArch64ISD::LOADgot:
1920   case AArch64ISD::ADDlow: {
1921     if (!Subtarget->isTargetILP32())
1922       break;
1923     // In ILP32 mode all valid pointers are in the low 4GB of the address-space.
1924     Known.Zero = APInt::getHighBitsSet(64, 32);
1925     break;
1926   }
1927   case AArch64ISD::ASSERT_ZEXT_BOOL: {
1928     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1929     Known.Zero |= APInt(Known.getBitWidth(), 0xFE);
1930     break;
1931   }
1932   case ISD::INTRINSIC_W_CHAIN: {
1933     ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
1934     Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
1935     switch (IntID) {
1936     default: return;
1937     case Intrinsic::aarch64_ldaxr:
1938     case Intrinsic::aarch64_ldxr: {
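      // Load-exclusive zero-extends the loaded value to the register width,
      // so all bits above the memory width are known to be zero.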
1939       unsigned BitWidth = Known.getBitWidth();
1940       EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
1941       unsigned MemBits = VT.getScalarSizeInBits();
1942       Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
1943       return;
1944     }
1945     }
1946     break;
1947   }
1948   case ISD::INTRINSIC_WO_CHAIN:
1949   case ISD::INTRINSIC_VOID: {
1950     unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1951     switch (IntNo) {
1952     default:
1953       break;
1954     case Intrinsic::aarch64_neon_umaxv:
1955     case Intrinsic::aarch64_neon_uminv: {
      // Figure out the datatype of the vector operand. The UMINV instruction
      // will zero extend the result, so we can mark as known zero all the
      // bits larger than the element datatype. 32-bit or larger doesn't need
      // this as those are legal types and will be handled by isel directly.
1960       MVT VT = Op.getOperand(1).getValueType().getSimpleVT();
1961       unsigned BitWidth = Known.getBitWidth();
1962       if (VT == MVT::v8i8 || VT == MVT::v16i8) {
1963         assert(BitWidth >= 8 && "Unexpected width!");
1964         APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 8);
1965         Known.Zero |= Mask;
1966       } else if (VT == MVT::v4i16 || VT == MVT::v8i16) {
1967         assert(BitWidth >= 16 && "Unexpected width!");
1968         APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
1969         Known.Zero |= Mask;
1970       }
1971       break;
    }
1973     }
1974   }
1975   }
1976 }
1977 
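// Scalar shift amounts are always i64, regardless of the type being shifted.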
1978 MVT AArch64TargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
1979                                                   EVT) const {
1980   return MVT::i64;
1981 }
1982 
1983 bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
1984     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
1985     bool *Fast) const {
1986   if (Subtarget->requiresStrictAlign())
1987     return false;
1988 
1989   if (Fast) {
1990     // Some CPUs are fine with unaligned stores except for 128-bit ones.
1991     *Fast = !Subtarget->isMisaligned128StoreSlow() || VT.getStoreSize() != 16 ||
1992             // See comments in performSTORECombine() for more details about
1993             // these conditions.
1994 
1995             // Code that uses clang vector extensions can mark that it
1996             // wants unaligned accesses to be treated as fast by
1997             // underspecifying alignment to be 1 or 2.
1998             Alignment <= 2 ||
1999 
2000             // Disregard v2i64. Memcpy lowering produces those and splitting
2001             // them regresses performance on micro-benchmarks and olden/bh.
2002             VT == MVT::v2i64;
2003   }
2004   return true;
2005 }
2006 
2007 // Same as above but handling LLTs instead.
2008 bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
2009     LLT Ty, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
2010     bool *Fast) const {
2011   if (Subtarget->requiresStrictAlign())
2012     return false;
2013 
2014   if (Fast) {
2015     // Some CPUs are fine with unaligned stores except for 128-bit ones.
2016     *Fast = !Subtarget->isMisaligned128StoreSlow() ||
2017             Ty.getSizeInBytes() != 16 ||
2018             // See comments in performSTORECombine() for more details about
2019             // these conditions.
2020 
2021             // Code that uses clang vector extensions can mark that it
2022             // wants unaligned accesses to be treated as fast by
2023             // underspecifying alignment to be 1 or 2.
2024             Alignment <= 2 ||
2025 
2026             // Disregard v2i64. Memcpy lowering produces those and splitting
2027             // them regresses performance on micro-benchmarks and olden/bh.
2028             Ty == LLT::fixed_vector(2, 64);
2029   }
2030   return true;
2031 }
2032 
2033 FastISel *
2034 AArch64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
2035                                       const TargetLibraryInfo *libInfo) const {
2036   return AArch64::createFastISel(funcInfo, libInfo);
2037 }
2038 
2039 const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
2040 #define MAKE_CASE(V)                                                           \
2041   case V:                                                                      \
2042     return #V;
2043   switch ((AArch64ISD::NodeType)Opcode) {
2044   case AArch64ISD::FIRST_NUMBER:
2045     break;
2046     MAKE_CASE(AArch64ISD::CALL)
2047     MAKE_CASE(AArch64ISD::ADRP)
2048     MAKE_CASE(AArch64ISD::ADR)
2049     MAKE_CASE(AArch64ISD::ADDlow)
2050     MAKE_CASE(AArch64ISD::LOADgot)
2051     MAKE_CASE(AArch64ISD::RET_FLAG)
2052     MAKE_CASE(AArch64ISD::BRCOND)
2053     MAKE_CASE(AArch64ISD::CSEL)
2054     MAKE_CASE(AArch64ISD::CSINV)
2055     MAKE_CASE(AArch64ISD::CSNEG)
2056     MAKE_CASE(AArch64ISD::CSINC)
2057     MAKE_CASE(AArch64ISD::THREAD_POINTER)
2058     MAKE_CASE(AArch64ISD::TLSDESC_CALLSEQ)
2059     MAKE_CASE(AArch64ISD::ABDS_PRED)
2060     MAKE_CASE(AArch64ISD::ABDU_PRED)
2061     MAKE_CASE(AArch64ISD::MUL_PRED)
2062     MAKE_CASE(AArch64ISD::MULHS_PRED)
2063     MAKE_CASE(AArch64ISD::MULHU_PRED)
2064     MAKE_CASE(AArch64ISD::SDIV_PRED)
2065     MAKE_CASE(AArch64ISD::SHL_PRED)
2066     MAKE_CASE(AArch64ISD::SMAX_PRED)
2067     MAKE_CASE(AArch64ISD::SMIN_PRED)
2068     MAKE_CASE(AArch64ISD::SRA_PRED)
2069     MAKE_CASE(AArch64ISD::SRL_PRED)
2070     MAKE_CASE(AArch64ISD::UDIV_PRED)
2071     MAKE_CASE(AArch64ISD::UMAX_PRED)
2072     MAKE_CASE(AArch64ISD::UMIN_PRED)
2073     MAKE_CASE(AArch64ISD::SRAD_MERGE_OP1)
2074     MAKE_CASE(AArch64ISD::FNEG_MERGE_PASSTHRU)
2075     MAKE_CASE(AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU)
2076     MAKE_CASE(AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU)
2077     MAKE_CASE(AArch64ISD::FCEIL_MERGE_PASSTHRU)
2078     MAKE_CASE(AArch64ISD::FFLOOR_MERGE_PASSTHRU)
2079     MAKE_CASE(AArch64ISD::FNEARBYINT_MERGE_PASSTHRU)
2080     MAKE_CASE(AArch64ISD::FRINT_MERGE_PASSTHRU)
2081     MAKE_CASE(AArch64ISD::FROUND_MERGE_PASSTHRU)
2082     MAKE_CASE(AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU)
2083     MAKE_CASE(AArch64ISD::FTRUNC_MERGE_PASSTHRU)
2084     MAKE_CASE(AArch64ISD::FP_ROUND_MERGE_PASSTHRU)
2085     MAKE_CASE(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU)
2086     MAKE_CASE(AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU)
2087     MAKE_CASE(AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU)
2088     MAKE_CASE(AArch64ISD::FCVTZU_MERGE_PASSTHRU)
2089     MAKE_CASE(AArch64ISD::FCVTZS_MERGE_PASSTHRU)
2090     MAKE_CASE(AArch64ISD::FSQRT_MERGE_PASSTHRU)
2091     MAKE_CASE(AArch64ISD::FRECPX_MERGE_PASSTHRU)
2092     MAKE_CASE(AArch64ISD::FABS_MERGE_PASSTHRU)
2093     MAKE_CASE(AArch64ISD::ABS_MERGE_PASSTHRU)
2094     MAKE_CASE(AArch64ISD::NEG_MERGE_PASSTHRU)
2095     MAKE_CASE(AArch64ISD::SETCC_MERGE_ZERO)
2096     MAKE_CASE(AArch64ISD::ADC)
2097     MAKE_CASE(AArch64ISD::SBC)
2098     MAKE_CASE(AArch64ISD::ADDS)
2099     MAKE_CASE(AArch64ISD::SUBS)
2100     MAKE_CASE(AArch64ISD::ADCS)
2101     MAKE_CASE(AArch64ISD::SBCS)
2102     MAKE_CASE(AArch64ISD::ANDS)
2103     MAKE_CASE(AArch64ISD::CCMP)
2104     MAKE_CASE(AArch64ISD::CCMN)
2105     MAKE_CASE(AArch64ISD::FCCMP)
2106     MAKE_CASE(AArch64ISD::FCMP)
2107     MAKE_CASE(AArch64ISD::STRICT_FCMP)
2108     MAKE_CASE(AArch64ISD::STRICT_FCMPE)
2109     MAKE_CASE(AArch64ISD::DUP)
2110     MAKE_CASE(AArch64ISD::DUPLANE8)
2111     MAKE_CASE(AArch64ISD::DUPLANE16)
2112     MAKE_CASE(AArch64ISD::DUPLANE32)
2113     MAKE_CASE(AArch64ISD::DUPLANE64)
2114     MAKE_CASE(AArch64ISD::DUPLANE128)
2115     MAKE_CASE(AArch64ISD::MOVI)
2116     MAKE_CASE(AArch64ISD::MOVIshift)
2117     MAKE_CASE(AArch64ISD::MOVIedit)
2118     MAKE_CASE(AArch64ISD::MOVImsl)
2119     MAKE_CASE(AArch64ISD::FMOV)
2120     MAKE_CASE(AArch64ISD::MVNIshift)
2121     MAKE_CASE(AArch64ISD::MVNImsl)
2122     MAKE_CASE(AArch64ISD::BICi)
2123     MAKE_CASE(AArch64ISD::ORRi)
2124     MAKE_CASE(AArch64ISD::BSP)
2125     MAKE_CASE(AArch64ISD::EXTR)
2126     MAKE_CASE(AArch64ISD::ZIP1)
2127     MAKE_CASE(AArch64ISD::ZIP2)
2128     MAKE_CASE(AArch64ISD::UZP1)
2129     MAKE_CASE(AArch64ISD::UZP2)
2130     MAKE_CASE(AArch64ISD::TRN1)
2131     MAKE_CASE(AArch64ISD::TRN2)
2132     MAKE_CASE(AArch64ISD::REV16)
2133     MAKE_CASE(AArch64ISD::REV32)
2134     MAKE_CASE(AArch64ISD::REV64)
2135     MAKE_CASE(AArch64ISD::EXT)
2136     MAKE_CASE(AArch64ISD::SPLICE)
2137     MAKE_CASE(AArch64ISD::VSHL)
2138     MAKE_CASE(AArch64ISD::VLSHR)
2139     MAKE_CASE(AArch64ISD::VASHR)
2140     MAKE_CASE(AArch64ISD::VSLI)
2141     MAKE_CASE(AArch64ISD::VSRI)
2142     MAKE_CASE(AArch64ISD::CMEQ)
2143     MAKE_CASE(AArch64ISD::CMGE)
2144     MAKE_CASE(AArch64ISD::CMGT)
2145     MAKE_CASE(AArch64ISD::CMHI)
2146     MAKE_CASE(AArch64ISD::CMHS)
2147     MAKE_CASE(AArch64ISD::FCMEQ)
2148     MAKE_CASE(AArch64ISD::FCMGE)
2149     MAKE_CASE(AArch64ISD::FCMGT)
2150     MAKE_CASE(AArch64ISD::CMEQz)
2151     MAKE_CASE(AArch64ISD::CMGEz)
2152     MAKE_CASE(AArch64ISD::CMGTz)
2153     MAKE_CASE(AArch64ISD::CMLEz)
2154     MAKE_CASE(AArch64ISD::CMLTz)
2155     MAKE_CASE(AArch64ISD::FCMEQz)
2156     MAKE_CASE(AArch64ISD::FCMGEz)
2157     MAKE_CASE(AArch64ISD::FCMGTz)
2158     MAKE_CASE(AArch64ISD::FCMLEz)
2159     MAKE_CASE(AArch64ISD::FCMLTz)
2160     MAKE_CASE(AArch64ISD::SADDV)
2161     MAKE_CASE(AArch64ISD::UADDV)
2162     MAKE_CASE(AArch64ISD::SDOT)
2163     MAKE_CASE(AArch64ISD::UDOT)
2164     MAKE_CASE(AArch64ISD::SMINV)
2165     MAKE_CASE(AArch64ISD::UMINV)
2166     MAKE_CASE(AArch64ISD::SMAXV)
2167     MAKE_CASE(AArch64ISD::UMAXV)
2168     MAKE_CASE(AArch64ISD::SADDV_PRED)
2169     MAKE_CASE(AArch64ISD::UADDV_PRED)
2170     MAKE_CASE(AArch64ISD::SMAXV_PRED)
2171     MAKE_CASE(AArch64ISD::UMAXV_PRED)
2172     MAKE_CASE(AArch64ISD::SMINV_PRED)
2173     MAKE_CASE(AArch64ISD::UMINV_PRED)
2174     MAKE_CASE(AArch64ISD::ORV_PRED)
2175     MAKE_CASE(AArch64ISD::EORV_PRED)
2176     MAKE_CASE(AArch64ISD::ANDV_PRED)
2177     MAKE_CASE(AArch64ISD::CLASTA_N)
2178     MAKE_CASE(AArch64ISD::CLASTB_N)
2179     MAKE_CASE(AArch64ISD::LASTA)
2180     MAKE_CASE(AArch64ISD::LASTB)
2181     MAKE_CASE(AArch64ISD::REINTERPRET_CAST)
2182     MAKE_CASE(AArch64ISD::LS64_BUILD)
2183     MAKE_CASE(AArch64ISD::LS64_EXTRACT)
2184     MAKE_CASE(AArch64ISD::TBL)
2185     MAKE_CASE(AArch64ISD::FADD_PRED)
2186     MAKE_CASE(AArch64ISD::FADDA_PRED)
2187     MAKE_CASE(AArch64ISD::FADDV_PRED)
2188     MAKE_CASE(AArch64ISD::FDIV_PRED)
2189     MAKE_CASE(AArch64ISD::FMA_PRED)
2190     MAKE_CASE(AArch64ISD::FMAX_PRED)
2191     MAKE_CASE(AArch64ISD::FMAXV_PRED)
2192     MAKE_CASE(AArch64ISD::FMAXNM_PRED)
2193     MAKE_CASE(AArch64ISD::FMAXNMV_PRED)
2194     MAKE_CASE(AArch64ISD::FMIN_PRED)
2195     MAKE_CASE(AArch64ISD::FMINV_PRED)
2196     MAKE_CASE(AArch64ISD::FMINNM_PRED)
2197     MAKE_CASE(AArch64ISD::FMINNMV_PRED)
2198     MAKE_CASE(AArch64ISD::FMUL_PRED)
2199     MAKE_CASE(AArch64ISD::FSUB_PRED)
2200     MAKE_CASE(AArch64ISD::RDSVL)
2201     MAKE_CASE(AArch64ISD::BIC)
2202     MAKE_CASE(AArch64ISD::BIT)
2203     MAKE_CASE(AArch64ISD::CBZ)
2204     MAKE_CASE(AArch64ISD::CBNZ)
2205     MAKE_CASE(AArch64ISD::TBZ)
2206     MAKE_CASE(AArch64ISD::TBNZ)
2207     MAKE_CASE(AArch64ISD::TC_RETURN)
2208     MAKE_CASE(AArch64ISD::PREFETCH)
2209     MAKE_CASE(AArch64ISD::SITOF)
2210     MAKE_CASE(AArch64ISD::UITOF)
2211     MAKE_CASE(AArch64ISD::NVCAST)
2212     MAKE_CASE(AArch64ISD::MRS)
2213     MAKE_CASE(AArch64ISD::SQSHL_I)
2214     MAKE_CASE(AArch64ISD::UQSHL_I)
2215     MAKE_CASE(AArch64ISD::SRSHR_I)
2216     MAKE_CASE(AArch64ISD::URSHR_I)
2217     MAKE_CASE(AArch64ISD::SQSHLU_I)
2218     MAKE_CASE(AArch64ISD::WrapperLarge)
2219     MAKE_CASE(AArch64ISD::LD2post)
2220     MAKE_CASE(AArch64ISD::LD3post)
2221     MAKE_CASE(AArch64ISD::LD4post)
2222     MAKE_CASE(AArch64ISD::ST2post)
2223     MAKE_CASE(AArch64ISD::ST3post)
2224     MAKE_CASE(AArch64ISD::ST4post)
2225     MAKE_CASE(AArch64ISD::LD1x2post)
2226     MAKE_CASE(AArch64ISD::LD1x3post)
2227     MAKE_CASE(AArch64ISD::LD1x4post)
2228     MAKE_CASE(AArch64ISD::ST1x2post)
2229     MAKE_CASE(AArch64ISD::ST1x3post)
2230     MAKE_CASE(AArch64ISD::ST1x4post)
2231     MAKE_CASE(AArch64ISD::LD1DUPpost)
2232     MAKE_CASE(AArch64ISD::LD2DUPpost)
2233     MAKE_CASE(AArch64ISD::LD3DUPpost)
2234     MAKE_CASE(AArch64ISD::LD4DUPpost)
2235     MAKE_CASE(AArch64ISD::LD1LANEpost)
2236     MAKE_CASE(AArch64ISD::LD2LANEpost)
2237     MAKE_CASE(AArch64ISD::LD3LANEpost)
2238     MAKE_CASE(AArch64ISD::LD4LANEpost)
2239     MAKE_CASE(AArch64ISD::ST2LANEpost)
2240     MAKE_CASE(AArch64ISD::ST3LANEpost)
2241     MAKE_CASE(AArch64ISD::ST4LANEpost)
2242     MAKE_CASE(AArch64ISD::SMULL)
2243     MAKE_CASE(AArch64ISD::UMULL)
2244     MAKE_CASE(AArch64ISD::FRECPE)
2245     MAKE_CASE(AArch64ISD::FRECPS)
2246     MAKE_CASE(AArch64ISD::FRSQRTE)
2247     MAKE_CASE(AArch64ISD::FRSQRTS)
2248     MAKE_CASE(AArch64ISD::STG)
2249     MAKE_CASE(AArch64ISD::STZG)
2250     MAKE_CASE(AArch64ISD::ST2G)
2251     MAKE_CASE(AArch64ISD::STZ2G)
2252     MAKE_CASE(AArch64ISD::SUNPKHI)
2253     MAKE_CASE(AArch64ISD::SUNPKLO)
2254     MAKE_CASE(AArch64ISD::UUNPKHI)
2255     MAKE_CASE(AArch64ISD::UUNPKLO)
2256     MAKE_CASE(AArch64ISD::INSR)
2257     MAKE_CASE(AArch64ISD::PTEST)
2258     MAKE_CASE(AArch64ISD::PTRUE)
2259     MAKE_CASE(AArch64ISD::LD1_MERGE_ZERO)
2260     MAKE_CASE(AArch64ISD::LD1S_MERGE_ZERO)
2261     MAKE_CASE(AArch64ISD::LDNF1_MERGE_ZERO)
2262     MAKE_CASE(AArch64ISD::LDNF1S_MERGE_ZERO)
2263     MAKE_CASE(AArch64ISD::LDFF1_MERGE_ZERO)
2264     MAKE_CASE(AArch64ISD::LDFF1S_MERGE_ZERO)
2265     MAKE_CASE(AArch64ISD::LD1RQ_MERGE_ZERO)
2266     MAKE_CASE(AArch64ISD::LD1RO_MERGE_ZERO)
2267     MAKE_CASE(AArch64ISD::SVE_LD2_MERGE_ZERO)
2268     MAKE_CASE(AArch64ISD::SVE_LD3_MERGE_ZERO)
2269     MAKE_CASE(AArch64ISD::SVE_LD4_MERGE_ZERO)
2270     MAKE_CASE(AArch64ISD::GLD1_MERGE_ZERO)
2271     MAKE_CASE(AArch64ISD::GLD1_SCALED_MERGE_ZERO)
2272     MAKE_CASE(AArch64ISD::GLD1_SXTW_MERGE_ZERO)
2273     MAKE_CASE(AArch64ISD::GLD1_UXTW_MERGE_ZERO)
2274     MAKE_CASE(AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO)
2275     MAKE_CASE(AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO)
2276     MAKE_CASE(AArch64ISD::GLD1_IMM_MERGE_ZERO)
2277     MAKE_CASE(AArch64ISD::GLD1S_MERGE_ZERO)
2278     MAKE_CASE(AArch64ISD::GLD1S_SCALED_MERGE_ZERO)
2279     MAKE_CASE(AArch64ISD::GLD1S_SXTW_MERGE_ZERO)
2280     MAKE_CASE(AArch64ISD::GLD1S_UXTW_MERGE_ZERO)
2281     MAKE_CASE(AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO)
2282     MAKE_CASE(AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO)
2283     MAKE_CASE(AArch64ISD::GLD1S_IMM_MERGE_ZERO)
2284     MAKE_CASE(AArch64ISD::GLDFF1_MERGE_ZERO)
2285     MAKE_CASE(AArch64ISD::GLDFF1_SCALED_MERGE_ZERO)
2286     MAKE_CASE(AArch64ISD::GLDFF1_SXTW_MERGE_ZERO)
2287     MAKE_CASE(AArch64ISD::GLDFF1_UXTW_MERGE_ZERO)
2288     MAKE_CASE(AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO)
2289     MAKE_CASE(AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO)
2290     MAKE_CASE(AArch64ISD::GLDFF1_IMM_MERGE_ZERO)
2291     MAKE_CASE(AArch64ISD::GLDFF1S_MERGE_ZERO)
2292     MAKE_CASE(AArch64ISD::GLDFF1S_SCALED_MERGE_ZERO)
2293     MAKE_CASE(AArch64ISD::GLDFF1S_SXTW_MERGE_ZERO)
2294     MAKE_CASE(AArch64ISD::GLDFF1S_UXTW_MERGE_ZERO)
2295     MAKE_CASE(AArch64ISD::GLDFF1S_SXTW_SCALED_MERGE_ZERO)
2296     MAKE_CASE(AArch64ISD::GLDFF1S_UXTW_SCALED_MERGE_ZERO)
2297     MAKE_CASE(AArch64ISD::GLDFF1S_IMM_MERGE_ZERO)
2298     MAKE_CASE(AArch64ISD::GLDNT1_MERGE_ZERO)
2299     MAKE_CASE(AArch64ISD::GLDNT1_INDEX_MERGE_ZERO)
2300     MAKE_CASE(AArch64ISD::GLDNT1S_MERGE_ZERO)
2301     MAKE_CASE(AArch64ISD::ST1_PRED)
2302     MAKE_CASE(AArch64ISD::SST1_PRED)
2303     MAKE_CASE(AArch64ISD::SST1_SCALED_PRED)
2304     MAKE_CASE(AArch64ISD::SST1_SXTW_PRED)
2305     MAKE_CASE(AArch64ISD::SST1_UXTW_PRED)
2306     MAKE_CASE(AArch64ISD::SST1_SXTW_SCALED_PRED)
2307     MAKE_CASE(AArch64ISD::SST1_UXTW_SCALED_PRED)
2308     MAKE_CASE(AArch64ISD::SST1_IMM_PRED)
2309     MAKE_CASE(AArch64ISD::SSTNT1_PRED)
2310     MAKE_CASE(AArch64ISD::SSTNT1_INDEX_PRED)
2311     MAKE_CASE(AArch64ISD::LDP)
2312     MAKE_CASE(AArch64ISD::STP)
2313     MAKE_CASE(AArch64ISD::STNP)
2314     MAKE_CASE(AArch64ISD::BITREVERSE_MERGE_PASSTHRU)
2315     MAKE_CASE(AArch64ISD::BSWAP_MERGE_PASSTHRU)
2316     MAKE_CASE(AArch64ISD::REVH_MERGE_PASSTHRU)
2317     MAKE_CASE(AArch64ISD::REVW_MERGE_PASSTHRU)
2318     MAKE_CASE(AArch64ISD::REVD_MERGE_PASSTHRU)
2319     MAKE_CASE(AArch64ISD::CTLZ_MERGE_PASSTHRU)
2320     MAKE_CASE(AArch64ISD::CTPOP_MERGE_PASSTHRU)
2321     MAKE_CASE(AArch64ISD::DUP_MERGE_PASSTHRU)
2322     MAKE_CASE(AArch64ISD::INDEX_VECTOR)
2323     MAKE_CASE(AArch64ISD::ADDP)
2324     MAKE_CASE(AArch64ISD::SADDLP)
2325     MAKE_CASE(AArch64ISD::UADDLP)
2326     MAKE_CASE(AArch64ISD::CALL_RVMARKER)
2327     MAKE_CASE(AArch64ISD::ASSERT_ZEXT_BOOL)
2328     MAKE_CASE(AArch64ISD::MOPS_MEMSET)
2329     MAKE_CASE(AArch64ISD::MOPS_MEMSET_TAGGING)
2330     MAKE_CASE(AArch64ISD::MOPS_MEMCOPY)
2331     MAKE_CASE(AArch64ISD::MOPS_MEMMOVE)
2332     MAKE_CASE(AArch64ISD::CALL_BTI)
2333   }
2334 #undef MAKE_CASE
2335   return nullptr;
2336 }
2337 
2338 MachineBasicBlock *
2339 AArch64TargetLowering::EmitF128CSEL(MachineInstr &MI,
2340                                     MachineBasicBlock *MBB) const {
2341   // We materialise the F128CSEL pseudo-instruction as some control flow and a
2342   // phi node:
2343 
2344   // OrigBB:
2345   //     [... previous instrs leading to comparison ...]
2346   //     b.ne TrueBB
2347   //     b EndBB
2348   // TrueBB:
2349   //     ; Fallthrough
2350   // EndBB:
2351   //     Dest = PHI [IfTrue, TrueBB], [IfFalse, OrigBB]
2352 
2353   MachineFunction *MF = MBB->getParent();
2354   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2355   const BasicBlock *LLVM_BB = MBB->getBasicBlock();
2356   DebugLoc DL = MI.getDebugLoc();
2357   MachineFunction::iterator It = ++MBB->getIterator();
2358 
2359   Register DestReg = MI.getOperand(0).getReg();
2360   Register IfTrueReg = MI.getOperand(1).getReg();
2361   Register IfFalseReg = MI.getOperand(2).getReg();
2362   unsigned CondCode = MI.getOperand(3).getImm();
2363   bool NZCVKilled = MI.getOperand(4).isKill();
2364 
2365   MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
2366   MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
2367   MF->insert(It, TrueBB);
2368   MF->insert(It, EndBB);
2369 
2370   // Transfer rest of current basic-block to EndBB
2371   EndBB->splice(EndBB->begin(), MBB, std::next(MachineBasicBlock::iterator(MI)),
2372                 MBB->end());
2373   EndBB->transferSuccessorsAndUpdatePHIs(MBB);
2374 
2375   BuildMI(MBB, DL, TII->get(AArch64::Bcc)).addImm(CondCode).addMBB(TrueBB);
2376   BuildMI(MBB, DL, TII->get(AArch64::B)).addMBB(EndBB);
2377   MBB->addSuccessor(TrueBB);
2378   MBB->addSuccessor(EndBB);
2379 
2380   // TrueBB falls through to the end.
2381   TrueBB->addSuccessor(EndBB);
2382 
2383   if (!NZCVKilled) {
2384     TrueBB->addLiveIn(AArch64::NZCV);
2385     EndBB->addLiveIn(AArch64::NZCV);
2386   }
2387 
2388   BuildMI(*EndBB, EndBB->begin(), DL, TII->get(AArch64::PHI), DestReg)
2389       .addReg(IfTrueReg)
2390       .addMBB(TrueBB)
2391       .addReg(IfFalseReg)
2392       .addMBB(MBB);
2393 
2394   MI.eraseFromParent();
2395   return EndBB;
2396 }
2397 
2398 MachineBasicBlock *AArch64TargetLowering::EmitLoweredCatchRet(
2399        MachineInstr &MI, MachineBasicBlock *BB) const {
2400   assert(!isAsynchronousEHPersonality(classifyEHPersonality(
2401              BB->getParent()->getFunction().getPersonalityFn())) &&
2402          "SEH does not use catchret!");
2403   return BB;
2404 }
2405 
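// Lower an SME tile-slice load pseudo into the corresponding target
// instruction; the destination tile register is BaseReg plus the pseudo's
// first (immediate) operand.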
2406 MachineBasicBlock *
2407 AArch64TargetLowering::EmitTileLoad(unsigned Opc, unsigned BaseReg,
2408                                     MachineInstr &MI,
2409                                     MachineBasicBlock *BB) const {
2410   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2411   MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
2412 
2413   MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
2414   MIB.add(MI.getOperand(1)); // slice index register
2415   MIB.add(MI.getOperand(2)); // slice index offset
2416   MIB.add(MI.getOperand(3)); // pg
2417   MIB.add(MI.getOperand(4)); // base
2418   MIB.add(MI.getOperand(5)); // offset
2419 
2420   MI.eraseFromParent(); // The pseudo is gone now.
2421   return BB;
2422 }
2423 
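// Lower the SME fill pseudo into LDR_ZA, which loads a ZA array slice from
// memory; the vector select offset is reused as the immediate memory offset.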
2424 MachineBasicBlock *
2425 AArch64TargetLowering::EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const {
2426   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2427   MachineInstrBuilder MIB =
2428       BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(AArch64::LDR_ZA));
2429 
2430   MIB.addReg(AArch64::ZA, RegState::Define);
2431   MIB.add(MI.getOperand(0)); // Vector select register
2432   MIB.add(MI.getOperand(1)); // Vector select offset
2433   MIB.add(MI.getOperand(2)); // Base
2434   MIB.add(MI.getOperand(1)); // Offset, same as vector select offset
2435 
2436   MI.eraseFromParent(); // The pseudo is gone now.
2437   return BB;
2438 }
2439 
2440 MachineBasicBlock *
2441 AArch64TargetLowering::EmitMopa(unsigned Opc, unsigned BaseReg,
2442                                 MachineInstr &MI, MachineBasicBlock *BB) const {
2443   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2444   MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
2445 
2446   MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
2447   MIB.addReg(BaseReg + MI.getOperand(0).getImm());
2448   MIB.add(MI.getOperand(1)); // pn
2449   MIB.add(MI.getOperand(2)); // pm
2450   MIB.add(MI.getOperand(3)); // zn
2451   MIB.add(MI.getOperand(4)); // zm
2452 
2453   MI.eraseFromParent(); // The pseudo is gone now.
2454   return BB;
2455 }
2456 
2457 MachineBasicBlock *
2458 AArch64TargetLowering::EmitInsertVectorToTile(unsigned Opc, unsigned BaseReg,
2459                                               MachineInstr &MI,
2460                                               MachineBasicBlock *BB) const {
2461   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2462   MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
2463 
2464   MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
2465   MIB.addReg(BaseReg + MI.getOperand(0).getImm());
2466   MIB.add(MI.getOperand(1)); // Slice index register
2467   MIB.add(MI.getOperand(2)); // Slice index offset
2468   MIB.add(MI.getOperand(3)); // pg
2469   MIB.add(MI.getOperand(4)); // zn
2470 
2471   MI.eraseFromParent(); // The pseudo is gone now.
2472   return BB;
2473 }
2474 
2475 MachineBasicBlock *
2476 AArch64TargetLowering::EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const {
2477   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2478   MachineInstrBuilder MIB =
2479       BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(AArch64::ZERO_M));
2480   MIB.add(MI.getOperand(0)); // Mask
2481 
2482   unsigned Mask = MI.getOperand(0).getImm();
2483   for (unsigned I = 0; I < 8; I++) {
2484     if (Mask & (1 << I))
2485       MIB.addDef(AArch64::ZAD0 + I, RegState::ImplicitDefine);
2486   }
2487 
2488   MI.eraseFromParent(); // The pseudo is gone now.
2489   return BB;
2490 }
2491 
2492 MachineBasicBlock *
2493 AArch64TargetLowering::EmitAddVectorToTile(unsigned Opc, unsigned BaseReg,
2494                                            MachineInstr &MI,
2495                                            MachineBasicBlock *BB) const {
2496   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2497   MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
2498 
2499   MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
2500   MIB.addReg(BaseReg + MI.getOperand(0).getImm());
2501   MIB.add(MI.getOperand(1)); // pn
2502   MIB.add(MI.getOperand(2)); // pm
2503   MIB.add(MI.getOperand(3)); // zn
2504 
2505   MI.eraseFromParent(); // The pseudo is gone now.
2506   return BB;
2507 }
2508 
2509 MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
2510     MachineInstr &MI, MachineBasicBlock *BB) const {
2511   switch (MI.getOpcode()) {
2512   default:
2513 #ifndef NDEBUG
2514     MI.dump();
2515 #endif
2516     llvm_unreachable("Unexpected instruction for custom inserter!");
2517 
2518   case AArch64::F128CSEL:
2519     return EmitF128CSEL(MI, BB);
2520 
2521   case TargetOpcode::STATEPOINT:
2522     // STATEPOINT is a pseudo instruction which has no implicit defs/uses,
2523     // while the BL call instruction (to which the statepoint is eventually
2524     // lowered) has an implicit def of LR. That def is early-clobber because
2525     // LR is written at the moment of the call, before any use is read.
2526     // Add the implicit dead def here as a workaround.
2527     MI.addOperand(*MI.getMF(),
2528                   MachineOperand::CreateReg(
2529                       AArch64::LR, /*isDef*/ true,
2530                       /*isImp*/ true, /*isKill*/ false, /*isDead*/ true,
2531                       /*isUndef*/ false, /*isEarlyClobber*/ true));
2532     LLVM_FALLTHROUGH;
2533   case TargetOpcode::STACKMAP:
2534   case TargetOpcode::PATCHPOINT:
2535     return emitPatchPoint(MI, BB);
2536 
2537   case AArch64::CATCHRET:
2538     return EmitLoweredCatchRet(MI, BB);
2539   case AArch64::LD1_MXIPXX_H_PSEUDO_B:
2540     return EmitTileLoad(AArch64::LD1_MXIPXX_H_B, AArch64::ZAB0, MI, BB);
2541   case AArch64::LD1_MXIPXX_H_PSEUDO_H:
2542     return EmitTileLoad(AArch64::LD1_MXIPXX_H_H, AArch64::ZAH0, MI, BB);
2543   case AArch64::LD1_MXIPXX_H_PSEUDO_S:
2544     return EmitTileLoad(AArch64::LD1_MXIPXX_H_S, AArch64::ZAS0, MI, BB);
2545   case AArch64::LD1_MXIPXX_H_PSEUDO_D:
2546     return EmitTileLoad(AArch64::LD1_MXIPXX_H_D, AArch64::ZAD0, MI, BB);
2547   case AArch64::LD1_MXIPXX_H_PSEUDO_Q:
2548     return EmitTileLoad(AArch64::LD1_MXIPXX_H_Q, AArch64::ZAQ0, MI, BB);
2549   case AArch64::LD1_MXIPXX_V_PSEUDO_B:
2550     return EmitTileLoad(AArch64::LD1_MXIPXX_V_B, AArch64::ZAB0, MI, BB);
2551   case AArch64::LD1_MXIPXX_V_PSEUDO_H:
2552     return EmitTileLoad(AArch64::LD1_MXIPXX_V_H, AArch64::ZAH0, MI, BB);
2553   case AArch64::LD1_MXIPXX_V_PSEUDO_S:
2554     return EmitTileLoad(AArch64::LD1_MXIPXX_V_S, AArch64::ZAS0, MI, BB);
2555   case AArch64::LD1_MXIPXX_V_PSEUDO_D:
2556     return EmitTileLoad(AArch64::LD1_MXIPXX_V_D, AArch64::ZAD0, MI, BB);
2557   case AArch64::LD1_MXIPXX_V_PSEUDO_Q:
2558     return EmitTileLoad(AArch64::LD1_MXIPXX_V_Q, AArch64::ZAQ0, MI, BB);
2559   case AArch64::LDR_ZA_PSEUDO:
2560     return EmitFill(MI, BB);
2561   case AArch64::BFMOPA_MPPZZ_PSEUDO:
2562     return EmitMopa(AArch64::BFMOPA_MPPZZ, AArch64::ZAS0, MI, BB);
2563   case AArch64::BFMOPS_MPPZZ_PSEUDO:
2564     return EmitMopa(AArch64::BFMOPS_MPPZZ, AArch64::ZAS0, MI, BB);
2565   case AArch64::FMOPAL_MPPZZ_PSEUDO:
2566     return EmitMopa(AArch64::FMOPAL_MPPZZ, AArch64::ZAS0, MI, BB);
2567   case AArch64::FMOPSL_MPPZZ_PSEUDO:
2568     return EmitMopa(AArch64::FMOPSL_MPPZZ, AArch64::ZAS0, MI, BB);
2569   case AArch64::FMOPA_MPPZZ_S_PSEUDO:
2570     return EmitMopa(AArch64::FMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2571   case AArch64::FMOPS_MPPZZ_S_PSEUDO:
2572     return EmitMopa(AArch64::FMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2573   case AArch64::FMOPA_MPPZZ_D_PSEUDO:
2574     return EmitMopa(AArch64::FMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2575   case AArch64::FMOPS_MPPZZ_D_PSEUDO:
2576     return EmitMopa(AArch64::FMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2577   case AArch64::SMOPA_MPPZZ_S_PSEUDO:
2578     return EmitMopa(AArch64::SMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2579   case AArch64::SMOPS_MPPZZ_S_PSEUDO:
2580     return EmitMopa(AArch64::SMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2581   case AArch64::UMOPA_MPPZZ_S_PSEUDO:
2582     return EmitMopa(AArch64::UMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2583   case AArch64::UMOPS_MPPZZ_S_PSEUDO:
2584     return EmitMopa(AArch64::UMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2585   case AArch64::SUMOPA_MPPZZ_S_PSEUDO:
2586     return EmitMopa(AArch64::SUMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2587   case AArch64::SUMOPS_MPPZZ_S_PSEUDO:
2588     return EmitMopa(AArch64::SUMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2589   case AArch64::USMOPA_MPPZZ_S_PSEUDO:
2590     return EmitMopa(AArch64::USMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2591   case AArch64::USMOPS_MPPZZ_S_PSEUDO:
2592     return EmitMopa(AArch64::USMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2593   case AArch64::SMOPA_MPPZZ_D_PSEUDO:
2594     return EmitMopa(AArch64::SMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2595   case AArch64::SMOPS_MPPZZ_D_PSEUDO:
2596     return EmitMopa(AArch64::SMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2597   case AArch64::UMOPA_MPPZZ_D_PSEUDO:
2598     return EmitMopa(AArch64::UMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2599   case AArch64::UMOPS_MPPZZ_D_PSEUDO:
2600     return EmitMopa(AArch64::UMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2601   case AArch64::SUMOPA_MPPZZ_D_PSEUDO:
2602     return EmitMopa(AArch64::SUMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2603   case AArch64::SUMOPS_MPPZZ_D_PSEUDO:
2604     return EmitMopa(AArch64::SUMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2605   case AArch64::USMOPA_MPPZZ_D_PSEUDO:
2606     return EmitMopa(AArch64::USMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2607   case AArch64::USMOPS_MPPZZ_D_PSEUDO:
2608     return EmitMopa(AArch64::USMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2609   case AArch64::INSERT_MXIPZ_H_PSEUDO_B:
2610     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_B, AArch64::ZAB0, MI,
2611                                   BB);
2612   case AArch64::INSERT_MXIPZ_H_PSEUDO_H:
2613     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_H, AArch64::ZAH0, MI,
2614                                   BB);
2615   case AArch64::INSERT_MXIPZ_H_PSEUDO_S:
2616     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_S, AArch64::ZAS0, MI,
2617                                   BB);
2618   case AArch64::INSERT_MXIPZ_H_PSEUDO_D:
2619     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_D, AArch64::ZAD0, MI,
2620                                   BB);
2621   case AArch64::INSERT_MXIPZ_H_PSEUDO_Q:
2622     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_Q, AArch64::ZAQ0, MI,
2623                                   BB);
2624   case AArch64::INSERT_MXIPZ_V_PSEUDO_B:
2625     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_B, AArch64::ZAB0, MI,
2626                                   BB);
2627   case AArch64::INSERT_MXIPZ_V_PSEUDO_H:
2628     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_H, AArch64::ZAH0, MI,
2629                                   BB);
2630   case AArch64::INSERT_MXIPZ_V_PSEUDO_S:
2631     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_S, AArch64::ZAS0, MI,
2632                                   BB);
2633   case AArch64::INSERT_MXIPZ_V_PSEUDO_D:
2634     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_D, AArch64::ZAD0, MI,
2635                                   BB);
2636   case AArch64::INSERT_MXIPZ_V_PSEUDO_Q:
2637     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_Q, AArch64::ZAQ0, MI,
2638                                   BB);
2639   case AArch64::ZERO_M_PSEUDO:
2640     return EmitZero(MI, BB);
2641   case AArch64::ADDHA_MPPZ_PSEUDO_S:
2642     return EmitAddVectorToTile(AArch64::ADDHA_MPPZ_S, AArch64::ZAS0, MI, BB);
2643   case AArch64::ADDVA_MPPZ_PSEUDO_S:
2644     return EmitAddVectorToTile(AArch64::ADDVA_MPPZ_S, AArch64::ZAS0, MI, BB);
2645   case AArch64::ADDHA_MPPZ_PSEUDO_D:
2646     return EmitAddVectorToTile(AArch64::ADDHA_MPPZ_D, AArch64::ZAD0, MI, BB);
2647   case AArch64::ADDVA_MPPZ_PSEUDO_D:
2648     return EmitAddVectorToTile(AArch64::ADDVA_MPPZ_D, AArch64::ZAD0, MI, BB);
2649   }
2650 }
2651 
2652 //===----------------------------------------------------------------------===//
2653 // AArch64 Lowering private implementation.
2654 //===----------------------------------------------------------------------===//
2655 
2656 //===----------------------------------------------------------------------===//
2657 // Lowering Code
2658 //===----------------------------------------------------------------------===//
2659 
2660 // Forward declarations of SVE fixed length lowering helpers
2661 static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT);
2662 static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V);
2663 static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V);
2664 static SDValue convertFixedMaskToScalableVector(SDValue Mask,
2665                                                 SelectionDAG &DAG);
2666 static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
2667                                              EVT VT);
2668 
2669 /// isZerosVector - Check whether SDNode N is a zero-filled vector.
2670 static bool isZerosVector(const SDNode *N) {
2671   // Look through a bit convert.
2672   while (N->getOpcode() == ISD::BITCAST)
2673     N = N->getOperand(0).getNode();
2674 
2675   if (ISD::isConstantSplatVectorAllZeros(N))
2676     return true;
2677 
2678   if (N->getOpcode() != AArch64ISD::DUP)
2679     return false;
2680 
2681   auto Opnd0 = N->getOperand(0);
2682   auto *CINT = dyn_cast<ConstantSDNode>(Opnd0);
2683   auto *CFP = dyn_cast<ConstantFPSDNode>(Opnd0);
2684   return (CINT && CINT->isZero()) || (CFP && CFP->isZero());
2685 }
2686 
2687 /// changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64
2688 /// CC
2689 static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC) {
2690   switch (CC) {
2691   default:
2692     llvm_unreachable("Unknown condition code!");
2693   case ISD::SETNE:
2694     return AArch64CC::NE;
2695   case ISD::SETEQ:
2696     return AArch64CC::EQ;
2697   case ISD::SETGT:
2698     return AArch64CC::GT;
2699   case ISD::SETGE:
2700     return AArch64CC::GE;
2701   case ISD::SETLT:
2702     return AArch64CC::LT;
2703   case ISD::SETLE:
2704     return AArch64CC::LE;
2705   case ISD::SETUGT:
2706     return AArch64CC::HI;
2707   case ISD::SETUGE:
2708     return AArch64CC::HS;
2709   case ISD::SETULT:
2710     return AArch64CC::LO;
2711   case ISD::SETULE:
2712     return AArch64CC::LS;
2713   }
2714 }
2715 
2716 /// changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC.
2717 static void changeFPCCToAArch64CC(ISD::CondCode CC,
2718                                   AArch64CC::CondCode &CondCode,
2719                                   AArch64CC::CondCode &CondCode2) {
2720   CondCode2 = AArch64CC::AL;
2721   switch (CC) {
2722   default:
2723     llvm_unreachable("Unknown FP condition!");
2724   case ISD::SETEQ:
2725   case ISD::SETOEQ:
2726     CondCode = AArch64CC::EQ;
2727     break;
2728   case ISD::SETGT:
2729   case ISD::SETOGT:
2730     CondCode = AArch64CC::GT;
2731     break;
2732   case ISD::SETGE:
2733   case ISD::SETOGE:
2734     CondCode = AArch64CC::GE;
2735     break;
2736   case ISD::SETOLT:
2737     CondCode = AArch64CC::MI;
2738     break;
2739   case ISD::SETOLE:
2740     CondCode = AArch64CC::LS;
2741     break;
2742   case ISD::SETONE:
2743     CondCode = AArch64CC::MI;
2744     CondCode2 = AArch64CC::GT;
2745     break;
2746   case ISD::SETO:
2747     CondCode = AArch64CC::VC;
2748     break;
2749   case ISD::SETUO:
2750     CondCode = AArch64CC::VS;
2751     break;
2752   case ISD::SETUEQ:
2753     CondCode = AArch64CC::EQ;
2754     CondCode2 = AArch64CC::VS;
2755     break;
2756   case ISD::SETUGT:
2757     CondCode = AArch64CC::HI;
2758     break;
2759   case ISD::SETUGE:
2760     CondCode = AArch64CC::PL;
2761     break;
2762   case ISD::SETLT:
2763   case ISD::SETULT:
2764     CondCode = AArch64CC::LT;
2765     break;
2766   case ISD::SETLE:
2767   case ISD::SETULE:
2768     CondCode = AArch64CC::LE;
2769     break;
2770   case ISD::SETNE:
2771   case ISD::SETUNE:
2772     CondCode = AArch64CC::NE;
2773     break;
2774   }
2775 }
2776 
2777 /// Convert a DAG fp condition code to an AArch64 CC.
2778 /// This differs from changeFPCCToAArch64CC in that it returns cond codes that
2779 /// should be AND'ed instead of OR'ed.
2780 static void changeFPCCToANDAArch64CC(ISD::CondCode CC,
2781                                      AArch64CC::CondCode &CondCode,
2782                                      AArch64CC::CondCode &CondCode2) {
2783   CondCode2 = AArch64CC::AL;
2784   switch (CC) {
2785   default:
2786     changeFPCCToAArch64CC(CC, CondCode, CondCode2);
2787     assert(CondCode2 == AArch64CC::AL);
2788     break;
2789   case ISD::SETONE:
2790     // (a one b)
2791     // == ((a olt b) || (a ogt b))
2792     // == ((a ord b) && (a une b))
2793     CondCode = AArch64CC::VC;
2794     CondCode2 = AArch64CC::NE;
2795     break;
2796   case ISD::SETUEQ:
2797     // (a ueq b)
2798     // == ((a uno b) || (a oeq b))
2799     // == ((a ule b) && (a uge b))
2800     CondCode = AArch64CC::PL;
2801     CondCode2 = AArch64CC::LE;
2802     break;
2803   }
2804 }
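     // For illustration (not part of the original source): to test "a one b"
     // with the AND decomposition above, the flags of a single "fcmp s0, s1" are
     // checked for VC (ordered) and then NE, typically via a CCMP chain, instead
     // of ORing the MI and GT conditions that changeFPCCToAArch64CC would use.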
2805 
2806 /// changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64
2807 /// CC usable with the vector instructions. Fewer operations are available
2808 /// without a real NZCV register, so we have to use less efficient combinations
2809 /// to get the same effect.
2810 static void changeVectorFPCCToAArch64CC(ISD::CondCode CC,
2811                                         AArch64CC::CondCode &CondCode,
2812                                         AArch64CC::CondCode &CondCode2,
2813                                         bool &Invert) {
2814   Invert = false;
2815   switch (CC) {
2816   default:
2817     // Mostly the scalar mappings work fine.
2818     changeFPCCToAArch64CC(CC, CondCode, CondCode2);
2819     break;
2820   case ISD::SETUO:
2821     Invert = true;
2822     LLVM_FALLTHROUGH;
2823   case ISD::SETO:
2824     CondCode = AArch64CC::MI;
2825     CondCode2 = AArch64CC::GE;
2826     break;
2827   case ISD::SETUEQ:
2828   case ISD::SETULT:
2829   case ISD::SETULE:
2830   case ISD::SETUGT:
2831   case ISD::SETUGE:
2832     // All of the compare-mask comparisons are ordered, but we can switch
2833     // between the two by a double inversion. E.g. ULE == !OGT.
2834     Invert = true;
2835     changeFPCCToAArch64CC(getSetCCInverse(CC, /* FP inverse */ MVT::f32),
2836                           CondCode, CondCode2);
2837     break;
2838   }
2839 }
2840 
2841 static bool isLegalArithImmed(uint64_t C) {
2842   // Matches AArch64DAGToDAGISel::SelectArithImmed().
2843   bool IsLegal = (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
2844   LLVM_DEBUG(dbgs() << "Is imm " << C
2845                     << " legal: " << (IsLegal ? "yes\n" : "no\n"));
2846   return IsLegal;
2847 }
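     // For illustration (not part of the original source): the legal arithmetic
     // immediate is a 12-bit value, optionally shifted left by 12, so e.g.
     //   C = 0xfff   -> legal (fits in the low 12 bits)
     //   C = 0x1000  -> legal (0x1 shifted left by 12)
     //   C = 0x1001  -> not legal (would need bits from both halves)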
2848 
2849 // Can a (CMP op1, (sub 0, op2)) be turned into a CMN instruction on
2850 // the grounds that "op1 - (-op2) == op1 + op2"? Not always: the C and V flags
2851 // can be set differently by this operation. It comes down to whether
2852 // "SInt(~op2)+1 == SInt(~op2+1)" (and the same for UInt). If they are, then
2853 // everything is fine. If not, then the optimization is wrong. Thus general
2854 // comparisons are only valid if op2 != 0.
2855 //
2856 // So, finally, the only LLVM-native comparisons that don't mention C and V
2857 // are SETEQ and SETNE. They're the only ones we can safely use CMN for in
2858 // the absence of information about op2.
2859 static bool isCMN(SDValue Op, ISD::CondCode CC) {
2860   return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0)) &&
2861          (CC == ISD::SETEQ || CC == ISD::SETNE);
2862 }
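     // For illustration (not part of the original source): under the restriction
     // above, IR along the lines of
     //   %neg = sub i32 0, %b
     //   %cmp = icmp eq i32 %a, %neg
     // can be selected as "cmn w0, w1" rather than materialising the negation
     // and using "cmp", because EQ/NE do not depend on the C and V flags.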
2863 
2864 static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl,
2865                                       SelectionDAG &DAG, SDValue Chain,
2866                                       bool IsSignaling) {
2867   EVT VT = LHS.getValueType();
2868   assert(VT != MVT::f128);
2869 
2870   const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
2871 
2872   if (VT == MVT::f16 && !FullFP16) {
2873     LHS = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
2874                       {Chain, LHS});
2875     RHS = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
2876                       {LHS.getValue(1), RHS});
2877     Chain = RHS.getValue(1);
2878     VT = MVT::f32;
2879   }
2880   unsigned Opcode =
2881       IsSignaling ? AArch64ISD::STRICT_FCMPE : AArch64ISD::STRICT_FCMP;
2882   return DAG.getNode(Opcode, dl, {VT, MVT::Other}, {Chain, LHS, RHS});
2883 }
2884 
2885 static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
2886                               const SDLoc &dl, SelectionDAG &DAG) {
2887   EVT VT = LHS.getValueType();
2888   const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
2889 
2890   if (VT.isFloatingPoint()) {
2891     assert(VT != MVT::f128);
2892     if (VT == MVT::f16 && !FullFP16) {
2893       LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS);
2894       RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS);
2895       VT = MVT::f32;
2896     }
2897     return DAG.getNode(AArch64ISD::FCMP, dl, VT, LHS, RHS);
2898   }
2899 
2900   // The CMP instruction is just an alias for SUBS, and representing it as
2901   // SUBS means that it's possible to get CSE with subtract operations.
2902   // A later phase can perform the optimization of setting the destination
2903   // register to WZR/XZR if it ends up being unused.
2904   unsigned Opcode = AArch64ISD::SUBS;
2905 
2906   if (isCMN(RHS, CC)) {
2907     // Can we combine a (CMP op1, (sub 0, op2)) into a CMN instruction?
2908     Opcode = AArch64ISD::ADDS;
2909     RHS = RHS.getOperand(1);
2910   } else if (isCMN(LHS, CC)) {
2911     // As we are looking for EQ/NE compares, the operands can be commuted; can
2912     // we combine a (CMP (sub 0, op1), op2) into a CMN instruction?
2913     Opcode = AArch64ISD::ADDS;
2914     LHS = LHS.getOperand(1);
2915   } else if (isNullConstant(RHS) && !isUnsignedIntSetCC(CC)) {
2916     if (LHS.getOpcode() == ISD::AND) {
2917       // Similarly, (CMP (and X, Y), 0) can be implemented with a TST
2918       // (a.k.a. ANDS) except that the flags are only guaranteed to work for one
2919       // of the signed comparisons.
2920       const SDValue ANDSNode = DAG.getNode(AArch64ISD::ANDS, dl,
2921                                            DAG.getVTList(VT, MVT_CC),
2922                                            LHS.getOperand(0),
2923                                            LHS.getOperand(1));
2924       // Replace all users of (and X, Y) with newly generated (ands X, Y)
2925       DAG.ReplaceAllUsesWith(LHS, ANDSNode);
2926       return ANDSNode.getValue(1);
2927     } else if (LHS.getOpcode() == AArch64ISD::ANDS) {
2928       // Use result of ANDS
2929       return LHS.getValue(1);
2930     }
2931   }
2932 
2933   return DAG.getNode(Opcode, dl, DAG.getVTList(VT, MVT_CC), LHS, RHS)
2934       .getValue(1);
2935 }
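     // For illustration (not part of the original source): the ANDS path above
     // means a compare of a masked value against zero, e.g.
     //   %m = and i32 %x, 255
     //   %c = icmp eq i32 %m, 0
     // is expected to be emitted as a single "tst w0, #0xff" instead of an AND
     // followed by a separate "cmp".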
2936 
2937 /// \defgroup AArch64CCMP CMP;CCMP matching
2938 ///
2939 /// These functions deal with the formation of CMP;CCMP;... sequences.
2940 /// The CCMP/CCMN/FCCMP/FCCMPE instructions allow the conditional execution of
2941 /// a comparison. They set the NZCV flags to a predefined value if their
2942 /// predicate is false. This allows us to express arbitrary conjunctions, for
2943 /// example "cmp 0 (and (setCA (cmp A)) (setCB (cmp B)))"
2944 /// expressed as:
2945 ///   cmp A
2946 ///   ccmp B, inv(CB), CA
2947 ///   check for CB flags
2948 ///
2949 /// This naturally lets us implement chains of AND operations with SETCC
2950 /// operands. And we can even implement some other situations by transforming
2951 /// them:
2952 ///   - We can implement (NEG SETCC), i.e. negating a single comparison, by
2953 ///     negating the flags used in a CCMP/FCCMP operation.
2954 ///   - We can negate the result of a whole chain of CMP/CCMP/FCCMP operations
2955 ///     by negating the flags we test for afterwards, i.e.
2956 ///     NEG (CMP CCMP CCMP ...) can be implemented.
2957 ///   - Note that we can only ever negate all previously processed results.
2958 ///     What we can not implement by flipping the flags to test is a negation
2959 ///     of two sub-trees (because the negation affects all sub-trees emitted so
2960 ///     far, so the 2nd sub-tree we emit would also affect the first).
2961 /// With those tools we can implement some OR operations:
2962 ///   - (OR (SETCC A) (SETCC B)) can be implemented via:
2963 ///     NEG (AND (NEG (SETCC A)) (NEG (SETCC B)))
2964 ///   - After transforming OR to NEG/AND combinations we may be able to use NEG
2965 ///     elimination rules from earlier to implement the whole thing as a
2966 ///     CCMP/FCCMP chain.
2967 ///
2968 /// As a complete example:
2969 ///     or (or (setCA (cmp A)) (setCB (cmp B)))
2970 ///        (and (setCC (cmp C)) (setCD (cmp D)))
2971 /// can be reassociated to:
2972 ///     or (and (setCC (cmp C)) (setCD (cmp D)))
2973 ///        (or (setCA (cmp A)) (setCB (cmp B)))
2974 /// can be transformed to:
2975 ///     not (and (not (and (setCC (cmp C)) (setCD (cmp D))))
2976 ///              (and (not (setCA (cmp A))) (not (setCB (cmp B)))))
2977 /// which can be implemented as:
2978 ///   cmp C
2979 ///   ccmp D, inv(CD), CC
2980 ///   ccmp A, CA, inv(CD)
2981 ///   ccmp B, CB, inv(CA)
2982 ///   check for CB flags
2983 ///
2984 /// A counterexample is "or (and A B) (and C D)", which translates to
2985 /// not (and (not (and (not A) (not B))) (not (and (not C) (not D)))); we
2986 /// can only implement one of the inner (not) operations, but not both!
2987 /// @{
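     // For illustration (not part of the original source): a simple conjunction
     // such as "(a == 0) && (b > 5)" is expected to lower to something like
     //   cmp  w0, #0
     //   ccmp w1, #5, #4, eq
     //   cset w0, gt
     // where the "#4" immediate is the NZCV value (Z set) that makes the final
     // "gt" test fail whenever the first comparison was already false.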
2988 
2989 /// Create a conditional comparison; Use CCMP, CCMN or FCCMP as appropriate.
2990 static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
2991                                          ISD::CondCode CC, SDValue CCOp,
2992                                          AArch64CC::CondCode Predicate,
2993                                          AArch64CC::CondCode OutCC,
2994                                          const SDLoc &DL, SelectionDAG &DAG) {
2995   unsigned Opcode = 0;
2996   const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
2997 
2998   if (LHS.getValueType().isFloatingPoint()) {
2999     assert(LHS.getValueType() != MVT::f128);
3000     if (LHS.getValueType() == MVT::f16 && !FullFP16) {
3001       LHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, LHS);
3002       RHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, RHS);
3003     }
3004     Opcode = AArch64ISD::FCCMP;
3005   } else if (RHS.getOpcode() == ISD::SUB) {
3006     SDValue SubOp0 = RHS.getOperand(0);
3007     if (isNullConstant(SubOp0) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3008       // See emitComparison() on why we can only do this for SETEQ and SETNE.
3009       Opcode = AArch64ISD::CCMN;
3010       RHS = RHS.getOperand(1);
3011     }
3012   }
3013   if (Opcode == 0)
3014     Opcode = AArch64ISD::CCMP;
3015 
3016   SDValue Condition = DAG.getConstant(Predicate, DL, MVT_CC);
3017   AArch64CC::CondCode InvOutCC = AArch64CC::getInvertedCondCode(OutCC);
3018   unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvOutCC);
3019   SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
3020   return DAG.getNode(Opcode, DL, MVT_CC, LHS, RHS, NZCVOp, Condition, CCOp);
3021 }
3022 
3023 /// Returns true if @p Val is a tree of AND/OR/SETCC operations that can be
3024 /// expressed as a conjunction. See \ref AArch64CCMP.
3025 /// \param CanNegate    Set to true if we can negate the whole sub-tree just by
3026 ///                     changing the conditions on the SETCC tests.
3027 ///                     (this means we can call emitConjunctionRec() with
3028 ///                      Negate==true on this sub-tree)
3029 /// \param MustBeFirst  Set to true if this subtree needs to be negated and we
3030 ///                     cannot do the negation naturally. We are required to
3031 ///                     emit the subtree first in this case.
3032 /// \param WillNegate   Is true if we are called when the result of this
3033 ///                     subexpression must be negated. This happens when the
3034 ///                     outer expression is an OR. We can use this fact to know
3035 ///                     that we have a double negation (or (or ...) ...) that
3036 ///                     can be implemented for free.
3037 static bool canEmitConjunction(const SDValue Val, bool &CanNegate,
3038                                bool &MustBeFirst, bool WillNegate,
3039                                unsigned Depth = 0) {
3040   if (!Val.hasOneUse())
3041     return false;
3042   unsigned Opcode = Val->getOpcode();
3043   if (Opcode == ISD::SETCC) {
3044     if (Val->getOperand(0).getValueType() == MVT::f128)
3045       return false;
3046     CanNegate = true;
3047     MustBeFirst = false;
3048     return true;
3049   }
3050   // Protect against exponential runtime and stack overflow.
3051   if (Depth > 6)
3052     return false;
3053   if (Opcode == ISD::AND || Opcode == ISD::OR) {
3054     bool IsOR = Opcode == ISD::OR;
3055     SDValue O0 = Val->getOperand(0);
3056     SDValue O1 = Val->getOperand(1);
3057     bool CanNegateL;
3058     bool MustBeFirstL;
3059     if (!canEmitConjunction(O0, CanNegateL, MustBeFirstL, IsOR, Depth+1))
3060       return false;
3061     bool CanNegateR;
3062     bool MustBeFirstR;
3063     if (!canEmitConjunction(O1, CanNegateR, MustBeFirstR, IsOR, Depth+1))
3064       return false;
3065 
3066     if (MustBeFirstL && MustBeFirstR)
3067       return false;
3068 
3069     if (IsOR) {
3070       // For an OR expression we need to be able to naturally negate at least
3071       // one side or we cannot do the transformation at all.
3072       if (!CanNegateL && !CanNegateR)
3073         return false;
3074       // If the result of the OR will be negated and we can naturally negate
3075       // the leaves, then this sub-tree as a whole negates naturally.
3076       CanNegate = WillNegate && CanNegateL && CanNegateR;
3077       // If we cannot naturally negate the whole sub-tree, then this must be
3078       // emitted first.
3079       MustBeFirst = !CanNegate;
3080     } else {
3081       assert(Opcode == ISD::AND && "Must be OR or AND");
3082       // We cannot naturally negate an AND operation.
3083       CanNegate = false;
3084       MustBeFirst = MustBeFirstL || MustBeFirstR;
3085     }
3086     return true;
3087   }
3088   return false;
3089 }
3090 
3091 /// Emit a conjunction or disjunction tree with the CMP/FCMP followed by a
3092 /// chain of CCMP/FCCMP ops. See @ref AArch64CCMP.
3093 /// Tries to transform the given i1-producing node @p Val into a series of
3094 /// compare and conditional compare operations. @returns a node producing the
3095 /// NZCV flags and sets @p OutCC to the flags that should be tested, or
3096 /// returns SDValue() if the transformation was not possible.
3097 /// \p Negate is true if we want this sub-tree to be negated just by changing
3098 /// SETCC conditions.
3099 static SDValue emitConjunctionRec(SelectionDAG &DAG, SDValue Val,
3100     AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp,
3101     AArch64CC::CondCode Predicate) {
3102   // If we're at a tree leaf, produce a compare or conditional compare operation.
3103   unsigned Opcode = Val->getOpcode();
3104   if (Opcode == ISD::SETCC) {
3105     SDValue LHS = Val->getOperand(0);
3106     SDValue RHS = Val->getOperand(1);
3107     ISD::CondCode CC = cast<CondCodeSDNode>(Val->getOperand(2))->get();
3108     bool isInteger = LHS.getValueType().isInteger();
3109     if (Negate)
3110       CC = getSetCCInverse(CC, LHS.getValueType());
3111     SDLoc DL(Val);
3112     // Determine OutCC and handle FP special case.
3113     if (isInteger) {
3114       OutCC = changeIntCCToAArch64CC(CC);
3115     } else {
3116       assert(LHS.getValueType().isFloatingPoint());
3117       AArch64CC::CondCode ExtraCC;
3118       changeFPCCToANDAArch64CC(CC, OutCC, ExtraCC);
3119       // Some floating point conditions can't be tested with a single condition
3120       // code. Construct an additional comparison in this case.
3121       if (ExtraCC != AArch64CC::AL) {
3122         SDValue ExtraCmp;
3123         if (!CCOp.getNode())
3124           ExtraCmp = emitComparison(LHS, RHS, CC, DL, DAG);
3125         else
3126           ExtraCmp = emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate,
3127                                                ExtraCC, DL, DAG);
3128         CCOp = ExtraCmp;
3129         Predicate = ExtraCC;
3130       }
3131     }
3132 
3133     // Produce a normal comparison if we are first in the chain
3134     if (!CCOp)
3135       return emitComparison(LHS, RHS, CC, DL, DAG);
3136     // Otherwise produce a ccmp.
3137     return emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate, OutCC, DL,
3138                                      DAG);
3139   }
3140   assert(Val->hasOneUse() && "Valid conjunction/disjunction tree");
3141 
3142   bool IsOR = Opcode == ISD::OR;
3143 
3144   SDValue LHS = Val->getOperand(0);
3145   bool CanNegateL;
3146   bool MustBeFirstL;
3147   bool ValidL = canEmitConjunction(LHS, CanNegateL, MustBeFirstL, IsOR);
3148   assert(ValidL && "Valid conjunction/disjunction tree");
3149   (void)ValidL;
3150 
3151   SDValue RHS = Val->getOperand(1);
3152   bool CanNegateR;
3153   bool MustBeFirstR;
3154   bool ValidR = canEmitConjunction(RHS, CanNegateR, MustBeFirstR, IsOR);
3155   assert(ValidR && "Valid conjunction/disjunction tree");
3156   (void)ValidR;
3157 
3158   // Swap sub-tree that must come first to the right side.
3159   if (MustBeFirstL) {
3160     assert(!MustBeFirstR && "Valid conjunction/disjunction tree");
3161     std::swap(LHS, RHS);
3162     std::swap(CanNegateL, CanNegateR);
3163     std::swap(MustBeFirstL, MustBeFirstR);
3164   }
3165 
3166   bool NegateR;
3167   bool NegateAfterR;
3168   bool NegateL;
3169   bool NegateAfterAll;
3170   if (Opcode == ISD::OR) {
3171     // Swap the sub-tree that we can negate naturally to the left.
3172     if (!CanNegateL) {
3173       assert(CanNegateR && "at least one side must be negatable");
3174       assert(!MustBeFirstR && "invalid conjunction/disjunction tree");
3175       assert(!Negate);
3176       std::swap(LHS, RHS);
3177       NegateR = false;
3178       NegateAfterR = true;
3179     } else {
3180       // Negate the left sub-tree if possible, otherwise negate the result.
3181       NegateR = CanNegateR;
3182       NegateAfterR = !CanNegateR;
3183     }
3184     NegateL = true;
3185     NegateAfterAll = !Negate;
3186   } else {
3187     assert(Opcode == ISD::AND && "Valid conjunction/disjunction tree");
3188     assert(!Negate && "Valid conjunction/disjunction tree");
3189 
3190     NegateL = false;
3191     NegateR = false;
3192     NegateAfterR = false;
3193     NegateAfterAll = false;
3194   }
3195 
3196   // Emit sub-trees.
3197   AArch64CC::CondCode RHSCC;
3198   SDValue CmpR = emitConjunctionRec(DAG, RHS, RHSCC, NegateR, CCOp, Predicate);
3199   if (NegateAfterR)
3200     RHSCC = AArch64CC::getInvertedCondCode(RHSCC);
3201   SDValue CmpL = emitConjunctionRec(DAG, LHS, OutCC, NegateL, CmpR, RHSCC);
3202   if (NegateAfterAll)
3203     OutCC = AArch64CC::getInvertedCondCode(OutCC);
3204   return CmpL;
3205 }
3206 
3207 /// Emit an expression as a conjunction (a series of CCMP/FCCMP ops).
3208 /// In some cases this is even possible with OR operations in the expression.
3209 /// See \ref AArch64CCMP.
3210 /// \see emitConjunctionRec().
3211 static SDValue emitConjunction(SelectionDAG &DAG, SDValue Val,
3212                                AArch64CC::CondCode &OutCC) {
3213   bool DummyCanNegate;
3214   bool DummyMustBeFirst;
3215   if (!canEmitConjunction(Val, DummyCanNegate, DummyMustBeFirst, false))
3216     return SDValue();
3217 
3218   return emitConjunctionRec(DAG, Val, OutCC, false, SDValue(), AArch64CC::AL);
3219 }
3220 
3221 /// @}
3222 
3223 /// Returns how profitable it is to fold a comparison's operand's shift and/or
3224 /// extension operations.
3225 static unsigned getCmpOperandFoldingProfit(SDValue Op) {
3226   auto isSupportedExtend = [&](SDValue V) {
3227     if (V.getOpcode() == ISD::SIGN_EXTEND_INREG)
3228       return true;
3229 
3230     if (V.getOpcode() == ISD::AND)
3231       if (ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
3232         uint64_t Mask = MaskCst->getZExtValue();
3233         return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF);
3234       }
3235 
3236     return false;
3237   };
3238 
3239   if (!Op.hasOneUse())
3240     return 0;
3241 
3242   if (isSupportedExtend(Op))
3243     return 1;
3244 
3245   unsigned Opc = Op.getOpcode();
3246   if (Opc == ISD::SHL || Opc == ISD::SRL || Opc == ISD::SRA)
3247     if (ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3248       uint64_t Shift = ShiftCst->getZExtValue();
3249       if (isSupportedExtend(Op.getOperand(0)))
3250         return (Shift <= 4) ? 2 : 1;
3251       EVT VT = Op.getValueType();
3252       if ((VT == MVT::i32 && Shift <= 31) || (VT == MVT::i64 && Shift <= 63))
3253         return 1;
3254     }
3255 
3256   return 0;
3257 }
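     // For illustration (not part of the original source): under this heuristic
     // a zero-extended operand such as
     //   %e = and i32 %a, 65535
     //   %c = icmp sgt i32 %e, %b
     // scores higher than a plain register, so getAArch64Cmp below may swap the
     // operands and fold the extension into the compare as "cmp w1, w0, uxth".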
3258 
3259 static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
3260                              SDValue &AArch64cc, SelectionDAG &DAG,
3261                              const SDLoc &dl) {
3262   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
3263     EVT VT = RHS.getValueType();
3264     uint64_t C = RHSC->getZExtValue();
3265     if (!isLegalArithImmed(C)) {
3266       // Constant does not fit, try adjusting it by one?
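           // For illustration (not part of the original source): 0x1001 is not
           // a valid arithmetic immediate, but 0x1000 is, so a comparison such
           // as "x s< 0x1001" can be rewritten below as "x s<= 0x1000" and still
           // use a single "cmp x0, #4096".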
3267       switch (CC) {
3268       default:
3269         break;
3270       case ISD::SETLT:
3271       case ISD::SETGE:
3272         if ((VT == MVT::i32 && C != 0x80000000 &&
3273              isLegalArithImmed((uint32_t)(C - 1))) ||
3274             (VT == MVT::i64 && C != 0x80000000ULL &&
3275              isLegalArithImmed(C - 1ULL))) {
3276           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
3277           C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1;
3278           RHS = DAG.getConstant(C, dl, VT);
3279         }
3280         break;
3281       case ISD::SETULT:
3282       case ISD::SETUGE:
3283         if ((VT == MVT::i32 && C != 0 &&
3284              isLegalArithImmed((uint32_t)(C - 1))) ||
3285             (VT == MVT::i64 && C != 0ULL && isLegalArithImmed(C - 1ULL))) {
3286           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
3287           C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1;
3288           RHS = DAG.getConstant(C, dl, VT);
3289         }
3290         break;
3291       case ISD::SETLE:
3292       case ISD::SETGT:
3293         if ((VT == MVT::i32 && C != INT32_MAX &&
3294              isLegalArithImmed((uint32_t)(C + 1))) ||
3295             (VT == MVT::i64 && C != INT64_MAX &&
3296              isLegalArithImmed(C + 1ULL))) {
3297           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
3298           C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
3299           RHS = DAG.getConstant(C, dl, VT);
3300         }
3301         break;
3302       case ISD::SETULE:
3303       case ISD::SETUGT:
3304         if ((VT == MVT::i32 && C != UINT32_MAX &&
3305              isLegalArithImmed((uint32_t)(C + 1))) ||
3306             (VT == MVT::i64 && C != UINT64_MAX &&
3307              isLegalArithImmed(C + 1ULL))) {
3308           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
3309           C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
3310           RHS = DAG.getConstant(C, dl, VT);
3311         }
3312         break;
3313       }
3314     }
3315   }
3316 
3317   // Comparisons are canonicalized so that the RHS operand is simpler than the
3318   // LHS one, the extreme case being when RHS is an immediate. However, AArch64
3319   // can fold some shift+extend operations on the RHS operand, so swap the
3320   // operands if that can be done.
3321   //
3322   // For example:
3323   //    lsl     w13, w11, #1
3324   //    cmp     w13, w12
3325   // can be turned into:
3326   //    cmp     w12, w11, lsl #1
3327   if (!isa<ConstantSDNode>(RHS) ||
3328       !isLegalArithImmed(cast<ConstantSDNode>(RHS)->getZExtValue())) {
3329     SDValue TheLHS = isCMN(LHS, CC) ? LHS.getOperand(1) : LHS;
3330 
3331     if (getCmpOperandFoldingProfit(TheLHS) > getCmpOperandFoldingProfit(RHS)) {
3332       std::swap(LHS, RHS);
3333       CC = ISD::getSetCCSwappedOperands(CC);
3334     }
3335   }
3336 
3337   SDValue Cmp;
3338   AArch64CC::CondCode AArch64CC;
3339   if ((CC == ISD::SETEQ || CC == ISD::SETNE) && isa<ConstantSDNode>(RHS)) {
3340     const ConstantSDNode *RHSC = cast<ConstantSDNode>(RHS);
3341 
3342     // The imm operand of ADDS is an unsigned immediate, in the range 0 to 4095.
3343     // For the i8 operand, the largest immediate is 255, so this can be easily
3344     // encoded in the compare instruction. For the i16 operand, however, the
3345     // largest immediate cannot be encoded in the compare.
3346     // Therefore, use a sign extending load and cmn to avoid materializing the
3347     // -1 constant. For example,
3348     // movz w1, #65535
3349     // ldrh w0, [x0, #0]
3350     // cmp w0, w1
3351     // >
3352     // ldrsh w0, [x0, #0]
3353     // cmn w0, #1
3354     // Fundamentally, we're relying on the property that (zext LHS) == (zext RHS)
3355     // if and only if (sext LHS) == (sext RHS). The checks are in place to
3356     // ensure both the LHS and RHS are truly zero extended and to make sure the
3357     // transformation is profitable.
3358     if ((RHSC->getZExtValue() >> 16 == 0) && isa<LoadSDNode>(LHS) &&
3359         cast<LoadSDNode>(LHS)->getExtensionType() == ISD::ZEXTLOAD &&
3360         cast<LoadSDNode>(LHS)->getMemoryVT() == MVT::i16 &&
3361         LHS.getNode()->hasNUsesOfValue(1, 0)) {
3362       int16_t ValueofRHS = cast<ConstantSDNode>(RHS)->getZExtValue();
3363       if (ValueofRHS < 0 && isLegalArithImmed(-ValueofRHS)) {
3364         SDValue SExt =
3365             DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, LHS.getValueType(), LHS,
3366                         DAG.getValueType(MVT::i16));
3367         Cmp = emitComparison(SExt, DAG.getConstant(ValueofRHS, dl,
3368                                                    RHS.getValueType()),
3369                              CC, dl, DAG);
3370         AArch64CC = changeIntCCToAArch64CC(CC);
3371       }
3372     }
3373 
3374     if (!Cmp && (RHSC->isZero() || RHSC->isOne())) {
3375       if ((Cmp = emitConjunction(DAG, LHS, AArch64CC))) {
3376         if ((CC == ISD::SETNE) ^ RHSC->isZero())
3377           AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
3378       }
3379     }
3380   }
3381 
3382   if (!Cmp) {
3383     Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
3384     AArch64CC = changeIntCCToAArch64CC(CC);
3385   }
3386   AArch64cc = DAG.getConstant(AArch64CC, dl, MVT_CC);
3387   return Cmp;
3388 }
3389 
3390 static std::pair<SDValue, SDValue>
3391 getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) {
3392   assert((Op.getValueType() == MVT::i32 || Op.getValueType() == MVT::i64) &&
3393          "Unsupported value type");
3394   SDValue Value, Overflow;
3395   SDLoc DL(Op);
3396   SDValue LHS = Op.getOperand(0);
3397   SDValue RHS = Op.getOperand(1);
3398   unsigned Opc = 0;
3399   switch (Op.getOpcode()) {
3400   default:
3401     llvm_unreachable("Unknown overflow instruction!");
3402   case ISD::SADDO:
3403     Opc = AArch64ISD::ADDS;
3404     CC = AArch64CC::VS;
3405     break;
3406   case ISD::UADDO:
3407     Opc = AArch64ISD::ADDS;
3408     CC = AArch64CC::HS;
3409     break;
3410   case ISD::SSUBO:
3411     Opc = AArch64ISD::SUBS;
3412     CC = AArch64CC::VS;
3413     break;
3414   case ISD::USUBO:
3415     Opc = AArch64ISD::SUBS;
3416     CC = AArch64CC::LO;
3417     break;
3418   // Multiply needs a little bit of extra work.
3419   case ISD::SMULO:
3420   case ISD::UMULO: {
3421     CC = AArch64CC::NE;
3422     bool IsSigned = Op.getOpcode() == ISD::SMULO;
3423     if (Op.getValueType() == MVT::i32) {
3424       // Extend to 64-bits, then perform a 64-bit multiply.
3425       unsigned ExtendOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3426       LHS = DAG.getNode(ExtendOpc, DL, MVT::i64, LHS);
3427       RHS = DAG.getNode(ExtendOpc, DL, MVT::i64, RHS);
3428       SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS);
3429       Value = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
3430 
3431       // Check that the result fits into a 32-bit integer.
3432       SDVTList VTs = DAG.getVTList(MVT::i64, MVT_CC);
3433       if (IsSigned) {
3434         // cmp xreg, wreg, sxtw
3435         SDValue SExtMul = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Value);
3436         Overflow =
3437             DAG.getNode(AArch64ISD::SUBS, DL, VTs, Mul, SExtMul).getValue(1);
3438       } else {
3439         // tst xreg, #0xffffffff00000000
3440         SDValue UpperBits = DAG.getConstant(0xFFFFFFFF00000000, DL, MVT::i64);
3441         Overflow =
3442             DAG.getNode(AArch64ISD::ANDS, DL, VTs, Mul, UpperBits).getValue(1);
3443       }
3444       break;
3445     }
3446     assert(Op.getValueType() == MVT::i64 && "Expected an i64 value type");
3447     // For the 64-bit multiply:
3448     Value = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS);
3449     if (IsSigned) {
3450       SDValue UpperBits = DAG.getNode(ISD::MULHS, DL, MVT::i64, LHS, RHS);
3451       SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i64, Value,
3452                                       DAG.getConstant(63, DL, MVT::i64));
3453       // It is important that LowerBits is last; otherwise the arithmetic
3454       // shift will not be folded into the compare (SUBS).
3455       SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32);
3456       Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, UpperBits, LowerBits)
3457                      .getValue(1);
3458     } else {
3459       SDValue UpperBits = DAG.getNode(ISD::MULHU, DL, MVT::i64, LHS, RHS);
3460       SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32);
3461       Overflow =
3462           DAG.getNode(AArch64ISD::SUBS, DL, VTs,
3463                       DAG.getConstant(0, DL, MVT::i64),
3464                       UpperBits).getValue(1);
3465     }
3466     break;
3467   }
3468   } // switch (...)
3469 
3470   if (Opc) {
3471     SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
3472 
3473     // Emit the AArch64 operation with overflow check.
3474     Value = DAG.getNode(Opc, DL, VTs, LHS, RHS);
3475     Overflow = Value.getValue(1);
3476   }
3477   return std::make_pair(Value, Overflow);
3478 }
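     // For illustration (not part of the original source): for a 32-bit
     // llvm.sadd.with.overflow, the code above produces an ADDS whose value
     // result is the sum and whose flag result is tested with VS, so the final
     // code is expected to look roughly like
     //   adds w0, w0, w1
     //   cset w1, vs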
3479 
3480 SDValue AArch64TargetLowering::LowerXOR(SDValue Op, SelectionDAG &DAG) const {
3481   if (useSVEForFixedLengthVectorVT(Op.getValueType()))
3482     return LowerToScalableOp(Op, DAG);
3483 
3484   SDValue Sel = Op.getOperand(0);
3485   SDValue Other = Op.getOperand(1);
3486   SDLoc dl(Sel);
3487 
3488   // If the operand is an overflow checking operation, invert the condition
3489   // code and kill the Not operation. I.e., transform:
3490   // (xor overflow_op_bool, 1)
3491   //   -->
3492   // (csel 1, 0, invert(cc), overflow_op_bool)
3493   // ... which later gets transformed to just a cset instruction with an
3494   // inverted condition code, rather than a cset + eor sequence.
3495   if (isOneConstant(Other) && ISD::isOverflowIntrOpRes(Sel)) {
3496     // Only lower legal XALUO ops.
3497     if (!DAG.getTargetLoweringInfo().isTypeLegal(Sel->getValueType(0)))
3498       return SDValue();
3499 
3500     SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
3501     SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
3502     AArch64CC::CondCode CC;
3503     SDValue Value, Overflow;
3504     std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Sel.getValue(0), DAG);
3505     SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32);
3506     return DAG.getNode(AArch64ISD::CSEL, dl, Op.getValueType(), TVal, FVal,
3507                        CCVal, Overflow);
3508   }
3509   // If neither operand is a SELECT_CC, give up.
3510   if (Sel.getOpcode() != ISD::SELECT_CC)
3511     std::swap(Sel, Other);
3512   if (Sel.getOpcode() != ISD::SELECT_CC)
3513     return Op;
3514 
3515   // The folding we want to perform is:
3516   // (xor x, (select_cc a, b, cc, 0, -1) )
3517   //   -->
3518   // (csel x, (xor x, -1), cc ...)
3519   //
3520   // The latter will get matched to a CSINV instruction.
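       // For illustration (not part of the original source): for
       // "x ^ (a < b ? 0 : -1)" this transform is expected to yield
       //   cmp   w1, w2
       //   csinv w0, w0, w0, lt
       // i.e. select x when the condition holds and ~x otherwise, with no
       // explicit eor.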
3521 
3522   ISD::CondCode CC = cast<CondCodeSDNode>(Sel.getOperand(4))->get();
3523   SDValue LHS = Sel.getOperand(0);
3524   SDValue RHS = Sel.getOperand(1);
3525   SDValue TVal = Sel.getOperand(2);
3526   SDValue FVal = Sel.getOperand(3);
3527 
3528   // FIXME: This could be generalized to non-integer comparisons.
3529   if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64)
3530     return Op;
3531 
3532   ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
3533   ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
3534 
3535   // The values aren't constants, so this isn't the pattern we're looking for.
3536   if (!CFVal || !CTVal)
3537     return Op;
3538 
3539   // We can commute the SELECT_CC by inverting the condition.  This
3540   // might be needed to make this fit into a CSINV pattern.
3541   if (CTVal->isAllOnes() && CFVal->isZero()) {
3542     std::swap(TVal, FVal);
3543     std::swap(CTVal, CFVal);
3544     CC = ISD::getSetCCInverse(CC, LHS.getValueType());
3545   }
3546 
3547   // If the constants line up, perform the transform!
3548   if (CTVal->isZero() && CFVal->isAllOnes()) {
3549     SDValue CCVal;
3550     SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
3551 
3552     FVal = Other;
3553     TVal = DAG.getNode(ISD::XOR, dl, Other.getValueType(), Other,
3554                        DAG.getConstant(-1ULL, dl, Other.getValueType()));
3555 
3556     return DAG.getNode(AArch64ISD::CSEL, dl, Sel.getValueType(), FVal, TVal,
3557                        CCVal, Cmp);
3558   }
3559 
3560   return Op;
3561 }
3562 
3563 // If Invert is false, sets 'C' bit of NZCV to 0 if value is 0, else sets 'C'
3564 // bit to 1. If Invert is true, sets 'C' bit of NZCV to 1 if value is 0, else
3565 // sets 'C' bit to 0.
3566 static SDValue valueToCarryFlag(SDValue Value, SelectionDAG &DAG, bool Invert) {
3567   SDLoc DL(Value);
3568   EVT VT = Value.getValueType();
3569   SDValue Op0 = Invert ? DAG.getConstant(0, DL, VT) : Value;
3570   SDValue Op1 = Invert ? Value : DAG.getConstant(1, DL, VT);
3571   SDValue Cmp =
3572       DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::Glue), Op0, Op1);
3573   return Cmp.getValue(1);
3574 }
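     // For illustration (not part of the original source): with Invert == false
     // this emits the equivalent of "cmp w0, #1" (SUBS Value - 1), so the carry
     // flag ends up set exactly when the incoming value is non-zero; with
     // Invert == true it emits SUBS 0 - Value instead, and the carry is set only
     // when the value is zero.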
3575 
3576 // If Invert is false, value is 1 if 'C' bit of NZCV is 1, else 0.
3577 // If Invert is true, value is 0 if 'C' bit of NZCV is 1, else 1.
3578 static SDValue carryFlagToValue(SDValue Flag, EVT VT, SelectionDAG &DAG,
3579                                 bool Invert) {
3580   assert(Flag.getResNo() == 1);
3581   SDLoc DL(Flag);
3582   SDValue Zero = DAG.getConstant(0, DL, VT);
3583   SDValue One = DAG.getConstant(1, DL, VT);
3584   unsigned Cond = Invert ? AArch64CC::LO : AArch64CC::HS;
3585   SDValue CC = DAG.getConstant(Cond, DL, MVT::i32);
3586   return DAG.getNode(AArch64ISD::CSEL, DL, VT, One, Zero, CC, Flag);
3587 }
3588 
3589 // Value is 1 if 'V' bit of NZCV is 1, else 0
3590 static SDValue overflowFlagToValue(SDValue Flag, EVT VT, SelectionDAG &DAG) {
3591   assert(Flag.getResNo() == 1);
3592   SDLoc DL(Flag);
3593   SDValue Zero = DAG.getConstant(0, DL, VT);
3594   SDValue One = DAG.getConstant(1, DL, VT);
3595   SDValue CC = DAG.getConstant(AArch64CC::VS, DL, MVT::i32);
3596   return DAG.getNode(AArch64ISD::CSEL, DL, VT, One, Zero, CC, Flag);
3597 }
3598 
3599 // This lowering is inefficient, but it will get cleaned up by
3600 // `foldOverflowCheck`
3601 static SDValue lowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG, unsigned Opcode,
3602                                 bool IsSigned) {
3603   EVT VT0 = Op.getValue(0).getValueType();
3604   EVT VT1 = Op.getValue(1).getValueType();
3605 
3606   if (VT0 != MVT::i32 && VT0 != MVT::i64)
3607     return SDValue();
3608 
3609   bool InvertCarry = Opcode == AArch64ISD::SBCS;
3610   SDValue OpLHS = Op.getOperand(0);
3611   SDValue OpRHS = Op.getOperand(1);
3612   SDValue OpCarryIn = valueToCarryFlag(Op.getOperand(2), DAG, InvertCarry);
3613 
3614   SDLoc DL(Op);
3615   SDVTList VTs = DAG.getVTList(VT0, VT1);
3616 
3617   SDValue Sum = DAG.getNode(Opcode, DL, DAG.getVTList(VT0, MVT::Glue), OpLHS,
3618                             OpRHS, OpCarryIn);
3619 
3620   SDValue OutFlag =
3621       IsSigned ? overflowFlagToValue(Sum.getValue(1), VT1, DAG)
3622                : carryFlagToValue(Sum.getValue(1), VT1, DAG, InvertCarry);
3623 
3624   return DAG.getNode(ISD::MERGE_VALUES, DL, VTs, Sum, OutFlag);
3625 }
3626 
3627 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
3628   // Let legalize expand this if it isn't a legal type yet.
3629   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
3630     return SDValue();
3631 
3632   SDLoc dl(Op);
3633   AArch64CC::CondCode CC;
3634   // The actual operation that sets the overflow or carry flag.
3635   SDValue Value, Overflow;
3636   std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Op, DAG);
3637 
3638   // We use 0 and 1 as false and true values.
3639   SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
3640   SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
3641 
3642   // We use an inverted condition, because the conditional select is inverted
3643   // too. This will allow it to be selected to a single instruction:
3644   // CSINC Wd, WZR, WZR, invert(cond).
3645   SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32);
3646   Overflow = DAG.getNode(AArch64ISD::CSEL, dl, MVT::i32, FVal, TVal,
3647                          CCVal, Overflow);
3648 
3649   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
3650   return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
3651 }
3652 
3653 // Prefetch operands are:
3654 // 1: Address to prefetch
3655 // 2: bool isWrite
3656 // 3: int locality (0 = no locality ... 3 = extreme locality)
3657 // 4: bool isDataCache
3658 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) {
3659   SDLoc DL(Op);
3660   unsigned IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
3661   unsigned Locality = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
3662   unsigned IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
3663 
3664   bool IsStream = !Locality;
3665   // When the locality number is set, map it onto the target cache level.
3666   if (Locality) {
3667     // The front-end should have filtered out the out-of-range values.
3668     assert(Locality <= 3 && "Prefetch locality out-of-range");
3669     // The locality degree runs in the opposite direction to the cache level:
3670     // locality 3 (keep as close as possible) corresponds to level 1, whose
3671     // encoding is 0, so invert the number.
3672     Locality = 3 - Locality;
3673   }
3674 
3675   // Build the mask value encoding the expected behavior.
3676   unsigned PrfOp = (IsWrite << 4) |     // Load/Store bit
3677                    (!IsData << 3) |     // IsDataCache bit
3678                    (Locality << 1) |    // Cache level bits
3679                    (unsigned)IsStream;  // Stream bit
3680   return DAG.getNode(AArch64ISD::PREFETCH, DL, MVT::Other, Op.getOperand(0),
3681                      DAG.getConstant(PrfOp, DL, MVT::i32), Op.getOperand(1));
3682 }
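     // For illustration (not part of the original source): a call such as
     // llvm.prefetch(ptr, /*rw=*/0, /*locality=*/3, /*cache type=*/1) encodes to
     // PrfOp == 0, i.e. "prfm pldl1keep, [x0]", while locality 0 sets the stream
     // bit and gives "prfm pldl1strm, [x0]" instead.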
3683 
3684 SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op,
3685                                               SelectionDAG &DAG) const {
3686   EVT VT = Op.getValueType();
3687   if (VT.isScalableVector())
3688     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_EXTEND_MERGE_PASSTHRU);
3689 
3690   if (useSVEForFixedLengthVectorVT(VT))
3691     return LowerFixedLengthFPExtendToSVE(Op, DAG);
3692 
3693   assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");
3694   return SDValue();
3695 }
3696 
3697 SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op,
3698                                              SelectionDAG &DAG) const {
3699   if (Op.getValueType().isScalableVector())
3700     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_ROUND_MERGE_PASSTHRU);
3701 
3702   bool IsStrict = Op->isStrictFPOpcode();
3703   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
3704   EVT SrcVT = SrcVal.getValueType();
3705 
3706   if (useSVEForFixedLengthVectorVT(SrcVT))
3707     return LowerFixedLengthFPRoundToSVE(Op, DAG);
3708 
3709   if (SrcVT != MVT::f128) {
3710     // Expand cases where the input is a vector bigger than NEON.
3711     if (useSVEForFixedLengthVectorVT(SrcVT))
3712       return SDValue();
3713 
3714     // It's legal except when f128 is involved
3715     return Op;
3716   }
3717 
3718   return SDValue();
3719 }
3720 
3721 SDValue AArch64TargetLowering::LowerVectorFP_TO_INT(SDValue Op,
3722                                                     SelectionDAG &DAG) const {
3723   // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp.
3724   // Any additional optimization in this function should be recorded
3725   // in the cost tables.
3726   bool IsStrict = Op->isStrictFPOpcode();
3727   EVT InVT = Op.getOperand(IsStrict ? 1 : 0).getValueType();
3728   EVT VT = Op.getValueType();
3729 
3730   if (VT.isScalableVector()) {
3731     unsigned Opcode = Op.getOpcode() == ISD::FP_TO_UINT
3732                           ? AArch64ISD::FCVTZU_MERGE_PASSTHRU
3733                           : AArch64ISD::FCVTZS_MERGE_PASSTHRU;
3734     return LowerToPredicatedOp(Op, DAG, Opcode);
3735   }
3736 
3737   if (useSVEForFixedLengthVectorVT(VT) || useSVEForFixedLengthVectorVT(InVT))
3738     return LowerFixedLengthFPToIntToSVE(Op, DAG);
3739 
3740   unsigned NumElts = InVT.getVectorNumElements();
3741 
3742   // f16 conversions are promoted to f32 when full fp16 is not supported.
3743   if (InVT.getVectorElementType() == MVT::f16 &&
3744       !Subtarget->hasFullFP16()) {
3745     MVT NewVT = MVT::getVectorVT(MVT::f32, NumElts);
3746     SDLoc dl(Op);
3747     if (IsStrict) {
3748       SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {NewVT, MVT::Other},
3749                                 {Op.getOperand(0), Op.getOperand(1)});
3750       return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
3751                          {Ext.getValue(1), Ext.getValue(0)});
3752     }
3753     return DAG.getNode(
3754         Op.getOpcode(), dl, Op.getValueType(),
3755         DAG.getNode(ISD::FP_EXTEND, dl, NewVT, Op.getOperand(0)));
3756   }
3757 
3758   uint64_t VTSize = VT.getFixedSizeInBits();
3759   uint64_t InVTSize = InVT.getFixedSizeInBits();
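  // Narrowing conversions (e.g. v2f64 -> v2i32) are performed at the source
  // width and then truncated; widening conversions (e.g. v4f16 -> v4i32)
  // extend the source to a matching floating-point width first.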
3760   if (VTSize < InVTSize) {
3761     SDLoc dl(Op);
3762     if (IsStrict) {
3763       InVT = InVT.changeVectorElementTypeToInteger();
3764       SDValue Cv = DAG.getNode(Op.getOpcode(), dl, {InVT, MVT::Other},
3765                                {Op.getOperand(0), Op.getOperand(1)});
3766       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, VT, Cv);
3767       return DAG.getMergeValues({Trunc, Cv.getValue(1)}, dl);
3768     }
3769     SDValue Cv =
3770         DAG.getNode(Op.getOpcode(), dl, InVT.changeVectorElementTypeToInteger(),
3771                     Op.getOperand(0));
3772     return DAG.getNode(ISD::TRUNCATE, dl, VT, Cv);
3773   }
3774 
3775   if (VTSize > InVTSize) {
3776     SDLoc dl(Op);
3777     MVT ExtVT =
3778         MVT::getVectorVT(MVT::getFloatingPointVT(VT.getScalarSizeInBits()),
3779                          VT.getVectorNumElements());
3780     if (IsStrict) {
3781       SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {ExtVT, MVT::Other},
3782                                 {Op.getOperand(0), Op.getOperand(1)});
3783       return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
3784                          {Ext.getValue(1), Ext.getValue(0)});
3785     }
3786     SDValue Ext = DAG.getNode(ISD::FP_EXTEND, dl, ExtVT, Op.getOperand(0));
3787     return DAG.getNode(Op.getOpcode(), dl, VT, Ext);
3788   }
3789 
3790   // Use a scalar operation for conversions between single-element vectors of
3791   // the same size.
3792   if (NumElts == 1) {
3793     SDLoc dl(Op);
3794     SDValue Extract = DAG.getNode(
3795         ISD::EXTRACT_VECTOR_ELT, dl, InVT.getScalarType(),
3796         Op.getOperand(IsStrict ? 1 : 0), DAG.getConstant(0, dl, MVT::i64));
3797     EVT ScalarVT = VT.getScalarType();
3798     if (IsStrict)
3799       return DAG.getNode(Op.getOpcode(), dl, {ScalarVT, MVT::Other},
3800                          {Op.getOperand(0), Extract});
3801     return DAG.getNode(Op.getOpcode(), dl, ScalarVT, Extract);
3802   }
3803 
  // At this point the source and result have the same element count and total
  // size, so the conversion is already legal.
3805   return Op;
3806 }
3807 
3808 SDValue AArch64TargetLowering::LowerFP_TO_INT(SDValue Op,
3809                                               SelectionDAG &DAG) const {
3810   bool IsStrict = Op->isStrictFPOpcode();
3811   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
3812 
3813   if (SrcVal.getValueType().isVector())
3814     return LowerVectorFP_TO_INT(Op, DAG);
3815 
3816   // f16 conversions are promoted to f32 when full fp16 is not supported.
3817   if (SrcVal.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
3818     SDLoc dl(Op);
3819     if (IsStrict) {
3820       SDValue Ext =
3821           DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
3822                       {Op.getOperand(0), SrcVal});
3823       return DAG.getNode(Op.getOpcode(), dl, {Op.getValueType(), MVT::Other},
3824                          {Ext.getValue(1), Ext.getValue(0)});
3825     }
3826     return DAG.getNode(
3827         Op.getOpcode(), dl, Op.getValueType(),
3828         DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, SrcVal));
3829   }
3830 
3831   if (SrcVal.getValueType() != MVT::f128) {
3832     // It's legal except when f128 is involved
3833     return Op;
3834   }
3835 
3836   return SDValue();
3837 }
3838 
3839 SDValue
3840 AArch64TargetLowering::LowerVectorFP_TO_INT_SAT(SDValue Op,
3841                                                 SelectionDAG &DAG) const {
3842   // AArch64 FP-to-int conversions saturate to the destination element size, so
3843   // we can lower common saturating conversions to simple instructions.
3844   SDValue SrcVal = Op.getOperand(0);
3845   EVT SrcVT = SrcVal.getValueType();
3846   EVT DstVT = Op.getValueType();
3847   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3848 
3849   uint64_t SrcElementWidth = SrcVT.getScalarSizeInBits();
3850   uint64_t DstElementWidth = DstVT.getScalarSizeInBits();
3851   uint64_t SatWidth = SatVT.getScalarSizeInBits();
3852   assert(SatWidth <= DstElementWidth &&
3853          "Saturation width cannot exceed result width");
3854 
3855   // TODO: Consider lowering to SVE operations, as in LowerVectorFP_TO_INT.
3856   // Currently, the `llvm.fpto[su]i.sat.*` intrinsics don't accept scalable
3857   // types, so this is hard to reach.
3858   if (DstVT.isScalableVector())
3859     return SDValue();
3860 
3861   EVT SrcElementVT = SrcVT.getVectorElementType();
3862 
3863   // In the absence of FP16 support, promote f16 to f32 and saturate the result.
3864   if (SrcElementVT == MVT::f16 &&
3865       (!Subtarget->hasFullFP16() || DstElementWidth > 16)) {
3866     MVT F32VT = MVT::getVectorVT(MVT::f32, SrcVT.getVectorNumElements());
3867     SrcVal = DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), F32VT, SrcVal);
3868     SrcVT = F32VT;
3869     SrcElementVT = MVT::f32;
3870     SrcElementWidth = 32;
3871   } else if (SrcElementVT != MVT::f64 && SrcElementVT != MVT::f32 &&
3872              SrcElementVT != MVT::f16)
3873     return SDValue();
3874 
3875   SDLoc DL(Op);
3876   // Cases that we can emit directly.
3877   if (SrcElementWidth == DstElementWidth && SrcElementWidth == SatWidth)
3878     return DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal,
3879                        DAG.getValueType(DstVT.getScalarType()));
3880 
  // Otherwise we emit a cvt that saturates to a higher bitwidth, then saturate
  // the result ourselves. This is only valid if the legal cvt is larger than
  // the saturate width. For f64, as we don't have vector MIN/MAX at that
  // width, it can be simpler to scalarize (at least until sqxtn is selected).
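  // For example, a v4f32 fptosi.sat to v4i16 is emitted as a 32-bit saturating
  // convert to v4i32, clamped with SMIN(32767)/SMAX(-32768), and then
  // truncated to v4i16.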
3885   if (SrcElementWidth < SatWidth || SrcElementVT == MVT::f64)
3886     return SDValue();
3887 
3888   EVT IntVT = SrcVT.changeVectorElementTypeToInteger();
3889   SDValue NativeCvt = DAG.getNode(Op.getOpcode(), DL, IntVT, SrcVal,
3890                                   DAG.getValueType(IntVT.getScalarType()));
3891   SDValue Sat;
3892   if (Op.getOpcode() == ISD::FP_TO_SINT_SAT) {
3893     SDValue MinC = DAG.getConstant(
3894         APInt::getSignedMaxValue(SatWidth).sext(SrcElementWidth), DL, IntVT);
3895     SDValue Min = DAG.getNode(ISD::SMIN, DL, IntVT, NativeCvt, MinC);
3896     SDValue MaxC = DAG.getConstant(
3897         APInt::getSignedMinValue(SatWidth).sext(SrcElementWidth), DL, IntVT);
3898     Sat = DAG.getNode(ISD::SMAX, DL, IntVT, Min, MaxC);
3899   } else {
3900     SDValue MinC = DAG.getConstant(
3901         APInt::getAllOnesValue(SatWidth).zext(SrcElementWidth), DL, IntVT);
3902     Sat = DAG.getNode(ISD::UMIN, DL, IntVT, NativeCvt, MinC);
3903   }
3904 
3905   return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Sat);
3906 }
3907 
3908 SDValue AArch64TargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
3909                                                   SelectionDAG &DAG) const {
3910   // AArch64 FP-to-int conversions saturate to the destination register size, so
3911   // we can lower common saturating conversions to simple instructions.
3912   SDValue SrcVal = Op.getOperand(0);
3913   EVT SrcVT = SrcVal.getValueType();
3914 
3915   if (SrcVT.isVector())
3916     return LowerVectorFP_TO_INT_SAT(Op, DAG);
3917 
3918   EVT DstVT = Op.getValueType();
3919   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3920   uint64_t SatWidth = SatVT.getScalarSizeInBits();
3921   uint64_t DstWidth = DstVT.getScalarSizeInBits();
3922   assert(SatWidth <= DstWidth && "Saturation width cannot exceed result width");
3923 
3924   // In the absence of FP16 support, promote f16 to f32 and saturate the result.
3925   if (SrcVT == MVT::f16 && !Subtarget->hasFullFP16()) {
3926     SrcVal = DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), MVT::f32, SrcVal);
3927     SrcVT = MVT::f32;
3928   } else if (SrcVT != MVT::f64 && SrcVT != MVT::f32 && SrcVT != MVT::f16)
3929     return SDValue();
3930 
3931   SDLoc DL(Op);
3932   // Cases that we can emit directly.
3933   if ((SrcVT == MVT::f64 || SrcVT == MVT::f32 ||
3934        (SrcVT == MVT::f16 && Subtarget->hasFullFP16())) &&
3935       DstVT == SatVT && (DstVT == MVT::i64 || DstVT == MVT::i32))
3936     return DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal,
3937                        DAG.getValueType(DstVT));
3938 
  // Otherwise we emit a cvt that saturates to a higher bitwidth, then saturate
  // the result ourselves. This is only valid if the legal cvt is larger than
  // the saturate width.
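  // For example, an i8-saturating fptosi from f32 to i32 first converts with a
  // 32-bit saturating convert and then clamps with SMIN(127)/SMAX(-128).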
3942   if (DstWidth < SatWidth)
3943     return SDValue();
3944 
3945   SDValue NativeCvt =
3946       DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal, DAG.getValueType(DstVT));
3947   SDValue Sat;
3948   if (Op.getOpcode() == ISD::FP_TO_SINT_SAT) {
3949     SDValue MinC = DAG.getConstant(
3950         APInt::getSignedMaxValue(SatWidth).sext(DstWidth), DL, DstVT);
3951     SDValue Min = DAG.getNode(ISD::SMIN, DL, DstVT, NativeCvt, MinC);
3952     SDValue MaxC = DAG.getConstant(
3953         APInt::getSignedMinValue(SatWidth).sext(DstWidth), DL, DstVT);
3954     Sat = DAG.getNode(ISD::SMAX, DL, DstVT, Min, MaxC);
3955   } else {
3956     SDValue MinC = DAG.getConstant(
3957         APInt::getAllOnesValue(SatWidth).zext(DstWidth), DL, DstVT);
3958     Sat = DAG.getNode(ISD::UMIN, DL, DstVT, NativeCvt, MinC);
3959   }
3960 
3961   return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Sat);
3962 }
3963 
3964 SDValue AArch64TargetLowering::LowerVectorINT_TO_FP(SDValue Op,
3965                                                     SelectionDAG &DAG) const {
3966   // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp.
3967   // Any additional optimization in this function should be recorded
3968   // in the cost tables.
3969   bool IsStrict = Op->isStrictFPOpcode();
3970   EVT VT = Op.getValueType();
3971   SDLoc dl(Op);
3972   SDValue In = Op.getOperand(IsStrict ? 1 : 0);
3973   EVT InVT = In.getValueType();
3974   unsigned Opc = Op.getOpcode();
3975   bool IsSigned = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP;
3976 
3977   if (VT.isScalableVector()) {
3978     if (InVT.getVectorElementType() == MVT::i1) {
      // We can't convert an SVE predicate directly to FP; promote it to an
      // integer vector first.
3980       unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3981       EVT CastVT = getPromotedVTForPredicate(InVT);
3982       In = DAG.getNode(CastOpc, dl, CastVT, In);
3983       return DAG.getNode(Opc, dl, VT, In);
3984     }
3985 
3986     unsigned Opcode = IsSigned ? AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU
3987                                : AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU;
3988     return LowerToPredicatedOp(Op, DAG, Opcode);
3989   }
3990 
3991   if (useSVEForFixedLengthVectorVT(VT) || useSVEForFixedLengthVectorVT(InVT))
3992     return LowerFixedLengthIntToFPToSVE(Op, DAG);
3993 
3994   uint64_t VTSize = VT.getFixedSizeInBits();
3995   uint64_t InVTSize = InVT.getFixedSizeInBits();
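  // Narrowing conversions (e.g. v2i64 -> v2f32) convert at the source width
  // (v2f64) and round the result; widening conversions (e.g. v2i16 -> v2f64)
  // extend the integer input to the result's element width first.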
3996   if (VTSize < InVTSize) {
3997     MVT CastVT =
3998         MVT::getVectorVT(MVT::getFloatingPointVT(InVT.getScalarSizeInBits()),
3999                          InVT.getVectorNumElements());
4000     if (IsStrict) {
4001       In = DAG.getNode(Opc, dl, {CastVT, MVT::Other},
4002                        {Op.getOperand(0), In});
4003       return DAG.getNode(
4004           ISD::STRICT_FP_ROUND, dl, {VT, MVT::Other},
4005           {In.getValue(1), In.getValue(0), DAG.getIntPtrConstant(0, dl)});
4006     }
4007     In = DAG.getNode(Opc, dl, CastVT, In);
4008     return DAG.getNode(ISD::FP_ROUND, dl, VT, In, DAG.getIntPtrConstant(0, dl));
4009   }
4010 
4011   if (VTSize > InVTSize) {
4012     unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
4013     EVT CastVT = VT.changeVectorElementTypeToInteger();
4014     In = DAG.getNode(CastOpc, dl, CastVT, In);
4015     if (IsStrict)
4016       return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op.getOperand(0), In});
4017     return DAG.getNode(Opc, dl, VT, In);
4018   }
4019 
4020   // Use a scalar operation for conversions between single-element vectors of
4021   // the same size.
4022   if (VT.getVectorNumElements() == 1) {
4023     SDValue Extract = DAG.getNode(
4024         ISD::EXTRACT_VECTOR_ELT, dl, InVT.getScalarType(),
4025         In, DAG.getConstant(0, dl, MVT::i64));
4026     EVT ScalarVT = VT.getScalarType();
4027     if (IsStrict)
4028       return DAG.getNode(Op.getOpcode(), dl, {ScalarVT, MVT::Other},
4029                          {Op.getOperand(0), Extract});
4030     return DAG.getNode(Op.getOpcode(), dl, ScalarVT, Extract);
4031   }
4032 
4033   return Op;
4034 }
4035 
4036 SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op,
4037                                             SelectionDAG &DAG) const {
4038   if (Op.getValueType().isVector())
4039     return LowerVectorINT_TO_FP(Op, DAG);
4040 
4041   bool IsStrict = Op->isStrictFPOpcode();
4042   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
4043 
4044   // f16 conversions are promoted to f32 when full fp16 is not supported.
4045   if (Op.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
4046     SDLoc dl(Op);
4047     if (IsStrict) {
4048       SDValue Val = DAG.getNode(Op.getOpcode(), dl, {MVT::f32, MVT::Other},
4049                                 {Op.getOperand(0), SrcVal});
4050       return DAG.getNode(
4051           ISD::STRICT_FP_ROUND, dl, {MVT::f16, MVT::Other},
4052           {Val.getValue(1), Val.getValue(0), DAG.getIntPtrConstant(0, dl)});
4053     }
4054     return DAG.getNode(
4055         ISD::FP_ROUND, dl, MVT::f16,
4056         DAG.getNode(Op.getOpcode(), dl, MVT::f32, SrcVal),
4057         DAG.getIntPtrConstant(0, dl));
4058   }
4059 
4060   // i128 conversions are libcalls.
4061   if (SrcVal.getValueType() == MVT::i128)
4062     return SDValue();
4063 
  // Other conversions are legal, unless they target the completely
  // software-based fp128.
4066   if (Op.getValueType() != MVT::f128)
4067     return Op;
4068   return SDValue();
4069 }
4070 
4071 SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
4072                                             SelectionDAG &DAG) const {
4073   // For iOS, we want to call an alternative entry point: __sincos_stret,
4074   // which returns the values in two S / D registers.
4075   SDLoc dl(Op);
4076   SDValue Arg = Op.getOperand(0);
4077   EVT ArgVT = Arg.getValueType();
4078   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
4079 
4080   ArgListTy Args;
4081   ArgListEntry Entry;
4082 
4083   Entry.Node = Arg;
4084   Entry.Ty = ArgTy;
4085   Entry.IsSExt = false;
4086   Entry.IsZExt = false;
4087   Args.push_back(Entry);
4088 
4089   RTLIB::Libcall LC = ArgVT == MVT::f64 ? RTLIB::SINCOS_STRET_F64
4090                                         : RTLIB::SINCOS_STRET_F32;
4091   const char *LibcallName = getLibcallName(LC);
4092   SDValue Callee =
4093       DAG.getExternalSymbol(LibcallName, getPointerTy(DAG.getDataLayout()));
4094 
4095   StructType *RetTy = StructType::get(ArgTy, ArgTy);
4096   TargetLowering::CallLoweringInfo CLI(DAG);
4097   CLI.setDebugLoc(dl)
4098       .setChain(DAG.getEntryNode())
4099       .setLibCallee(CallingConv::Fast, RetTy, Callee, std::move(Args));
4100 
4101   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
4102   return CallResult.first;
4103 }
4104 
4105 static MVT getSVEContainerType(EVT ContentTy);
4106 
4107 SDValue AArch64TargetLowering::LowerBITCAST(SDValue Op,
4108                                             SelectionDAG &DAG) const {
4109   EVT OpVT = Op.getValueType();
4110   EVT ArgVT = Op.getOperand(0).getValueType();
4111 
4112   if (useSVEForFixedLengthVectorVT(OpVT))
4113     return LowerFixedLengthBitcastToSVE(Op, DAG);
4114 
4115   if (OpVT.isScalableVector()) {
4116     // Bitcasting between unpacked vector types of different element counts is
4117     // not a NOP because the live elements are laid out differently.
4118     //                01234567
4119     // e.g. nxv2i32 = XX??XX??
4120     //      nxv4f16 = X?X?X?X?
4121     if (OpVT.getVectorElementCount() != ArgVT.getVectorElementCount())
4122       return SDValue();
4123 
4124     if (isTypeLegal(OpVT) && !isTypeLegal(ArgVT)) {
4125       assert(OpVT.isFloatingPoint() && !ArgVT.isFloatingPoint() &&
4126              "Expected int->fp bitcast!");
4127       SDValue ExtResult =
4128           DAG.getNode(ISD::ANY_EXTEND, SDLoc(Op), getSVEContainerType(ArgVT),
4129                       Op.getOperand(0));
4130       return getSVESafeBitCast(OpVT, ExtResult, DAG);
4131     }
4132     return getSVESafeBitCast(OpVT, Op.getOperand(0), DAG);
4133   }
4134 
4135   if (OpVT != MVT::f16 && OpVT != MVT::bf16)
4136     return SDValue();
4137 
4138   // Bitcasts between f16 and bf16 are legal.
4139   if (ArgVT == MVT::f16 || ArgVT == MVT::bf16)
4140     return Op;
4141 
4142   assert(ArgVT == MVT::i16);
4143   SDLoc DL(Op);
4144 
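  // Lower i16 -> f16/bf16 by any-extending to i32, bitcasting to f32, and then
  // extracting the low 16 bits of the FPR via the hsub subregister.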
4145   Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op.getOperand(0));
4146   Op = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Op);
4147   return SDValue(
4148       DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, OpVT, Op,
4149                          DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
4150       0);
4151 }
4152 
4153 static EVT getExtensionTo64Bits(const EVT &OrigVT) {
4154   if (OrigVT.getSizeInBits() >= 64)
4155     return OrigVT;
4156 
4157   assert(OrigVT.isSimple() && "Expecting a simple value type");
4158 
4159   MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
4160   switch (OrigSimpleTy) {
4161   default: llvm_unreachable("Unexpected Vector Type");
4162   case MVT::v2i8:
4163   case MVT::v2i16:
    return MVT::v2i32;
  case MVT::v4i8:
    return MVT::v4i16;
4167   }
4168 }
4169 
4170 static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG,
4171                                                  const EVT &OrigTy,
4172                                                  const EVT &ExtTy,
4173                                                  unsigned ExtOpcode) {
  // The vector originally had a size of OrigTy. It was then extended to ExtTy.
  // We expect ExtTy to be 128 bits total. If OrigTy is less than 64 bits, we
  // need to insert a new extension so that it becomes 64 bits.
4177   assert(ExtTy.is128BitVector() && "Unexpected extension size");
4178   if (OrigTy.getSizeInBits() >= 64)
4179     return N;
4180 
4181   // Must extend size to at least 64 bits to be used as an operand for VMULL.
4182   EVT NewVT = getExtensionTo64Bits(OrigTy);
4183 
4184   return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
4185 }
4186 
4187 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
4188                                    bool isSigned) {
4189   EVT VT = N->getValueType(0);
4190 
4191   if (N->getOpcode() != ISD::BUILD_VECTOR)
4192     return false;
4193 
4194   for (const SDValue &Elt : N->op_values()) {
4195     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
4196       unsigned EltSize = VT.getScalarSizeInBits();
4197       unsigned HalfSize = EltSize / 2;
4198       if (isSigned) {
4199         if (!isIntN(HalfSize, C->getSExtValue()))
4200           return false;
4201       } else {
4202         if (!isUIntN(HalfSize, C->getZExtValue()))
4203           return false;
4204       }
4205       continue;
4206     }
4207     return false;
4208   }
4209 
4210   return true;
4211 }
4212 
4213 static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
4214   if (N->getOpcode() == ISD::SIGN_EXTEND ||
4215       N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND)
4216     return addRequiredExtensionForVectorMULL(N->getOperand(0), DAG,
4217                                              N->getOperand(0)->getValueType(0),
4218                                              N->getValueType(0),
4219                                              N->getOpcode());
4220 
4221   assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
4222   EVT VT = N->getValueType(0);
4223   SDLoc dl(N);
4224   unsigned EltSize = VT.getScalarSizeInBits() / 2;
4225   unsigned NumElts = VT.getVectorNumElements();
4226   MVT TruncVT = MVT::getIntegerVT(EltSize);
4227   SmallVector<SDValue, 8> Ops;
4228   for (unsigned i = 0; i != NumElts; ++i) {
4229     ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
4230     const APInt &CInt = C->getAPIntValue();
4231     // Element types smaller than 32 bits are not legal, so use i32 elements.
4232     // The values are implicitly truncated so sext vs. zext doesn't matter.
4233     Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
4234   }
4235   return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
4236 }
4237 
4238 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
4239   return N->getOpcode() == ISD::SIGN_EXTEND ||
4240          N->getOpcode() == ISD::ANY_EXTEND ||
4241          isExtendedBUILD_VECTOR(N, DAG, true);
4242 }
4243 
4244 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
4245   return N->getOpcode() == ISD::ZERO_EXTEND ||
4246          N->getOpcode() == ISD::ANY_EXTEND ||
4247          isExtendedBUILD_VECTOR(N, DAG, false);
4248 }
4249 
4250 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
4251   unsigned Opcode = N->getOpcode();
4252   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
4253     SDNode *N0 = N->getOperand(0).getNode();
4254     SDNode *N1 = N->getOperand(1).getNode();
4255     return N0->hasOneUse() && N1->hasOneUse() &&
4256       isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
4257   }
4258   return false;
4259 }
4260 
4261 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
4262   unsigned Opcode = N->getOpcode();
4263   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
4264     SDNode *N0 = N->getOperand(0).getNode();
4265     SDNode *N1 = N->getOperand(1).getNode();
4266     return N0->hasOneUse() && N1->hasOneUse() &&
4267       isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
4268   }
4269   return false;
4270 }
4271 
4272 SDValue AArch64TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
4273                                                 SelectionDAG &DAG) const {
  // The rounding mode is in bits 23:22 of the FPCR.
  // The AArch64 rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3,
  // 3->0. The formula we use to implement this is
  // ((FPCR + (1 << 22)) >> 22) & 3, so that the shift and mask get folded into
  // a bitfield extract.
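  // For example, FPCR.RMode == 0b01 (round towards plus infinity) gives
  // ((1 << 22) + (1 << 22)) >> 22 = 2, the corresponding FLT_ROUNDS value.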
4278   SDLoc dl(Op);
4279 
4280   SDValue Chain = Op.getOperand(0);
4281   SDValue FPCR_64 = DAG.getNode(
4282       ISD::INTRINSIC_W_CHAIN, dl, {MVT::i64, MVT::Other},
4283       {Chain, DAG.getConstant(Intrinsic::aarch64_get_fpcr, dl, MVT::i64)});
4284   Chain = FPCR_64.getValue(1);
4285   SDValue FPCR_32 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, FPCR_64);
4286   SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPCR_32,
4287                                   DAG.getConstant(1U << 22, dl, MVT::i32));
4288   SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
4289                               DAG.getConstant(22, dl, MVT::i32));
4290   SDValue AND = DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
4291                             DAG.getConstant(3, dl, MVT::i32));
4292   return DAG.getMergeValues({AND, Chain}, dl);
4293 }
4294 
4295 SDValue AArch64TargetLowering::LowerSET_ROUNDING(SDValue Op,
4296                                                  SelectionDAG &DAG) const {
4297   SDLoc DL(Op);
4298   SDValue Chain = Op->getOperand(0);
4299   SDValue RMValue = Op->getOperand(1);
4300 
  // The rounding mode is in bits 23:22 of the FPCR.
  // The mapping from the llvm.set.rounding argument to the rounding mode in
  // FPCR is 0->3, 1->0, 2->1, 3->2. The formula we use to implement this is
  // ((arg - 1) & 3) << 22.
  //
  // The argument of llvm.set.rounding must be within the range [0, 3], so
  // NearestTiesToAway (4) is not handled here. It is the responsibility of the
  // code that generates llvm.set.rounding to ensure this condition.
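  // For example, llvm.set.rounding(2) (round towards plus infinity) gives
  // ((2 - 1) & 3) == 1, i.e. FPCR.RMode = 0b01.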
4309 
4310   // Calculate new value of FPCR[23:22].
4311   RMValue = DAG.getNode(ISD::SUB, DL, MVT::i32, RMValue,
4312                         DAG.getConstant(1, DL, MVT::i32));
4313   RMValue = DAG.getNode(ISD::AND, DL, MVT::i32, RMValue,
4314                         DAG.getConstant(0x3, DL, MVT::i32));
4315   RMValue =
4316       DAG.getNode(ISD::SHL, DL, MVT::i32, RMValue,
4317                   DAG.getConstant(AArch64::RoundingBitsPos, DL, MVT::i32));
4318   RMValue = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, RMValue);
4319 
4320   // Get current value of FPCR.
4321   SDValue Ops[] = {
4322       Chain, DAG.getTargetConstant(Intrinsic::aarch64_get_fpcr, DL, MVT::i64)};
4323   SDValue FPCR =
4324       DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, {MVT::i64, MVT::Other}, Ops);
4325   Chain = FPCR.getValue(1);
4326   FPCR = FPCR.getValue(0);
4327 
  // Put the new rounding mode into FPCR[23:22].
4329   const int RMMask = ~(AArch64::Rounding::rmMask << AArch64::RoundingBitsPos);
4330   FPCR = DAG.getNode(ISD::AND, DL, MVT::i64, FPCR,
4331                      DAG.getConstant(RMMask, DL, MVT::i64));
4332   FPCR = DAG.getNode(ISD::OR, DL, MVT::i64, FPCR, RMValue);
4333   SDValue Ops2[] = {
4334       Chain, DAG.getTargetConstant(Intrinsic::aarch64_set_fpcr, DL, MVT::i64),
4335       FPCR};
4336   return DAG.getNode(ISD::INTRINSIC_VOID, DL, MVT::Other, Ops2);
4337 }
4338 
4339 SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
4340   EVT VT = Op.getValueType();
4341 
4342   // If SVE is available then i64 vector multiplications can also be made legal.
4343   bool OverrideNEON = VT == MVT::v2i64 || VT == MVT::v1i64;
4344 
4345   if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT, OverrideNEON))
4346     return LowerToPredicatedOp(Op, DAG, AArch64ISD::MUL_PRED);
4347 
4348   // Multiplications are only custom-lowered for 128-bit vectors so that
4349   // VMULL can be detected.  Otherwise v2i64 multiplications are not legal.
4350   assert(VT.is128BitVector() && VT.isInteger() &&
4351          "unexpected type for custom-lowering ISD::MUL");
4352   SDNode *N0 = Op.getOperand(0).getNode();
4353   SDNode *N1 = Op.getOperand(1).getNode();
4354   unsigned NewOpc = 0;
4355   bool isMLA = false;
4356   bool isN0SExt = isSignExtended(N0, DAG);
4357   bool isN1SExt = isSignExtended(N1, DAG);
4358   if (isN0SExt && isN1SExt)
4359     NewOpc = AArch64ISD::SMULL;
4360   else {
4361     bool isN0ZExt = isZeroExtended(N0, DAG);
4362     bool isN1ZExt = isZeroExtended(N1, DAG);
4363     if (isN0ZExt && isN1ZExt)
4364       NewOpc = AArch64ISD::UMULL;
4365     else if (isN1SExt || isN1ZExt) {
4366       // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
4367       // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
4368       if (isN1SExt && isAddSubSExt(N0, DAG)) {
4369         NewOpc = AArch64ISD::SMULL;
4370         isMLA = true;
4371       } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
4372         NewOpc =  AArch64ISD::UMULL;
4373         isMLA = true;
4374       } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
4375         std::swap(N0, N1);
4376         NewOpc =  AArch64ISD::UMULL;
4377         isMLA = true;
4378       }
4379     }
4380 
4381     if (!NewOpc) {
4382       if (VT == MVT::v2i64)
4383         // Fall through to expand this.  It is not legal.
4384         return SDValue();
4385       else
4386         // Other vector multiplications are legal.
4387         return Op;
4388     }
4389   }
4390 
  // Legalize to an S/UMULL instruction.
4392   SDLoc DL(Op);
4393   SDValue Op0;
4394   SDValue Op1 = skipExtensionForVectorMULL(N1, DAG);
4395   if (!isMLA) {
4396     Op0 = skipExtensionForVectorMULL(N0, DAG);
4397     assert(Op0.getValueType().is64BitVector() &&
4398            Op1.getValueType().is64BitVector() &&
4399            "unexpected types for extended operands to VMULL");
4400     return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
4401   }
  // Optimize (zext A + zext B) * C to (S/UMULL A, C) + (S/UMULL B, C) during
  // isel lowering to take advantage of back-to-back s/umul + s/umla with no
  // stall. This applies to CPUs with accumulate forwarding, such as the
  // Cortex-A53 and Cortex-A57.
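  // For example, (sext v8i8 A + sext v8i8 B) * sext v8i8 C becomes
  // add(smull(A, C), smull(B, C)) instead of a v8i16 multiply of the sums.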
4405   SDValue N00 = skipExtensionForVectorMULL(N0->getOperand(0).getNode(), DAG);
4406   SDValue N01 = skipExtensionForVectorMULL(N0->getOperand(1).getNode(), DAG);
4407   EVT Op1VT = Op1.getValueType();
4408   return DAG.getNode(N0->getOpcode(), DL, VT,
4409                      DAG.getNode(NewOpc, DL, VT,
4410                                DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
4411                      DAG.getNode(NewOpc, DL, VT,
4412                                DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
4413 }
4414 
4415 static inline SDValue getPTrue(SelectionDAG &DAG, SDLoc DL, EVT VT,
4416                                int Pattern) {
4417   if (VT == MVT::nxv1i1 && Pattern == AArch64SVEPredPattern::all)
4418     return DAG.getConstant(1, DL, MVT::nxv1i1);
4419   return DAG.getNode(AArch64ISD::PTRUE, DL, VT,
4420                      DAG.getTargetConstant(Pattern, DL, MVT::i32));
4421 }
4422 
4423 // Returns a safe bitcast between two scalable vector predicates, where
4424 // any newly created lanes from a widening bitcast are defined as zero.
4425 static SDValue getSVEPredicateBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) {
4426   SDLoc DL(Op);
4427   EVT InVT = Op.getValueType();
4428 
4429   assert(InVT.getVectorElementType() == MVT::i1 &&
4430          VT.getVectorElementType() == MVT::i1 &&
4431          "Expected a predicate-to-predicate bitcast");
4432   assert(VT.isScalableVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
4433          InVT.isScalableVector() &&
4434          DAG.getTargetLoweringInfo().isTypeLegal(InVT) &&
4435          "Only expect to cast between legal scalable predicate types!");
4436 
4437   // Return the operand if the cast isn't changing type,
4438   // e.g. <n x 16 x i1> -> <n x 16 x i1>
4439   if (InVT == VT)
4440     return Op;
4441 
4442   SDValue Reinterpret = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Op);
4443 
4444   // We only have to zero the lanes if new lanes are being defined, e.g. when
4445   // casting from <vscale x 2 x i1> to <vscale x 16 x i1>. If this is not the
4446   // case (e.g. when casting from <vscale x 16 x i1> -> <vscale x 2 x i1>) then
4447   // we can return here.
4448   if (InVT.bitsGT(VT))
4449     return Reinterpret;
4450 
4451   // Check if the other lanes are already known to be zeroed by
4452   // construction.
4453   if (isZeroingInactiveLanes(Op))
4454     return Reinterpret;
4455 
4456   // Zero the newly introduced lanes.
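  // An all-active InVT predicate reinterpreted as VT has true bits only in the
  // lanes that existed in InVT, so ANDing with it clears exactly the newly
  // introduced lanes while preserving the original ones.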
4457   SDValue Mask = DAG.getConstant(1, DL, InVT);
4458   Mask = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Mask);
4459   return DAG.getNode(ISD::AND, DL, VT, Reinterpret, Mask);
4460 }
4461 
4462 SDValue AArch64TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4463                                                       SelectionDAG &DAG) const {
4464   unsigned IntNo = Op.getConstantOperandVal(1);
4465   SDLoc DL(Op);
4466   switch (IntNo) {
4467   default:
4468     return SDValue(); // Don't custom lower most intrinsics.
4469   case Intrinsic::aarch64_mops_memset_tag: {
4470     auto Node = cast<MemIntrinsicSDNode>(Op.getNode());
4471     SDValue Chain = Node->getChain();
4472     SDValue Dst = Op.getOperand(2);
4473     SDValue Val = Op.getOperand(3);
4474     Val = DAG.getAnyExtOrTrunc(Val, DL, MVT::i64);
4475     SDValue Size = Op.getOperand(4);
4476     auto Alignment = Node->getMemOperand()->getAlign();
4477     bool IsVol = Node->isVolatile();
4478     auto DstPtrInfo = Node->getPointerInfo();
4479 
4480     const auto &SDI =
4481         static_cast<const AArch64SelectionDAGInfo &>(DAG.getSelectionDAGInfo());
4482     SDValue MS =
4483         SDI.EmitMOPS(AArch64ISD::MOPS_MEMSET_TAGGING, DAG, DL, Chain, Dst, Val,
4484                      Size, Alignment, IsVol, DstPtrInfo, MachinePointerInfo{});
4485 
4486     // MOPS_MEMSET_TAGGING has 3 results (DstWb, SizeWb, Chain) whereas the
4487     // intrinsic has 2. So hide SizeWb using MERGE_VALUES. Otherwise
4488     // LowerOperationWrapper will complain that the number of results has
4489     // changed.
4490     return DAG.getMergeValues({MS.getValue(0), MS.getValue(2)}, DL);
4491   }
4492   case Intrinsic::aarch64_sme_get_pstatesm: {
4493     SDValue Chain = Op.getOperand(0);
4494     SDValue MRS = DAG.getNode(
4495         AArch64ISD::MRS, DL, DAG.getVTList(MVT::i64, MVT::Glue, MVT::Other),
4496         Chain, DAG.getConstant(AArch64SysReg::SVCR, DL, MVT::i64));
4497     SDValue Mask = DAG.getConstant(/* PSTATE.SM */ 1, DL, MVT::i64);
4498     SDValue And = DAG.getNode(ISD::AND, DL, MVT::i64, MRS, Mask);
4499     return DAG.getMergeValues({And, Chain}, DL);
4500   }
4501   }
4502 }
4503 
4504 SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4505                                                      SelectionDAG &DAG) const {
4506   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4507   SDLoc dl(Op);
4508   switch (IntNo) {
4509   default: return SDValue();    // Don't custom lower most intrinsics.
4510   case Intrinsic::thread_pointer: {
4511     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4512     return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
4513   }
4514   case Intrinsic::aarch64_neon_abs: {
4515     EVT Ty = Op.getValueType();
4516     if (Ty == MVT::i64) {
4517       SDValue Result = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64,
4518                                    Op.getOperand(1));
4519       Result = DAG.getNode(ISD::ABS, dl, MVT::v1i64, Result);
4520       return DAG.getNode(ISD::BITCAST, dl, MVT::i64, Result);
4521     } else if (Ty.isVector() && Ty.isInteger() && isTypeLegal(Ty)) {
4522       return DAG.getNode(ISD::ABS, dl, Ty, Op.getOperand(1));
4523     } else {
      report_fatal_error("Unexpected type for AArch64 NEON intrinsic");
4525     }
4526   }
4527   case Intrinsic::aarch64_neon_smax:
4528     return DAG.getNode(ISD::SMAX, dl, Op.getValueType(),
4529                        Op.getOperand(1), Op.getOperand(2));
4530   case Intrinsic::aarch64_neon_umax:
4531     return DAG.getNode(ISD::UMAX, dl, Op.getValueType(),
4532                        Op.getOperand(1), Op.getOperand(2));
4533   case Intrinsic::aarch64_neon_smin:
4534     return DAG.getNode(ISD::SMIN, dl, Op.getValueType(),
4535                        Op.getOperand(1), Op.getOperand(2));
4536   case Intrinsic::aarch64_neon_umin:
4537     return DAG.getNode(ISD::UMIN, dl, Op.getValueType(),
4538                        Op.getOperand(1), Op.getOperand(2));
4539 
4540   case Intrinsic::aarch64_sve_sunpkhi:
4541     return DAG.getNode(AArch64ISD::SUNPKHI, dl, Op.getValueType(),
4542                        Op.getOperand(1));
4543   case Intrinsic::aarch64_sve_sunpklo:
4544     return DAG.getNode(AArch64ISD::SUNPKLO, dl, Op.getValueType(),
4545                        Op.getOperand(1));
4546   case Intrinsic::aarch64_sve_uunpkhi:
4547     return DAG.getNode(AArch64ISD::UUNPKHI, dl, Op.getValueType(),
4548                        Op.getOperand(1));
4549   case Intrinsic::aarch64_sve_uunpklo:
4550     return DAG.getNode(AArch64ISD::UUNPKLO, dl, Op.getValueType(),
4551                        Op.getOperand(1));
4552   case Intrinsic::aarch64_sve_clasta_n:
4553     return DAG.getNode(AArch64ISD::CLASTA_N, dl, Op.getValueType(),
4554                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4555   case Intrinsic::aarch64_sve_clastb_n:
4556     return DAG.getNode(AArch64ISD::CLASTB_N, dl, Op.getValueType(),
4557                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4558   case Intrinsic::aarch64_sve_lasta:
4559     return DAG.getNode(AArch64ISD::LASTA, dl, Op.getValueType(),
4560                        Op.getOperand(1), Op.getOperand(2));
4561   case Intrinsic::aarch64_sve_lastb:
4562     return DAG.getNode(AArch64ISD::LASTB, dl, Op.getValueType(),
4563                        Op.getOperand(1), Op.getOperand(2));
4564   case Intrinsic::aarch64_sve_rev:
4565     return DAG.getNode(ISD::VECTOR_REVERSE, dl, Op.getValueType(),
4566                        Op.getOperand(1));
4567   case Intrinsic::aarch64_sve_tbl:
4568     return DAG.getNode(AArch64ISD::TBL, dl, Op.getValueType(),
4569                        Op.getOperand(1), Op.getOperand(2));
4570   case Intrinsic::aarch64_sve_trn1:
4571     return DAG.getNode(AArch64ISD::TRN1, dl, Op.getValueType(),
4572                        Op.getOperand(1), Op.getOperand(2));
4573   case Intrinsic::aarch64_sve_trn2:
4574     return DAG.getNode(AArch64ISD::TRN2, dl, Op.getValueType(),
4575                        Op.getOperand(1), Op.getOperand(2));
4576   case Intrinsic::aarch64_sve_uzp1:
4577     return DAG.getNode(AArch64ISD::UZP1, dl, Op.getValueType(),
4578                        Op.getOperand(1), Op.getOperand(2));
4579   case Intrinsic::aarch64_sve_uzp2:
4580     return DAG.getNode(AArch64ISD::UZP2, dl, Op.getValueType(),
4581                        Op.getOperand(1), Op.getOperand(2));
4582   case Intrinsic::aarch64_sve_zip1:
4583     return DAG.getNode(AArch64ISD::ZIP1, dl, Op.getValueType(),
4584                        Op.getOperand(1), Op.getOperand(2));
4585   case Intrinsic::aarch64_sve_zip2:
4586     return DAG.getNode(AArch64ISD::ZIP2, dl, Op.getValueType(),
4587                        Op.getOperand(1), Op.getOperand(2));
4588   case Intrinsic::aarch64_sve_splice:
4589     return DAG.getNode(AArch64ISD::SPLICE, dl, Op.getValueType(),
4590                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4591   case Intrinsic::aarch64_sve_ptrue:
4592     return getPTrue(DAG, dl, Op.getValueType(),
4593                     cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
4594   case Intrinsic::aarch64_sve_clz:
4595     return DAG.getNode(AArch64ISD::CTLZ_MERGE_PASSTHRU, dl, Op.getValueType(),
4596                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4597   case Intrinsic::aarch64_sme_cntsb:
4598     return DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
4599                        DAG.getConstant(1, dl, MVT::i32));
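  // The cntsh/cntsw/cntsd cases below read the streaming vector length in
  // bytes via RDSVL #1 and divide it by 2, 4 and 8 (shifts by 1, 2 and 3) to
  // count halfword, word and doubleword elements.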
4600   case Intrinsic::aarch64_sme_cntsh: {
4601     SDValue One = DAG.getConstant(1, dl, MVT::i32);
4602     SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(), One);
4603     return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes, One);
4604   }
4605   case Intrinsic::aarch64_sme_cntsw: {
4606     SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
4607                                 DAG.getConstant(1, dl, MVT::i32));
4608     return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes,
4609                        DAG.getConstant(2, dl, MVT::i32));
4610   }
4611   case Intrinsic::aarch64_sme_cntsd: {
4612     SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
4613                                 DAG.getConstant(1, dl, MVT::i32));
4614     return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes,
4615                        DAG.getConstant(3, dl, MVT::i32));
4616   }
4617   case Intrinsic::aarch64_sve_cnt: {
4618     SDValue Data = Op.getOperand(3);
4619     // CTPOP only supports integer operands.
4620     if (Data.getValueType().isFloatingPoint())
4621       Data = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Data);
4622     return DAG.getNode(AArch64ISD::CTPOP_MERGE_PASSTHRU, dl, Op.getValueType(),
4623                        Op.getOperand(2), Data, Op.getOperand(1));
4624   }
4625   case Intrinsic::aarch64_sve_dupq_lane:
4626     return LowerDUPQLane(Op, DAG);
4627   case Intrinsic::aarch64_sve_convert_from_svbool:
4628     return getSVEPredicateBitCast(Op.getValueType(), Op.getOperand(1), DAG);
4629   case Intrinsic::aarch64_sve_convert_to_svbool:
4630     return getSVEPredicateBitCast(MVT::nxv16i1, Op.getOperand(1), DAG);
4631   case Intrinsic::aarch64_sve_fneg:
4632     return DAG.getNode(AArch64ISD::FNEG_MERGE_PASSTHRU, dl, Op.getValueType(),
4633                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4634   case Intrinsic::aarch64_sve_frintp:
4635     return DAG.getNode(AArch64ISD::FCEIL_MERGE_PASSTHRU, dl, Op.getValueType(),
4636                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4637   case Intrinsic::aarch64_sve_frintm:
4638     return DAG.getNode(AArch64ISD::FFLOOR_MERGE_PASSTHRU, dl, Op.getValueType(),
4639                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4640   case Intrinsic::aarch64_sve_frinti:
4641     return DAG.getNode(AArch64ISD::FNEARBYINT_MERGE_PASSTHRU, dl, Op.getValueType(),
4642                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4643   case Intrinsic::aarch64_sve_frintx:
4644     return DAG.getNode(AArch64ISD::FRINT_MERGE_PASSTHRU, dl, Op.getValueType(),
4645                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4646   case Intrinsic::aarch64_sve_frinta:
4647     return DAG.getNode(AArch64ISD::FROUND_MERGE_PASSTHRU, dl, Op.getValueType(),
4648                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4649   case Intrinsic::aarch64_sve_frintn:
4650     return DAG.getNode(AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU, dl, Op.getValueType(),
4651                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4652   case Intrinsic::aarch64_sve_frintz:
4653     return DAG.getNode(AArch64ISD::FTRUNC_MERGE_PASSTHRU, dl, Op.getValueType(),
4654                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4655   case Intrinsic::aarch64_sve_ucvtf:
4656     return DAG.getNode(AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU, dl,
4657                        Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4658                        Op.getOperand(1));
4659   case Intrinsic::aarch64_sve_scvtf:
4660     return DAG.getNode(AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU, dl,
4661                        Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4662                        Op.getOperand(1));
4663   case Intrinsic::aarch64_sve_fcvtzu:
4664     return DAG.getNode(AArch64ISD::FCVTZU_MERGE_PASSTHRU, dl,
4665                        Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4666                        Op.getOperand(1));
4667   case Intrinsic::aarch64_sve_fcvtzs:
4668     return DAG.getNode(AArch64ISD::FCVTZS_MERGE_PASSTHRU, dl,
4669                        Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4670                        Op.getOperand(1));
4671   case Intrinsic::aarch64_sve_fsqrt:
4672     return DAG.getNode(AArch64ISD::FSQRT_MERGE_PASSTHRU, dl, Op.getValueType(),
4673                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4674   case Intrinsic::aarch64_sve_frecpx:
4675     return DAG.getNode(AArch64ISD::FRECPX_MERGE_PASSTHRU, dl, Op.getValueType(),
4676                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4677   case Intrinsic::aarch64_sve_frecpe_x:
4678     return DAG.getNode(AArch64ISD::FRECPE, dl, Op.getValueType(),
4679                        Op.getOperand(1));
4680   case Intrinsic::aarch64_sve_frecps_x:
4681     return DAG.getNode(AArch64ISD::FRECPS, dl, Op.getValueType(),
4682                        Op.getOperand(1), Op.getOperand(2));
4683   case Intrinsic::aarch64_sve_frsqrte_x:
4684     return DAG.getNode(AArch64ISD::FRSQRTE, dl, Op.getValueType(),
4685                        Op.getOperand(1));
4686   case Intrinsic::aarch64_sve_frsqrts_x:
4687     return DAG.getNode(AArch64ISD::FRSQRTS, dl, Op.getValueType(),
4688                        Op.getOperand(1), Op.getOperand(2));
4689   case Intrinsic::aarch64_sve_fabs:
4690     return DAG.getNode(AArch64ISD::FABS_MERGE_PASSTHRU, dl, Op.getValueType(),
4691                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4692   case Intrinsic::aarch64_sve_abs:
4693     return DAG.getNode(AArch64ISD::ABS_MERGE_PASSTHRU, dl, Op.getValueType(),
4694                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4695   case Intrinsic::aarch64_sve_neg:
4696     return DAG.getNode(AArch64ISD::NEG_MERGE_PASSTHRU, dl, Op.getValueType(),
4697                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4698   case Intrinsic::aarch64_sve_insr: {
4699     SDValue Scalar = Op.getOperand(2);
4700     EVT ScalarTy = Scalar.getValueType();
4701     if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
4702       Scalar = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Scalar);
4703 
4704     return DAG.getNode(AArch64ISD::INSR, dl, Op.getValueType(),
4705                        Op.getOperand(1), Scalar);
4706   }
4707   case Intrinsic::aarch64_sve_rbit:
4708     return DAG.getNode(AArch64ISD::BITREVERSE_MERGE_PASSTHRU, dl,
4709                        Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4710                        Op.getOperand(1));
4711   case Intrinsic::aarch64_sve_revb:
4712     return DAG.getNode(AArch64ISD::BSWAP_MERGE_PASSTHRU, dl, Op.getValueType(),
4713                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4714   case Intrinsic::aarch64_sve_revh:
4715     return DAG.getNode(AArch64ISD::REVH_MERGE_PASSTHRU, dl, Op.getValueType(),
4716                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4717   case Intrinsic::aarch64_sve_revw:
4718     return DAG.getNode(AArch64ISD::REVW_MERGE_PASSTHRU, dl, Op.getValueType(),
4719                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4720   case Intrinsic::aarch64_sve_revd:
4721     return DAG.getNode(AArch64ISD::REVD_MERGE_PASSTHRU, dl, Op.getValueType(),
4722                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4723   case Intrinsic::aarch64_sve_sxtb:
4724     return DAG.getNode(
4725         AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4726         Op.getOperand(2), Op.getOperand(3),
4727         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i8)),
4728         Op.getOperand(1));
4729   case Intrinsic::aarch64_sve_sxth:
4730     return DAG.getNode(
4731         AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4732         Op.getOperand(2), Op.getOperand(3),
4733         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i16)),
4734         Op.getOperand(1));
4735   case Intrinsic::aarch64_sve_sxtw:
4736     return DAG.getNode(
4737         AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4738         Op.getOperand(2), Op.getOperand(3),
4739         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i32)),
4740         Op.getOperand(1));
4741   case Intrinsic::aarch64_sve_uxtb:
4742     return DAG.getNode(
4743         AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4744         Op.getOperand(2), Op.getOperand(3),
4745         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i8)),
4746         Op.getOperand(1));
4747   case Intrinsic::aarch64_sve_uxth:
4748     return DAG.getNode(
4749         AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4750         Op.getOperand(2), Op.getOperand(3),
4751         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i16)),
4752         Op.getOperand(1));
4753   case Intrinsic::aarch64_sve_uxtw:
4754     return DAG.getNode(
4755         AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4756         Op.getOperand(2), Op.getOperand(3),
4757         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i32)),
4758         Op.getOperand(1));
4759   case Intrinsic::localaddress: {
4760     const auto &MF = DAG.getMachineFunction();
4761     const auto *RegInfo = Subtarget->getRegisterInfo();
4762     unsigned Reg = RegInfo->getLocalAddressRegister(MF);
4763     return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg,
4764                               Op.getSimpleValueType());
4765   }
4766 
4767   case Intrinsic::eh_recoverfp: {
    // FIXME: This needs to be implemented to correctly handle highly aligned
    // stack objects. For now we simply return the incoming FP. Refer to
    // D53541 for more details.
4771     SDValue FnOp = Op.getOperand(1);
4772     SDValue IncomingFPOp = Op.getOperand(2);
4773     GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
4774     auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
4775     if (!Fn)
4776       report_fatal_error(
4777           "llvm.eh.recoverfp must take a function as the first argument");
4778     return IncomingFPOp;
4779   }
4780 
4781   case Intrinsic::aarch64_neon_vsri:
4782   case Intrinsic::aarch64_neon_vsli: {
4783     EVT Ty = Op.getValueType();
4784 
4785     if (!Ty.isVector())
4786       report_fatal_error("Unexpected type for aarch64_neon_vsli");
4787 
4788     assert(Op.getConstantOperandVal(3) <= Ty.getScalarSizeInBits());
4789 
4790     bool IsShiftRight = IntNo == Intrinsic::aarch64_neon_vsri;
4791     unsigned Opcode = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI;
4792     return DAG.getNode(Opcode, dl, Ty, Op.getOperand(1), Op.getOperand(2),
4793                        Op.getOperand(3));
4794   }
4795 
4796   case Intrinsic::aarch64_neon_srhadd:
4797   case Intrinsic::aarch64_neon_urhadd:
4798   case Intrinsic::aarch64_neon_shadd:
4799   case Intrinsic::aarch64_neon_uhadd: {
4800     bool IsSignedAdd = (IntNo == Intrinsic::aarch64_neon_srhadd ||
4801                         IntNo == Intrinsic::aarch64_neon_shadd);
4802     bool IsRoundingAdd = (IntNo == Intrinsic::aarch64_neon_srhadd ||
4803                           IntNo == Intrinsic::aarch64_neon_urhadd);
4804     unsigned Opcode = IsSignedAdd
4805                           ? (IsRoundingAdd ? ISD::AVGCEILS : ISD::AVGFLOORS)
4806                           : (IsRoundingAdd ? ISD::AVGCEILU : ISD::AVGFLOORU);
4807     return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
4808                        Op.getOperand(2));
4809   }
4810   case Intrinsic::aarch64_neon_sabd:
4811   case Intrinsic::aarch64_neon_uabd: {
4812     unsigned Opcode = IntNo == Intrinsic::aarch64_neon_uabd ? ISD::ABDU
4813                                                             : ISD::ABDS;
4814     return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
4815                        Op.getOperand(2));
4816   }
4817   case Intrinsic::aarch64_neon_saddlp:
4818   case Intrinsic::aarch64_neon_uaddlp: {
4819     unsigned Opcode = IntNo == Intrinsic::aarch64_neon_uaddlp
4820                           ? AArch64ISD::UADDLP
4821                           : AArch64ISD::SADDLP;
4822     return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1));
4823   }
4824   case Intrinsic::aarch64_neon_sdot:
4825   case Intrinsic::aarch64_neon_udot:
4826   case Intrinsic::aarch64_sve_sdot:
4827   case Intrinsic::aarch64_sve_udot: {
4828     unsigned Opcode = (IntNo == Intrinsic::aarch64_neon_udot ||
4829                        IntNo == Intrinsic::aarch64_sve_udot)
4830                           ? AArch64ISD::UDOT
4831                           : AArch64ISD::SDOT;
4832     return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
4833                        Op.getOperand(2), Op.getOperand(3));
4834   }
4835   case Intrinsic::get_active_lane_mask: {
4836     SDValue ID =
4837         DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, dl, MVT::i64);
4838     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(), ID,
4839                        Op.getOperand(1), Op.getOperand(2));
4840   }
4841   }
4842 }
4843 
4844 bool AArch64TargetLowering::shouldExtendGSIndex(EVT VT, EVT &EltTy) const {
4845   if (VT.getVectorElementType() == MVT::i8 ||
4846       VT.getVectorElementType() == MVT::i16) {
4847     EltTy = MVT::i32;
4848     return true;
4849   }
4850   return false;
4851 }
4852 
4853 bool AArch64TargetLowering::shouldRemoveExtendFromGSIndex(EVT IndexVT,
4854                                                           EVT DataVT) const {
4855   // SVE only supports implicit extension of 32-bit indices.
4856   if (!Subtarget->hasSVE() || IndexVT.getVectorElementType() != MVT::i32)
4857     return false;
4858 
4859   // Indices cannot be smaller than the main data type.
4860   if (IndexVT.getScalarSizeInBits() < DataVT.getScalarSizeInBits())
4861     return false;
4862 
4863   // Scalable vectors with "vscale * 2" or fewer elements sit within a 64-bit
4864   // element container type, which would violate the previous clause.
4865   return DataVT.isFixedLengthVector() || DataVT.getVectorMinNumElements() > 2;
4866 }
4867 
4868 bool AArch64TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
4869   return ExtVal.getValueType().isScalableVector() ||
4870          useSVEForFixedLengthVectorVT(
4871              ExtVal.getValueType(),
4872              /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors());
4873 }
4874 
4875 unsigned getGatherVecOpcode(bool IsScaled, bool IsSigned, bool NeedsExtend) {
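  // Map the (scaled, signed, needs-extend) triple describing the addressing
  // mode to a GLD1 gather opcode. Signedness only matters when a 32-bit index
  // has to be extended, which is why the entries without extension share an
  // opcode regardless of the signed flag.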
4876   std::map<std::tuple<bool, bool, bool>, unsigned> AddrModes = {
4877       {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ false),
4878        AArch64ISD::GLD1_MERGE_ZERO},
4879       {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ true),
4880        AArch64ISD::GLD1_UXTW_MERGE_ZERO},
4881       {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ false),
4882        AArch64ISD::GLD1_MERGE_ZERO},
4883       {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ true),
4884        AArch64ISD::GLD1_SXTW_MERGE_ZERO},
4885       {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ false),
4886        AArch64ISD::GLD1_SCALED_MERGE_ZERO},
4887       {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ true),
4888        AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO},
4889       {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ false),
4890        AArch64ISD::GLD1_SCALED_MERGE_ZERO},
4891       {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ true),
4892        AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO},
4893   };
4894   auto Key = std::make_tuple(IsScaled, IsSigned, NeedsExtend);
4895   return AddrModes.find(Key)->second;
4896 }
4897 
4898 unsigned getSignExtendedGatherOpcode(unsigned Opcode) {
4899   switch (Opcode) {
4900   default:
4901     llvm_unreachable("unimplemented opcode");
4902     return Opcode;
4903   case AArch64ISD::GLD1_MERGE_ZERO:
4904     return AArch64ISD::GLD1S_MERGE_ZERO;
4905   case AArch64ISD::GLD1_IMM_MERGE_ZERO:
4906     return AArch64ISD::GLD1S_IMM_MERGE_ZERO;
4907   case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
4908     return AArch64ISD::GLD1S_UXTW_MERGE_ZERO;
4909   case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
4910     return AArch64ISD::GLD1S_SXTW_MERGE_ZERO;
4911   case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
4912     return AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
4913   case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
4914     return AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO;
4915   case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
4916     return AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO;
4917   }
4918 }
4919 
4920 SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
4921                                             SelectionDAG &DAG) const {
4922   MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(Op);
4923 
4924   SDLoc DL(Op);
4925   SDValue Chain = MGT->getChain();
4926   SDValue PassThru = MGT->getPassThru();
4927   SDValue Mask = MGT->getMask();
4928   SDValue BasePtr = MGT->getBasePtr();
4929   SDValue Index = MGT->getIndex();
4930   SDValue Scale = MGT->getScale();
4931   EVT VT = Op.getValueType();
4932   EVT MemVT = MGT->getMemoryVT();
4933   ISD::LoadExtType ExtType = MGT->getExtensionType();
4934   ISD::MemIndexType IndexType = MGT->getIndexType();
4935 
  // SVE only supports zero (and hence undef) passthrough values; anything
  // else must be handled manually via an explicit select on the load's
  // output.
4938   if (!PassThru->isUndef() && !isZerosVector(PassThru.getNode())) {
4939     SDValue Ops[] = {Chain, DAG.getUNDEF(VT), Mask, BasePtr, Index, Scale};
4940     SDValue Load =
4941         DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,
4942                             MGT->getMemOperand(), IndexType, ExtType);
4943     SDValue Select = DAG.getSelect(DL, VT, Mask, Load, PassThru);
4944     return DAG.getMergeValues({Select, Load.getValue(1)}, DL);
4945   }
4946 
4947   bool IsScaled = MGT->isIndexScaled();
4948   bool IsSigned = MGT->isIndexSigned();
4949 
  // SVE only supports an index scaled by sizeof(MemVT.elt); any other
  // scaling must be applied to the index beforehand.
4952   uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue();
4953   if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
4954     assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
4955     EVT IndexVT = Index.getValueType();
4956     Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index,
4957                         DAG.getConstant(Log2_32(ScaleVal), DL, IndexVT));
4958     Scale = DAG.getTargetConstant(1, DL, Scale.getValueType());
4959 
4960     SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
4961     return DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,
4962                                MGT->getMemOperand(), IndexType, ExtType);
4963   }
4964 
4965   // Lower fixed length gather to a scalable equivalent.
4966   if (VT.isFixedLengthVector()) {
4967     assert(Subtarget->useSVEForFixedLengthVectors() &&
4968            "Cannot lower when not using SVE for fixed vectors!");
4969 
4970     // NOTE: Handle floating-point as if integer then bitcast the result.
4971     EVT DataVT = VT.changeVectorElementTypeToInteger();
4972     MemVT = MemVT.changeVectorElementTypeToInteger();
4973 
4974     // Find the smallest integer fixed length vector we can use for the gather.
4975     EVT PromotedVT = VT.changeVectorElementType(MVT::i32);
4976     if (DataVT.getVectorElementType() == MVT::i64 ||
4977         Index.getValueType().getVectorElementType() == MVT::i64 ||
4978         Mask.getValueType().getVectorElementType() == MVT::i64)
4979       PromotedVT = VT.changeVectorElementType(MVT::i64);
4980 
4981     // Promote vector operands except for passthrough, which we know is either
4982     // undef or zero, and thus best constructed directly.
4983     unsigned ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
4984     Index = DAG.getNode(ExtOpcode, DL, PromotedVT, Index);
4985     Mask = DAG.getNode(ISD::SIGN_EXTEND, DL, PromotedVT, Mask);
4986 
4987     // A promoted result type forces the need for an extending load.
4988     if (PromotedVT != DataVT && ExtType == ISD::NON_EXTLOAD)
4989       ExtType = ISD::EXTLOAD;
4990 
4991     EVT ContainerVT = getContainerForFixedLengthVector(DAG, PromotedVT);
4992 
4993     // Convert fixed length vector operands to scalable.
4994     MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType());
4995     Index = convertToScalableVector(DAG, ContainerVT, Index);
4996     Mask = convertFixedMaskToScalableVector(Mask, DAG);
4997     PassThru = PassThru->isUndef() ? DAG.getUNDEF(ContainerVT)
4998                                    : DAG.getConstant(0, DL, ContainerVT);
4999 
5000     // Emit equivalent scalable vector gather.
5001     SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
5002     SDValue Load =
5003         DAG.getMaskedGather(DAG.getVTList(ContainerVT, MVT::Other), MemVT, DL,
5004                             Ops, MGT->getMemOperand(), IndexType, ExtType);
5005 
5006     // Extract fixed length data then convert to the required result type.
5007     SDValue Result = convertFromScalableVector(DAG, PromotedVT, Load);
5008     Result = DAG.getNode(ISD::TRUNCATE, DL, DataVT, Result);
5009     if (VT.isFloatingPoint())
5010       Result = DAG.getNode(ISD::BITCAST, DL, VT, Result);
5011 
5012     return DAG.getMergeValues({Result, Load.getValue(1)}, DL);
5013   }
5014 
5015   // Everything else is legal.
5016   return Op;
5017 }
5018 
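// Lowers MSCATTER nodes analogously to LowerMGATHER: scales other than the
// memory element size are folded into the index, and fixed length scatters are
// widened to operate on scalable container types.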
5019 SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
5020                                              SelectionDAG &DAG) const {
5021   MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(Op);
5022 
5023   SDLoc DL(Op);
5024   SDValue Chain = MSC->getChain();
5025   SDValue StoreVal = MSC->getValue();
5026   SDValue Mask = MSC->getMask();
5027   SDValue BasePtr = MSC->getBasePtr();
5028   SDValue Index = MSC->getIndex();
5029   SDValue Scale = MSC->getScale();
5030   EVT VT = StoreVal.getValueType();
5031   EVT MemVT = MSC->getMemoryVT();
5032   ISD::MemIndexType IndexType = MSC->getIndexType();
5033   bool Truncating = MSC->isTruncatingStore();
5034 
5035   bool IsScaled = MSC->isIndexScaled();
5036   bool IsSigned = MSC->isIndexSigned();
5037 
  // SVE only supports an index scaled by sizeof(MemVT.elt); any other
  // scaling must be applied to the index beforehand.
5040   uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue();
5041   if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
5042     assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
5043     EVT IndexVT = Index.getValueType();
5044     Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index,
5045                         DAG.getConstant(Log2_32(ScaleVal), DL, IndexVT));
5046     Scale = DAG.getTargetConstant(1, DL, Scale.getValueType());
5047 
5048     SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
5049     return DAG.getMaskedScatter(MSC->getVTList(), MemVT, DL, Ops,
5050                                 MSC->getMemOperand(), IndexType, Truncating);
5051   }
5052 
5053   // Lower fixed length scatter to a scalable equivalent.
5054   if (VT.isFixedLengthVector()) {
5055     assert(Subtarget->useSVEForFixedLengthVectors() &&
5056            "Cannot lower when not using SVE for fixed vectors!");
5057 
5058     // Once bitcast we treat floating-point scatters as if integer.
5059     if (VT.isFloatingPoint()) {
5060       VT = VT.changeVectorElementTypeToInteger();
5061       MemVT = MemVT.changeVectorElementTypeToInteger();
5062       StoreVal = DAG.getNode(ISD::BITCAST, DL, VT, StoreVal);
5063     }
5064 
5065     // Find the smallest integer fixed length vector we can use for the scatter.
5066     EVT PromotedVT = VT.changeVectorElementType(MVT::i32);
5067     if (VT.getVectorElementType() == MVT::i64 ||
5068         Index.getValueType().getVectorElementType() == MVT::i64 ||
5069         Mask.getValueType().getVectorElementType() == MVT::i64)
5070       PromotedVT = VT.changeVectorElementType(MVT::i64);
5071 
5072     // Promote vector operands.
5073     unsigned ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
5074     Index = DAG.getNode(ExtOpcode, DL, PromotedVT, Index);
5075     Mask = DAG.getNode(ISD::SIGN_EXTEND, DL, PromotedVT, Mask);
5076     StoreVal = DAG.getNode(ISD::ANY_EXTEND, DL, PromotedVT, StoreVal);
5077 
5078     // A promoted value type forces the need for a truncating store.
5079     if (PromotedVT != VT)
5080       Truncating = true;
5081 
5082     EVT ContainerVT = getContainerForFixedLengthVector(DAG, PromotedVT);
5083 
5084     // Convert fixed length vector operands to scalable.
5085     MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType());
5086     Index = convertToScalableVector(DAG, ContainerVT, Index);
5087     Mask = convertFixedMaskToScalableVector(Mask, DAG);
5088     StoreVal = convertToScalableVector(DAG, ContainerVT, StoreVal);
5089 
5090     // Emit equivalent scalable vector scatter.
5091     SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
5092     return DAG.getMaskedScatter(MSC->getVTList(), MemVT, DL, Ops,
5093                                 MSC->getMemOperand(), IndexType, Truncating);
5094   }
5095 
5096   // Everything else is legal.
5097   return Op;
5098 }
5099 
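// Lowers masked loads. Fixed length vectors are redirected to the SVE path;
// otherwise a non-zero/non-undef passthru is replaced by an undef passthru
// followed by an explicit select on the loaded value.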
5100 SDValue AArch64TargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
5101   SDLoc DL(Op);
5102   MaskedLoadSDNode *LoadNode = cast<MaskedLoadSDNode>(Op);
5103   assert(LoadNode && "Expected custom lowering of a masked load node");
5104   EVT VT = Op->getValueType(0);
5105 
5106   if (useSVEForFixedLengthVectorVT(
5107           VT,
5108           /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()))
5109     return LowerFixedLengthVectorMLoadToSVE(Op, DAG);
5110 
5111   SDValue PassThru = LoadNode->getPassThru();
5112   SDValue Mask = LoadNode->getMask();
5113 
5114   if (PassThru->isUndef() || isZerosVector(PassThru.getNode()))
5115     return Op;
5116 
5117   SDValue Load = DAG.getMaskedLoad(
5118       VT, DL, LoadNode->getChain(), LoadNode->getBasePtr(),
5119       LoadNode->getOffset(), Mask, DAG.getUNDEF(VT), LoadNode->getMemoryVT(),
5120       LoadNode->getMemOperand(), LoadNode->getAddressingMode(),
5121       LoadNode->getExtensionType());
5122 
5123   SDValue Result = DAG.getSelect(DL, VT, Mask, Load, PassThru);
5124 
5125   return DAG.getMergeValues({Result, Load.getValue(1)}, DL);
5126 }
5127 
5128 // Custom lower trunc store for v4i8 vectors, since it is promoted to v4i16.
5129 static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST,
5130                                         EVT VT, EVT MemVT,
5131                                         SelectionDAG &DAG) {
5132   assert(VT.isVector() && "VT should be a vector type");
5133   assert(MemVT == MVT::v4i8 && VT == MVT::v4i16);
5134 
5135   SDValue Value = ST->getValue();
5136 
  // First extend the promoted v4i16 to v8i16, truncate it to v8i8, and
  // extract the word lane that represents the v4i8 subvector.  This
  // optimizes the store to:
5140   //
5141   //   xtn  v0.8b, v0.8h
5142   //   str  s0, [x0]
5143 
5144   SDValue Undef = DAG.getUNDEF(MVT::i16);
5145   SDValue UndefVec = DAG.getBuildVector(MVT::v4i16, DL,
5146                                         {Undef, Undef, Undef, Undef});
5147 
5148   SDValue TruncExt = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16,
5149                                  Value, UndefVec);
5150   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, TruncExt);
5151 
5152   Trunc = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Trunc);
5153   SDValue ExtractTrunc = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
5154                                      Trunc, DAG.getConstant(0, DL, MVT::i64));
5155 
5156   return DAG.getStore(ST->getChain(), DL, ExtractTrunc,
5157                       ST->getBasePtr(), ST->getMemOperand());
5158 }
5159 
// Custom lowering for stores, vector or scalar, truncating or not. This
// currently handles truncating stores from v4i16 to v4i8, volatile i128
// stores, i64x8 stores, 256-bit non-temporal vector stores, fixed length
// vector stores lowered to SVE, and under-aligned vector stores that must
// be scalarized.
5163 SDValue AArch64TargetLowering::LowerSTORE(SDValue Op,
5164                                           SelectionDAG &DAG) const {
5165   SDLoc Dl(Op);
5166   StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
  assert(StoreNode && "Can only custom lower store nodes");
5168 
5169   SDValue Value = StoreNode->getValue();
5170 
5171   EVT VT = Value.getValueType();
5172   EVT MemVT = StoreNode->getMemoryVT();
5173 
5174   if (VT.isVector()) {
5175     if (useSVEForFixedLengthVectorVT(
5176             VT,
5177             /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()))
5178       return LowerFixedLengthVectorStoreToSVE(Op, DAG);
5179 
5180     unsigned AS = StoreNode->getAddressSpace();
5181     Align Alignment = StoreNode->getAlign();
5182     if (Alignment < MemVT.getStoreSize() &&
5183         !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment,
5184                                         StoreNode->getMemOperand()->getFlags(),
5185                                         nullptr)) {
5186       return scalarizeVectorStore(StoreNode, DAG);
5187     }
5188 
5189     if (StoreNode->isTruncatingStore() && VT == MVT::v4i16 &&
5190         MemVT == MVT::v4i8) {
5191       return LowerTruncateVectorStore(Dl, StoreNode, VT, MemVT, DAG);
5192     }
5193     // 256 bit non-temporal stores can be lowered to STNP. Do this as part of
5194     // the custom lowering, as there are no un-paired non-temporal stores and
5195     // legalization will break up 256 bit inputs.
5196     ElementCount EC = MemVT.getVectorElementCount();
5197     if (StoreNode->isNonTemporal() && MemVT.getSizeInBits() == 256u &&
5198         EC.isKnownEven() &&
5199         ((MemVT.getScalarSizeInBits() == 8u ||
5200           MemVT.getScalarSizeInBits() == 16u ||
5201           MemVT.getScalarSizeInBits() == 32u ||
5202           MemVT.getScalarSizeInBits() == 64u))) {
5203       SDValue Lo =
5204           DAG.getNode(ISD::EXTRACT_SUBVECTOR, Dl,
5205                       MemVT.getHalfNumVectorElementsVT(*DAG.getContext()),
5206                       StoreNode->getValue(), DAG.getConstant(0, Dl, MVT::i64));
5207       SDValue Hi =
5208           DAG.getNode(ISD::EXTRACT_SUBVECTOR, Dl,
5209                       MemVT.getHalfNumVectorElementsVT(*DAG.getContext()),
5210                       StoreNode->getValue(),
5211                       DAG.getConstant(EC.getKnownMinValue() / 2, Dl, MVT::i64));
5212       SDValue Result = DAG.getMemIntrinsicNode(
5213           AArch64ISD::STNP, Dl, DAG.getVTList(MVT::Other),
5214           {StoreNode->getChain(), Lo, Hi, StoreNode->getBasePtr()},
5215           StoreNode->getMemoryVT(), StoreNode->getMemOperand());
5216       return Result;
5217     }
5218   } else if (MemVT == MVT::i128 && StoreNode->isVolatile()) {
5219     return LowerStore128(Op, DAG);
5220   } else if (MemVT == MVT::i64x8) {
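    // LS64 store: extract the eight i64 elements of the value and store them
    // to consecutive 8-byte slots.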
5221     SDValue Value = StoreNode->getValue();
5222     assert(Value->getValueType(0) == MVT::i64x8);
5223     SDValue Chain = StoreNode->getChain();
5224     SDValue Base = StoreNode->getBasePtr();
5225     EVT PtrVT = Base.getValueType();
5226     for (unsigned i = 0; i < 8; i++) {
5227       SDValue Part = DAG.getNode(AArch64ISD::LS64_EXTRACT, Dl, MVT::i64,
5228                                  Value, DAG.getConstant(i, Dl, MVT::i32));
5229       SDValue Ptr = DAG.getNode(ISD::ADD, Dl, PtrVT, Base,
5230                                 DAG.getConstant(i * 8, Dl, PtrVT));
5231       Chain = DAG.getStore(Chain, Dl, Part, Ptr, StoreNode->getPointerInfo(),
5232                            StoreNode->getOriginalAlign());
5233     }
5234     return Chain;
5235   }
5236 
5237   return SDValue();
5238 }
5239 
5240 /// Lower atomic or volatile 128-bit stores to a single STP instruction.
5241 SDValue AArch64TargetLowering::LowerStore128(SDValue Op,
5242                                              SelectionDAG &DAG) const {
5243   MemSDNode *StoreNode = cast<MemSDNode>(Op);
5244   assert(StoreNode->getMemoryVT() == MVT::i128);
5245   assert(StoreNode->isVolatile() || StoreNode->isAtomic());
5246   assert(!StoreNode->isAtomic() ||
5247          StoreNode->getMergedOrdering() == AtomicOrdering::Unordered ||
5248          StoreNode->getMergedOrdering() == AtomicOrdering::Monotonic);
5249 
5250   SDValue Value = StoreNode->getOpcode() == ISD::STORE
5251                       ? StoreNode->getOperand(1)
5252                       : StoreNode->getOperand(2);
5253   SDLoc DL(Op);
5254   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, Value,
5255                            DAG.getConstant(0, DL, MVT::i64));
5256   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, Value,
5257                            DAG.getConstant(1, DL, MVT::i64));
5258   SDValue Result = DAG.getMemIntrinsicNode(
5259       AArch64ISD::STP, DL, DAG.getVTList(MVT::Other),
5260       {StoreNode->getChain(), Lo, Hi, StoreNode->getBasePtr()},
5261       StoreNode->getMemoryVT(), StoreNode->getMemOperand());
5262   return Result;
5263 }
5264 
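// Custom lowers loads of i64x8 (built from eight consecutive i64 loads for
// LS64) and extending loads of v4i8 vectors.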
5265 SDValue AArch64TargetLowering::LowerLOAD(SDValue Op,
5266                                          SelectionDAG &DAG) const {
5267   SDLoc DL(Op);
5268   LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
5269   assert(LoadNode && "Expected custom lowering of a load node");
5270 
5271   if (LoadNode->getMemoryVT() == MVT::i64x8) {
5272     SmallVector<SDValue, 8> Ops;
5273     SDValue Base = LoadNode->getBasePtr();
5274     SDValue Chain = LoadNode->getChain();
5275     EVT PtrVT = Base.getValueType();
5276     for (unsigned i = 0; i < 8; i++) {
5277       SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base,
5278                                 DAG.getConstant(i * 8, DL, PtrVT));
5279       SDValue Part = DAG.getLoad(MVT::i64, DL, Chain, Ptr,
5280                                  LoadNode->getPointerInfo(),
5281                                  LoadNode->getOriginalAlign());
5282       Ops.push_back(Part);
5283       Chain = SDValue(Part.getNode(), 1);
5284     }
5285     SDValue Loaded = DAG.getNode(AArch64ISD::LS64_BUILD, DL, MVT::i64x8, Ops);
5286     return DAG.getMergeValues({Loaded, Chain}, DL);
5287   }
5288 
5289   // Custom lowering for extending v4i8 vector loads.
5290   EVT VT = Op->getValueType(0);
5291   assert((VT == MVT::v4i16 || VT == MVT::v4i32) && "Expected v4i16 or v4i32");
5292 
5293   if (LoadNode->getMemoryVT() != MVT::v4i8)
5294     return SDValue();
5295 
5296   unsigned ExtType;
5297   if (LoadNode->getExtensionType() == ISD::SEXTLOAD)
5298     ExtType = ISD::SIGN_EXTEND;
5299   else if (LoadNode->getExtensionType() == ISD::ZEXTLOAD ||
5300            LoadNode->getExtensionType() == ISD::EXTLOAD)
5301     ExtType = ISD::ZERO_EXTEND;
5302   else
5303     return SDValue();
5304 
5305   SDValue Load = DAG.getLoad(MVT::f32, DL, LoadNode->getChain(),
5306                              LoadNode->getBasePtr(), MachinePointerInfo());
5307   SDValue Chain = Load.getValue(1);
5308   SDValue Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f32, Load);
5309   SDValue BC = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Vec);
5310   SDValue Ext = DAG.getNode(ExtType, DL, MVT::v8i16, BC);
5311   Ext = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Ext,
5312                     DAG.getConstant(0, DL, MVT::i64));
5313   if (VT == MVT::v4i32)
5314     Ext = DAG.getNode(ExtType, DL, MVT::v4i32, Ext);
5315   return DAG.getMergeValues({Ext, Chain}, DL);
5316 }
5317 
5318 // Generate SUBS and CSEL for integer abs.
5319 SDValue AArch64TargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
5320   MVT VT = Op.getSimpleValueType();
5321 
5322   if (VT.isVector())
5323     return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABS_MERGE_PASSTHRU);
5324 
5325   SDLoc DL(Op);
5326   SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
5327                             Op.getOperand(0));
5328   // Generate SUBS & CSEL.
5329   SDValue Cmp =
5330       DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::i32),
5331                   Op.getOperand(0), DAG.getConstant(0, DL, VT));
5332   return DAG.getNode(AArch64ISD::CSEL, DL, VT, Op.getOperand(0), Neg,
5333                      DAG.getConstant(AArch64CC::PL, DL, MVT::i32),
5334                      Cmp.getValue(1));
5335 }
5336 
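// Lowers BRCOND by emitting a conjunction of comparisons that feeds an
// AArch64ISD::BRCOND node; returns an empty SDValue to request the default
// expansion when no conjunction can be formed.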
5337 static SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) {
5338   SDValue Chain = Op.getOperand(0);
5339   SDValue Cond = Op.getOperand(1);
5340   SDValue Dest = Op.getOperand(2);
5341 
5342   AArch64CC::CondCode CC;
5343   if (SDValue Cmp = emitConjunction(DAG, Cond, CC)) {
5344     SDLoc dl(Op);
5345     SDValue CCVal = DAG.getConstant(CC, dl, MVT::i32);
5346     return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
5347                        Cmp);
5348   }
5349 
5350   return SDValue();
5351 }
5352 
5353 SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
5354                                               SelectionDAG &DAG) const {
5355   LLVM_DEBUG(dbgs() << "Custom lowering: ");
5356   LLVM_DEBUG(Op.dump());
5357 
5358   switch (Op.getOpcode()) {
5359   default:
5360     llvm_unreachable("unimplemented operand");
5361     return SDValue();
5362   case ISD::BITCAST:
5363     return LowerBITCAST(Op, DAG);
5364   case ISD::GlobalAddress:
5365     return LowerGlobalAddress(Op, DAG);
5366   case ISD::GlobalTLSAddress:
5367     return LowerGlobalTLSAddress(Op, DAG);
5368   case ISD::SETCC:
5369   case ISD::STRICT_FSETCC:
5370   case ISD::STRICT_FSETCCS:
5371     return LowerSETCC(Op, DAG);
5372   case ISD::BRCOND:
5373     return LowerBRCOND(Op, DAG);
5374   case ISD::BR_CC:
5375     return LowerBR_CC(Op, DAG);
5376   case ISD::SELECT:
5377     return LowerSELECT(Op, DAG);
5378   case ISD::SELECT_CC:
5379     return LowerSELECT_CC(Op, DAG);
5380   case ISD::JumpTable:
5381     return LowerJumpTable(Op, DAG);
5382   case ISD::BR_JT:
5383     return LowerBR_JT(Op, DAG);
5384   case ISD::ConstantPool:
5385     return LowerConstantPool(Op, DAG);
5386   case ISD::BlockAddress:
5387     return LowerBlockAddress(Op, DAG);
5388   case ISD::VASTART:
5389     return LowerVASTART(Op, DAG);
5390   case ISD::VACOPY:
5391     return LowerVACOPY(Op, DAG);
5392   case ISD::VAARG:
5393     return LowerVAARG(Op, DAG);
5394   case ISD::ADDCARRY:
5395     return lowerADDSUBCARRY(Op, DAG, AArch64ISD::ADCS, false /*unsigned*/);
5396   case ISD::SUBCARRY:
5397     return lowerADDSUBCARRY(Op, DAG, AArch64ISD::SBCS, false /*unsigned*/);
5398   case ISD::SADDO_CARRY:
5399     return lowerADDSUBCARRY(Op, DAG, AArch64ISD::ADCS, true /*signed*/);
5400   case ISD::SSUBO_CARRY:
5401     return lowerADDSUBCARRY(Op, DAG, AArch64ISD::SBCS, true /*signed*/);
5402   case ISD::SADDO:
5403   case ISD::UADDO:
5404   case ISD::SSUBO:
5405   case ISD::USUBO:
5406   case ISD::SMULO:
5407   case ISD::UMULO:
5408     return LowerXALUO(Op, DAG);
5409   case ISD::FADD:
5410     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FADD_PRED);
5411   case ISD::FSUB:
5412     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FSUB_PRED);
5413   case ISD::FMUL:
5414     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMUL_PRED);
5415   case ISD::FMA:
5416     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMA_PRED);
5417   case ISD::FDIV:
5418     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FDIV_PRED);
5419   case ISD::FNEG:
5420     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FNEG_MERGE_PASSTHRU);
5421   case ISD::FCEIL:
5422     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FCEIL_MERGE_PASSTHRU);
5423   case ISD::FFLOOR:
5424     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FFLOOR_MERGE_PASSTHRU);
5425   case ISD::FNEARBYINT:
5426     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FNEARBYINT_MERGE_PASSTHRU);
5427   case ISD::FRINT:
5428     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FRINT_MERGE_PASSTHRU);
5429   case ISD::FROUND:
5430     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FROUND_MERGE_PASSTHRU);
5431   case ISD::FROUNDEVEN:
5432     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU);
5433   case ISD::FTRUNC:
5434     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FTRUNC_MERGE_PASSTHRU);
5435   case ISD::FSQRT:
5436     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FSQRT_MERGE_PASSTHRU);
5437   case ISD::FABS:
5438     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FABS_MERGE_PASSTHRU);
5439   case ISD::FP_ROUND:
5440   case ISD::STRICT_FP_ROUND:
5441     return LowerFP_ROUND(Op, DAG);
5442   case ISD::FP_EXTEND:
5443     return LowerFP_EXTEND(Op, DAG);
5444   case ISD::FRAMEADDR:
5445     return LowerFRAMEADDR(Op, DAG);
5446   case ISD::SPONENTRY:
5447     return LowerSPONENTRY(Op, DAG);
5448   case ISD::RETURNADDR:
5449     return LowerRETURNADDR(Op, DAG);
5450   case ISD::ADDROFRETURNADDR:
5451     return LowerADDROFRETURNADDR(Op, DAG);
5452   case ISD::CONCAT_VECTORS:
5453     return LowerCONCAT_VECTORS(Op, DAG);
5454   case ISD::INSERT_VECTOR_ELT:
5455     return LowerINSERT_VECTOR_ELT(Op, DAG);
5456   case ISD::EXTRACT_VECTOR_ELT:
5457     return LowerEXTRACT_VECTOR_ELT(Op, DAG);
5458   case ISD::BUILD_VECTOR:
5459     return LowerBUILD_VECTOR(Op, DAG);
5460   case ISD::VECTOR_SHUFFLE:
5461     return LowerVECTOR_SHUFFLE(Op, DAG);
5462   case ISD::SPLAT_VECTOR:
5463     return LowerSPLAT_VECTOR(Op, DAG);
5464   case ISD::EXTRACT_SUBVECTOR:
5465     return LowerEXTRACT_SUBVECTOR(Op, DAG);
5466   case ISD::INSERT_SUBVECTOR:
5467     return LowerINSERT_SUBVECTOR(Op, DAG);
5468   case ISD::SDIV:
5469   case ISD::UDIV:
5470     return LowerDIV(Op, DAG);
5471   case ISD::SMIN:
5472   case ISD::UMIN:
5473   case ISD::SMAX:
5474   case ISD::UMAX:
5475     return LowerMinMax(Op, DAG);
5476   case ISD::SRA:
5477   case ISD::SRL:
5478   case ISD::SHL:
5479     return LowerVectorSRA_SRL_SHL(Op, DAG);
5480   case ISD::SHL_PARTS:
5481   case ISD::SRL_PARTS:
5482   case ISD::SRA_PARTS:
5483     return LowerShiftParts(Op, DAG);
5484   case ISD::CTPOP:
5485   case ISD::PARITY:
5486     return LowerCTPOP_PARITY(Op, DAG);
5487   case ISD::FCOPYSIGN:
5488     return LowerFCOPYSIGN(Op, DAG);
5489   case ISD::OR:
5490     return LowerVectorOR(Op, DAG);
5491   case ISD::XOR:
5492     return LowerXOR(Op, DAG);
5493   case ISD::PREFETCH:
5494     return LowerPREFETCH(Op, DAG);
5495   case ISD::SINT_TO_FP:
5496   case ISD::UINT_TO_FP:
5497   case ISD::STRICT_SINT_TO_FP:
5498   case ISD::STRICT_UINT_TO_FP:
5499     return LowerINT_TO_FP(Op, DAG);
5500   case ISD::FP_TO_SINT:
5501   case ISD::FP_TO_UINT:
5502   case ISD::STRICT_FP_TO_SINT:
5503   case ISD::STRICT_FP_TO_UINT:
5504     return LowerFP_TO_INT(Op, DAG);
5505   case ISD::FP_TO_SINT_SAT:
5506   case ISD::FP_TO_UINT_SAT:
5507     return LowerFP_TO_INT_SAT(Op, DAG);
5508   case ISD::FSINCOS:
5509     return LowerFSINCOS(Op, DAG);
5510   case ISD::FLT_ROUNDS_:
5511     return LowerFLT_ROUNDS_(Op, DAG);
5512   case ISD::SET_ROUNDING:
5513     return LowerSET_ROUNDING(Op, DAG);
5514   case ISD::MUL:
5515     return LowerMUL(Op, DAG);
5516   case ISD::MULHS:
5517     return LowerToPredicatedOp(Op, DAG, AArch64ISD::MULHS_PRED);
5518   case ISD::MULHU:
5519     return LowerToPredicatedOp(Op, DAG, AArch64ISD::MULHU_PRED);
5520   case ISD::INTRINSIC_W_CHAIN:
5521     return LowerINTRINSIC_W_CHAIN(Op, DAG);
5522   case ISD::INTRINSIC_WO_CHAIN:
5523     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
5524   case ISD::ATOMIC_STORE:
5525     if (cast<MemSDNode>(Op)->getMemoryVT() == MVT::i128) {
5526       assert(Subtarget->hasLSE2());
5527       return LowerStore128(Op, DAG);
5528     }
5529     return SDValue();
5530   case ISD::STORE:
5531     return LowerSTORE(Op, DAG);
5532   case ISD::MSTORE:
5533     return LowerFixedLengthVectorMStoreToSVE(Op, DAG);
5534   case ISD::MGATHER:
5535     return LowerMGATHER(Op, DAG);
5536   case ISD::MSCATTER:
5537     return LowerMSCATTER(Op, DAG);
5538   case ISD::VECREDUCE_SEQ_FADD:
5539     return LowerVECREDUCE_SEQ_FADD(Op, DAG);
5540   case ISD::VECREDUCE_ADD:
5541   case ISD::VECREDUCE_AND:
5542   case ISD::VECREDUCE_OR:
5543   case ISD::VECREDUCE_XOR:
5544   case ISD::VECREDUCE_SMAX:
5545   case ISD::VECREDUCE_SMIN:
5546   case ISD::VECREDUCE_UMAX:
5547   case ISD::VECREDUCE_UMIN:
5548   case ISD::VECREDUCE_FADD:
5549   case ISD::VECREDUCE_FMAX:
5550   case ISD::VECREDUCE_FMIN:
5551     return LowerVECREDUCE(Op, DAG);
5552   case ISD::ATOMIC_LOAD_SUB:
5553     return LowerATOMIC_LOAD_SUB(Op, DAG);
5554   case ISD::ATOMIC_LOAD_AND:
5555     return LowerATOMIC_LOAD_AND(Op, DAG);
5556   case ISD::DYNAMIC_STACKALLOC:
5557     return LowerDYNAMIC_STACKALLOC(Op, DAG);
5558   case ISD::VSCALE:
5559     return LowerVSCALE(Op, DAG);
5560   case ISD::ANY_EXTEND:
5561   case ISD::SIGN_EXTEND:
5562   case ISD::ZERO_EXTEND:
5563     return LowerFixedLengthVectorIntExtendToSVE(Op, DAG);
5564   case ISD::SIGN_EXTEND_INREG: {
5565     // Only custom lower when ExtraVT has a legal byte based element type.
5566     EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
5567     EVT ExtraEltVT = ExtraVT.getVectorElementType();
5568     if ((ExtraEltVT != MVT::i8) && (ExtraEltVT != MVT::i16) &&
5569         (ExtraEltVT != MVT::i32) && (ExtraEltVT != MVT::i64))
5570       return SDValue();
5571 
5572     return LowerToPredicatedOp(Op, DAG,
5573                                AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU);
5574   }
5575   case ISD::TRUNCATE:
5576     return LowerTRUNCATE(Op, DAG);
5577   case ISD::MLOAD:
5578     return LowerMLOAD(Op, DAG);
5579   case ISD::LOAD:
5580     if (useSVEForFixedLengthVectorVT(Op.getValueType()))
5581       return LowerFixedLengthVectorLoadToSVE(Op, DAG);
5582     return LowerLOAD(Op, DAG);
5583   case ISD::ADD:
5584   case ISD::AND:
5585   case ISD::SUB:
5586     return LowerToScalableOp(Op, DAG);
5587   case ISD::FMAXIMUM:
5588     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMAX_PRED);
5589   case ISD::FMAXNUM:
5590     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMAXNM_PRED);
5591   case ISD::FMINIMUM:
5592     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMIN_PRED);
5593   case ISD::FMINNUM:
5594     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMINNM_PRED);
5595   case ISD::VSELECT:
5596     return LowerFixedLengthVectorSelectToSVE(Op, DAG);
5597   case ISD::ABS:
5598     return LowerABS(Op, DAG);
5599   case ISD::ABDS:
5600     return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDS_PRED);
5601   case ISD::ABDU:
5602     return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDU_PRED);
5603   case ISD::BITREVERSE:
5604     return LowerBitreverse(Op, DAG);
5605   case ISD::BSWAP:
5606     return LowerToPredicatedOp(Op, DAG, AArch64ISD::BSWAP_MERGE_PASSTHRU);
5607   case ISD::CTLZ:
5608     return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTLZ_MERGE_PASSTHRU);
5609   case ISD::CTTZ:
5610     return LowerCTTZ(Op, DAG);
5611   case ISD::VECTOR_SPLICE:
5612     return LowerVECTOR_SPLICE(Op, DAG);
5613   case ISD::STRICT_LROUND:
5614   case ISD::STRICT_LLROUND:
5615   case ISD::STRICT_LRINT:
5616   case ISD::STRICT_LLRINT: {
5617     assert(Op.getOperand(1).getValueType() == MVT::f16 &&
5618            "Expected custom lowering of rounding operations only for f16");
5619     SDLoc DL(Op);
5620     SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other},
5621                               {Op.getOperand(0), Op.getOperand(1)});
5622     return DAG.getNode(Op.getOpcode(), DL, {Op.getValueType(), MVT::Other},
5623                        {Ext.getValue(1), Ext.getValue(0)});
5624   }
5625   }
5626 }
5627 
5628 bool AArch64TargetLowering::mergeStoresAfterLegalization(EVT VT) const {
5629   return !Subtarget->useSVEForFixedLengthVectors();
5630 }
5631 
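// Returns true when the given fixed length vector type should be lowered
// using SVE rather than NEON. The element type must be one SVE supports, and
// the vector must either be NEON-sized with OverrideNEON set, or be wider
// than 128 bits, fit within the minimum SVE vector length, and have a
// power-of-two element count.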
5632 bool AArch64TargetLowering::useSVEForFixedLengthVectorVT(
5633     EVT VT, bool OverrideNEON) const {
5634   if (!VT.isFixedLengthVector() || !VT.isSimple())
5635     return false;
5636 
5637   // Don't use SVE for vectors we cannot scalarize if required.
5638   switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
5639   // Fixed length predicates should be promoted to i8.
5640   // NOTE: This is consistent with how NEON (and thus 64/128bit vectors) work.
5641   case MVT::i1:
5642   default:
5643     return false;
5644   case MVT::i8:
5645   case MVT::i16:
5646   case MVT::i32:
5647   case MVT::i64:
5648   case MVT::f16:
5649   case MVT::f32:
5650   case MVT::f64:
5651     break;
5652   }
5653 
5654   // All SVE implementations support NEON sized vectors.
5655   if (OverrideNEON && (VT.is128BitVector() || VT.is64BitVector()))
5656     return Subtarget->hasSVE();
5657 
5658   // Ensure NEON MVTs only belong to a single register class.
5659   if (VT.getFixedSizeInBits() <= 128)
5660     return false;
5661 
5662   // Ensure wider than NEON code generation is enabled.
5663   if (!Subtarget->useSVEForFixedLengthVectors())
5664     return false;
5665 
5666   // Don't use SVE for types that don't fit.
5667   if (VT.getFixedSizeInBits() > Subtarget->getMinSVEVectorSizeInBits())
5668     return false;
5669 
5670   // TODO: Perhaps an artificial restriction, but worth having whilst getting
5671   // the base fixed length SVE support in place.
5672   if (!VT.isPow2VectorType())
5673     return false;
5674 
5675   return true;
5676 }
5677 
5678 //===----------------------------------------------------------------------===//
5679 //                      Calling Convention Implementation
5680 //===----------------------------------------------------------------------===//
5681 
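// Returns the intrinsic ID of an ISD::INTRINSIC_WO_CHAIN node, or
// Intrinsic::not_intrinsic for any other node.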
5682 static unsigned getIntrinsicID(const SDNode *N) {
5683   unsigned Opcode = N->getOpcode();
5684   switch (Opcode) {
5685   default:
5686     return Intrinsic::not_intrinsic;
5687   case ISD::INTRINSIC_WO_CHAIN: {
5688     unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5689     if (IID < Intrinsic::num_intrinsics)
5690       return IID;
5691     return Intrinsic::not_intrinsic;
5692   }
5693   }
5694 }
5695 
5696 bool AArch64TargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0,
5697                                                 SDValue N1) const {
5698   if (!N0.hasOneUse())
5699     return false;
5700 
5701   unsigned IID = getIntrinsicID(N1.getNode());
5702   // Avoid reassociating expressions that can be lowered to smlal/umlal.
5703   if (IID == Intrinsic::aarch64_neon_umull ||
5704       N1.getOpcode() == AArch64ISD::UMULL ||
5705       IID == Intrinsic::aarch64_neon_smull ||
5706       N1.getOpcode() == AArch64ISD::SMULL)
5707     return N0.getOpcode() != ISD::ADD;
5708 
5709   return true;
5710 }
5711 
5712 /// Selects the correct CCAssignFn for a given CallingConvention value.
5713 CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
5714                                                      bool IsVarArg) const {
5715   switch (CC) {
5716   default:
5717     report_fatal_error("Unsupported calling convention.");
5718   case CallingConv::WebKit_JS:
5719     return CC_AArch64_WebKit_JS;
5720   case CallingConv::GHC:
5721     return CC_AArch64_GHC;
5722   case CallingConv::C:
5723   case CallingConv::Fast:
5724   case CallingConv::PreserveMost:
5725   case CallingConv::CXX_FAST_TLS:
5726   case CallingConv::Swift:
5727   case CallingConv::SwiftTail:
5728   case CallingConv::Tail:
5729     if (Subtarget->isTargetWindows() && IsVarArg)
5730       return CC_AArch64_Win64_VarArg;
5731     if (!Subtarget->isTargetDarwin())
5732       return CC_AArch64_AAPCS;
5733     if (!IsVarArg)
5734       return CC_AArch64_DarwinPCS;
5735     return Subtarget->isTargetILP32() ? CC_AArch64_DarwinPCS_ILP32_VarArg
5736                                       : CC_AArch64_DarwinPCS_VarArg;
  case CallingConv::Win64:
    return IsVarArg ? CC_AArch64_Win64_VarArg : CC_AArch64_AAPCS;
  case CallingConv::CFGuard_Check:
    return CC_AArch64_Win64_CFGuard_Check;
  case CallingConv::AArch64_VectorCall:
  case CallingConv::AArch64_SVE_VectorCall:
    return CC_AArch64_AAPCS;
5744   }
5745 }
5746 
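/// Selects the correct CCAssignFn for a given CallingConvention value when
/// assigning return values.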
5747 CCAssignFn *
5748 AArch64TargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const {
5749   return CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS
5750                                       : RetCC_AArch64_AAPCS;
5751 }
5752 
5753 SDValue AArch64TargetLowering::LowerFormalArguments(
5754     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
5755     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
5756     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5757   MachineFunction &MF = DAG.getMachineFunction();
5758   const Function &F = MF.getFunction();
5759   MachineFrameInfo &MFI = MF.getFrameInfo();
5760   bool IsWin64 = Subtarget->isCallingConvWin64(F.getCallingConv());
5761   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
5762 
5763   SmallVector<ISD::OutputArg, 4> Outs;
5764   GetReturnInfo(CallConv, F.getReturnType(), F.getAttributes(), Outs,
5765                 DAG.getTargetLoweringInfo(), MF.getDataLayout());
  if (any_of(Outs,
             [](ISD::OutputArg &Out) { return Out.VT.isScalableVector(); }))
5767     FuncInfo->setIsSVECC(true);
5768 
5769   // Assign locations to all of the incoming arguments.
5770   SmallVector<CCValAssign, 16> ArgLocs;
5771   DenseMap<unsigned, SDValue> CopiedRegs;
5772   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
5773 
5774   // At this point, Ins[].VT may already be promoted to i32. To correctly
5775   // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and
5776   // i8 to CC_AArch64_AAPCS with i32 being ValVT and i8 being LocVT.
5777   // Since AnalyzeFormalArguments uses Ins[].VT for both ValVT and LocVT, here
5778   // we use a special version of AnalyzeFormalArguments to pass in ValVT and
5779   // LocVT.
5780   unsigned NumArgs = Ins.size();
5781   Function::const_arg_iterator CurOrigArg = F.arg_begin();
5782   unsigned CurArgIdx = 0;
5783   for (unsigned i = 0; i != NumArgs; ++i) {
5784     MVT ValVT = Ins[i].VT;
5785     if (Ins[i].isOrigArg()) {
5786       std::advance(CurOrigArg, Ins[i].getOrigArgIndex() - CurArgIdx);
5787       CurArgIdx = Ins[i].getOrigArgIndex();
5788 
5789       // Get type of the original argument.
5790       EVT ActualVT = getValueType(DAG.getDataLayout(), CurOrigArg->getType(),
5791                                   /*AllowUnknown*/ true);
5792       MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other;
5793       // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
5794       if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
5795         ValVT = MVT::i8;
5796       else if (ActualMVT == MVT::i16)
5797         ValVT = MVT::i16;
5798     }
5799     bool UseVarArgCC = false;
5800     if (IsWin64)
5801       UseVarArgCC = isVarArg;
5802     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, UseVarArgCC);
5803     bool Res =
5804         AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo);
5805     assert(!Res && "Call operand has unhandled type");
5806     (void)Res;
5807   }
5808 
5809   unsigned ExtraArgLocs = 0;
5810   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5811     CCValAssign &VA = ArgLocs[i - ExtraArgLocs];
5812 
5813     if (Ins[i].Flags.isByVal()) {
5814       // Byval is used for HFAs in the PCS, but the system should work in a
5815       // non-compliant manner for larger structs.
5816       EVT PtrVT = getPointerTy(DAG.getDataLayout());
5817       int Size = Ins[i].Flags.getByValSize();
5818       unsigned NumRegs = (Size + 7) / 8;
5819 
      // FIXME: This works on big-endian for composite byvals, which are the
      // common case. It should also work for fundamental types.
5822       unsigned FrameIdx =
5823         MFI.CreateFixedObject(8 * NumRegs, VA.getLocMemOffset(), false);
5824       SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrVT);
5825       InVals.push_back(FrameIdxN);
5826 
5827       continue;
5828     }
5829 
5830     if (Ins[i].Flags.isSwiftAsync())
5831       MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
5832 
5833     SDValue ArgValue;
5834     if (VA.isRegLoc()) {
5835       // Arguments stored in registers.
5836       EVT RegVT = VA.getLocVT();
5837       const TargetRegisterClass *RC;
5838 
5839       if (RegVT == MVT::i32)
5840         RC = &AArch64::GPR32RegClass;
5841       else if (RegVT == MVT::i64)
5842         RC = &AArch64::GPR64RegClass;
5843       else if (RegVT == MVT::f16 || RegVT == MVT::bf16)
5844         RC = &AArch64::FPR16RegClass;
5845       else if (RegVT == MVT::f32)
5846         RC = &AArch64::FPR32RegClass;
5847       else if (RegVT == MVT::f64 || RegVT.is64BitVector())
5848         RC = &AArch64::FPR64RegClass;
5849       else if (RegVT == MVT::f128 || RegVT.is128BitVector())
5850         RC = &AArch64::FPR128RegClass;
5851       else if (RegVT.isScalableVector() &&
5852                RegVT.getVectorElementType() == MVT::i1) {
5853         FuncInfo->setIsSVECC(true);
5854         RC = &AArch64::PPRRegClass;
5855       } else if (RegVT.isScalableVector()) {
5856         FuncInfo->setIsSVECC(true);
5857         RC = &AArch64::ZPRRegClass;
5858       } else
5859         llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
5860 
5861       // Transform the arguments in physical registers into virtual ones.
5862       Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
5863       ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
5864 
5865       // If this is an 8, 16 or 32-bit value, it is really passed promoted
5866       // to 64 bits.  Insert an assert[sz]ext to capture this, then
5867       // truncate to the right size.
5868       switch (VA.getLocInfo()) {
5869       default:
5870         llvm_unreachable("Unknown loc info!");
5871       case CCValAssign::Full:
5872         break;
5873       case CCValAssign::Indirect:
5874         assert(VA.getValVT().isScalableVector() &&
5875                "Only scalable vectors can be passed indirectly");
5876         break;
5877       case CCValAssign::BCvt:
5878         ArgValue = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), ArgValue);
5879         break;
5880       case CCValAssign::AExt:
5881       case CCValAssign::SExt:
5882       case CCValAssign::ZExt:
5883         break;
5884       case CCValAssign::AExtUpper:
5885         ArgValue = DAG.getNode(ISD::SRL, DL, RegVT, ArgValue,
5886                                DAG.getConstant(32, DL, RegVT));
5887         ArgValue = DAG.getZExtOrTrunc(ArgValue, DL, VA.getValVT());
5888         break;
5889       }
5890     } else { // VA.isRegLoc()
5891       assert(VA.isMemLoc() && "CCValAssign is neither reg nor mem");
5892       unsigned ArgOffset = VA.getLocMemOffset();
5893       unsigned ArgSize = (VA.getLocInfo() == CCValAssign::Indirect
5894                               ? VA.getLocVT().getSizeInBits()
5895                               : VA.getValVT().getSizeInBits()) / 8;
5896 
5897       uint32_t BEAlign = 0;
5898       if (!Subtarget->isLittleEndian() && ArgSize < 8 &&
5899           !Ins[i].Flags.isInConsecutiveRegs())
5900         BEAlign = 8 - ArgSize;
5901 
5902       int FI = MFI.CreateFixedObject(ArgSize, ArgOffset + BEAlign, true);
5903 
5904       // Create load nodes to retrieve arguments from the stack.
5905       SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
5906 
      // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
5908       ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
5909       MVT MemVT = VA.getValVT();
5910 
5911       switch (VA.getLocInfo()) {
5912       default:
5913         break;
5914       case CCValAssign::Trunc:
5915       case CCValAssign::BCvt:
5916         MemVT = VA.getLocVT();
5917         break;
5918       case CCValAssign::Indirect:
5919         assert(VA.getValVT().isScalableVector() &&
5920                "Only scalable vectors can be passed indirectly");
5921         MemVT = VA.getLocVT();
5922         break;
5923       case CCValAssign::SExt:
5924         ExtType = ISD::SEXTLOAD;
5925         break;
5926       case CCValAssign::ZExt:
5927         ExtType = ISD::ZEXTLOAD;
5928         break;
5929       case CCValAssign::AExt:
5930         ExtType = ISD::EXTLOAD;
5931         break;
5932       }
5933 
5934       ArgValue =
5935           DAG.getExtLoad(ExtType, DL, VA.getLocVT(), Chain, FIN,
5936                          MachinePointerInfo::getFixedStack(MF, FI), MemVT);
5937     }
5938 
5939     if (VA.getLocInfo() == CCValAssign::Indirect) {
5940       assert(VA.getValVT().isScalableVector() &&
5941            "Only scalable vectors can be passed indirectly");
5942 
5943       uint64_t PartSize = VA.getValVT().getStoreSize().getKnownMinSize();
5944       unsigned NumParts = 1;
5945       if (Ins[i].Flags.isInConsecutiveRegs()) {
5946         assert(!Ins[i].Flags.isInConsecutiveRegsLast());
5947         while (!Ins[i + NumParts - 1].Flags.isInConsecutiveRegsLast())
5948           ++NumParts;
5949       }
5950 
5951       MVT PartLoad = VA.getValVT();
5952       SDValue Ptr = ArgValue;
5953 
5954       // Ensure we generate all loads for each tuple part, whilst updating the
5955       // pointer after each load correctly using vscale.
5956       while (NumParts > 0) {
5957         ArgValue = DAG.getLoad(PartLoad, DL, Chain, Ptr, MachinePointerInfo());
5958         InVals.push_back(ArgValue);
5959         NumParts--;
5960         if (NumParts > 0) {
5961           SDValue BytesIncrement = DAG.getVScale(
5962               DL, Ptr.getValueType(),
5963               APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize));
5964           SDNodeFlags Flags;
5965           Flags.setNoUnsignedWrap(true);
5966           Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
5967                             BytesIncrement, Flags);
5968           ExtraArgLocs++;
5969           i++;
5970         }
5971       }
5972     } else {
5973       if (Subtarget->isTargetILP32() && Ins[i].Flags.isPointer())
5974         ArgValue = DAG.getNode(ISD::AssertZext, DL, ArgValue.getValueType(),
5975                                ArgValue, DAG.getValueType(MVT::i32));
5976 
5977       // i1 arguments are zero-extended to i8 by the caller. Emit a
5978       // hint to reflect this.
5979       if (Ins[i].isOrigArg()) {
5980         Argument *OrigArg = F.getArg(Ins[i].getOrigArgIndex());
5981         if (OrigArg->getType()->isIntegerTy(1)) {
5982           if (!Ins[i].Flags.isZExt()) {
5983             ArgValue = DAG.getNode(AArch64ISD::ASSERT_ZEXT_BOOL, DL,
5984                                    ArgValue.getValueType(), ArgValue);
5985           }
5986         }
5987       }
5988 
5989       InVals.push_back(ArgValue);
5990     }
5991   }
5992   assert((ArgLocs.size() + ExtraArgLocs) == Ins.size());
5993 
5994   // varargs
5995   if (isVarArg) {
5996     if (!Subtarget->isTargetDarwin() || IsWin64) {
      // The AAPCS variadic function ABI is identical to the non-variadic
      // one, so there may be more arguments in registers; save them so that
      // va_arg can find them later. Win64 variadic functions also pass
      // arguments in registers, but all float arguments are passed in
      // integer registers.
6002       saveVarArgRegisters(CCInfo, DAG, DL, Chain);
6003     }
6004 
6005     // This will point to the next argument passed via stack.
6006     unsigned StackOffset = CCInfo.getNextStackOffset();
    // We currently pass all varargs at 8-byte alignment, or 4 bytes for ILP32.
6008     StackOffset = alignTo(StackOffset, Subtarget->isTargetILP32() ? 4 : 8);
6009     FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
6010 
6011     if (MFI.hasMustTailInVarArgFunc()) {
6012       SmallVector<MVT, 2> RegParmTypes;
6013       RegParmTypes.push_back(MVT::i64);
6014       RegParmTypes.push_back(MVT::f128);
6015       // Compute the set of forwarded registers. The rest are scratch.
6016       SmallVectorImpl<ForwardedRegister> &Forwards =
6017                                        FuncInfo->getForwardedMustTailRegParms();
6018       CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes,
6019                                                CC_AArch64_AAPCS);
6020 
6021       // Conservatively forward X8, since it might be used for aggregate return.
6022       if (!CCInfo.isAllocated(AArch64::X8)) {
6023         Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
6024         Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
6025       }
6026     }
6027   }
6028 
6029   // On Windows, InReg pointers must be returned, so record the pointer in a
6030   // virtual register at the start of the function so it can be returned in the
6031   // epilogue.
6032   if (IsWin64) {
6033     for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
6034       if (Ins[I].Flags.isInReg()) {
6035         assert(!FuncInfo->getSRetReturnReg());
6036 
6037         MVT PtrTy = getPointerTy(DAG.getDataLayout());
6038         Register Reg =
6039             MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
6040         FuncInfo->setSRetReturnReg(Reg);
6041 
6042         SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[I]);
6043         Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
6044         break;
6045       }
6046     }
6047   }
6048 
6049   unsigned StackArgSize = CCInfo.getNextStackOffset();
6050   bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
6051   if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
6052     // This is a non-standard ABI so by fiat I say we're allowed to make full
6053     // use of the stack area to be popped, which must be aligned to 16 bytes in
6054     // any case:
6055     StackArgSize = alignTo(StackArgSize, 16);
6056 
6057     // If we're expected to restore the stack (e.g. fastcc) then we'll be adding
6058     // a multiple of 16.
6059     FuncInfo->setArgumentStackToRestore(StackArgSize);
6060 
6061     // This realignment carries over to the available bytes below. Our own
6062     // callers will guarantee the space is free by giving an aligned value to
6063     // CALLSEQ_START.
6064   }
6065   // Even if we're not expected to free up the space, it's useful to know how
6066   // much is there while considering tail calls (because we can reuse it).
6067   FuncInfo->setBytesInStackArgArea(StackArgSize);
6068 
6069   if (Subtarget->hasCustomCallingConv())
6070     Subtarget->getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);
6071 
6072   return Chain;
6073 }
6074 
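// Spills the unallocated GPR argument registers (and, outside Win64, any
// unallocated FPR argument registers) to the variadic register save areas so
// that va_arg can retrieve them later.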
6075 void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
6076                                                 SelectionDAG &DAG,
6077                                                 const SDLoc &DL,
6078                                                 SDValue &Chain) const {
6079   MachineFunction &MF = DAG.getMachineFunction();
6080   MachineFrameInfo &MFI = MF.getFrameInfo();
6081   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
6082   auto PtrVT = getPointerTy(DAG.getDataLayout());
  bool IsWin64 =
      Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv());
6084 
6085   SmallVector<SDValue, 8> MemOps;
6086 
6087   static const MCPhysReg GPRArgRegs[] = { AArch64::X0, AArch64::X1, AArch64::X2,
6088                                           AArch64::X3, AArch64::X4, AArch64::X5,
6089                                           AArch64::X6, AArch64::X7 };
6090   static const unsigned NumGPRArgRegs = array_lengthof(GPRArgRegs);
6091   unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs);
6092 
6093   unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR);
6094   int GPRIdx = 0;
6095   if (GPRSaveSize != 0) {
6096     if (IsWin64) {
6097       GPRIdx = MFI.CreateFixedObject(GPRSaveSize, -(int)GPRSaveSize, false);
6098       if (GPRSaveSize & 15)
6099         // The extra size here, if triggered, will always be 8.
        MFI.CreateFixedObject(16 - (GPRSaveSize & 15),
                              -(int)alignTo(GPRSaveSize, 16), false);
6101     } else
6102       GPRIdx = MFI.CreateStackObject(GPRSaveSize, Align(8), false);
6103 
6104     SDValue FIN = DAG.getFrameIndex(GPRIdx, PtrVT);
6105 
6106     for (unsigned i = FirstVariadicGPR; i < NumGPRArgRegs; ++i) {
6107       Register VReg = MF.addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass);
6108       SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
6109       SDValue Store =
6110           DAG.getStore(Val.getValue(1), DL, Val, FIN,
6111                        IsWin64 ? MachinePointerInfo::getFixedStack(
6112                                      MF, GPRIdx, (i - FirstVariadicGPR) * 8)
6113                                : MachinePointerInfo::getStack(MF, i * 8));
6114       MemOps.push_back(Store);
6115       FIN =
6116           DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getConstant(8, DL, PtrVT));
6117     }
6118   }
6119   FuncInfo->setVarArgsGPRIndex(GPRIdx);
6120   FuncInfo->setVarArgsGPRSize(GPRSaveSize);
6121 
6122   if (Subtarget->hasFPARMv8() && !IsWin64) {
6123     static const MCPhysReg FPRArgRegs[] = {
6124         AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
6125         AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7};
6126     static const unsigned NumFPRArgRegs = array_lengthof(FPRArgRegs);
6127     unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs);
6128 
6129     unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
6130     int FPRIdx = 0;
6131     if (FPRSaveSize != 0) {
6132       FPRIdx = MFI.CreateStackObject(FPRSaveSize, Align(16), false);
6133 
6134       SDValue FIN = DAG.getFrameIndex(FPRIdx, PtrVT);
6135 
6136       for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
6137         Register VReg = MF.addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass);
6138         SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
6139 
6140         SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
6141                                      MachinePointerInfo::getStack(MF, i * 16));
6142         MemOps.push_back(Store);
6143         FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
6144                           DAG.getConstant(16, DL, PtrVT));
6145       }
6146     }
6147     FuncInfo->setVarArgsFPRIndex(FPRIdx);
6148     FuncInfo->setVarArgsFPRSize(FPRSaveSize);
6149   }
6150 
6151   if (!MemOps.empty()) {
6152     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
6153   }
6154 }
6155 
6156 /// LowerCallResult - Lower the result values of a call into the
6157 /// appropriate copies out of appropriate physical registers.
6158 SDValue AArch64TargetLowering::LowerCallResult(
6159     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
6160     const SmallVectorImpl<CCValAssign> &RVLocs, const SDLoc &DL,
6161     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
6162     SDValue ThisVal) const {
6163   DenseMap<unsigned, SDValue> CopiedRegs;
6164   // Copy all of the result registers out of their specified physreg.
6165   for (unsigned i = 0; i != RVLocs.size(); ++i) {
6166     CCValAssign VA = RVLocs[i];
6167 
6168     // Pass 'this' value directly from the argument to return value, to avoid
6169     // reg unit interference
6170     if (i == 0 && isThisReturn) {
6171       assert(!VA.needsCustom() && VA.getLocVT() == MVT::i64 &&
6172              "unexpected return calling convention register assignment");
6173       InVals.push_back(ThisVal);
6174       continue;
6175     }
6176 
6177     // Avoid copying a physreg twice since RegAllocFast is incompetent and only
6178     // allows one use of a physreg per block.
6179     SDValue Val = CopiedRegs.lookup(VA.getLocReg());
6180     if (!Val) {
6181       Val =
6182           DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
6183       Chain = Val.getValue(1);
6184       InFlag = Val.getValue(2);
6185       CopiedRegs[VA.getLocReg()] = Val;
6186     }
6187 
6188     switch (VA.getLocInfo()) {
6189     default:
6190       llvm_unreachable("Unknown loc info!");
6191     case CCValAssign::Full:
6192       break;
6193     case CCValAssign::BCvt:
6194       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
6195       break;
6196     case CCValAssign::AExtUpper:
6197       Val = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Val,
6198                         DAG.getConstant(32, DL, VA.getLocVT()));
6199       LLVM_FALLTHROUGH;
6200     case CCValAssign::AExt:
6201       LLVM_FALLTHROUGH;
6202     case CCValAssign::ZExt:
6203       Val = DAG.getZExtOrTrunc(Val, DL, VA.getValVT());
6204       break;
6205     }
6206 
6207     InVals.push_back(Val);
6208   }
6209 
6210   return Chain;
6211 }
6212 
6213 /// Return true if the calling convention is one that we can guarantee TCO for.
6214 static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) {
6215   return (CC == CallingConv::Fast && GuaranteeTailCalls) ||
6216          CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
6217 }
6218 
6219 /// Return true if we might ever do TCO for calls with this calling convention.
6220 static bool mayTailCallThisCC(CallingConv::ID CC) {
6221   switch (CC) {
6222   case CallingConv::C:
6223   case CallingConv::AArch64_SVE_VectorCall:
6224   case CallingConv::PreserveMost:
6225   case CallingConv::Swift:
6226   case CallingConv::SwiftTail:
6227   case CallingConv::Tail:
6228   case CallingConv::Fast:
6229     return true;
6230   default:
6231     return false;
6232   }
6233 }
6234 
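// Analyzes the outgoing operands of a call: small integer arguments are
// narrowed back to their original width, and the variadic calling convention
// is used for the variadic operands (or, on Win64, for every operand of a
// variadic call).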
6235 static void analyzeCallOperands(const AArch64TargetLowering &TLI,
6236                                 const AArch64Subtarget *Subtarget,
6237                                 const TargetLowering::CallLoweringInfo &CLI,
6238                                 CCState &CCInfo) {
6239   const SelectionDAG &DAG = CLI.DAG;
6240   CallingConv::ID CalleeCC = CLI.CallConv;
6241   bool IsVarArg = CLI.IsVarArg;
6242   const SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
6243   bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
6244 
6245   unsigned NumArgs = Outs.size();
6246   for (unsigned i = 0; i != NumArgs; ++i) {
6247     MVT ArgVT = Outs[i].VT;
6248     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
6249 
6250     bool UseVarArgCC = false;
6251     if (IsVarArg) {
6252       // On Windows, the fixed arguments in a vararg call are passed in GPRs
6253       // too, so use the vararg CC to force them to integer registers.
6254       if (IsCalleeWin64) {
6255         UseVarArgCC = true;
6256       } else {
6257         UseVarArgCC = !Outs[i].IsFixed;
6258       }
6259     } else {
6260       // Get type of the original argument.
      EVT ActualVT =
          TLI.getValueType(DAG.getDataLayout(),
                           CLI.Args[Outs[i].OrigArgIndex].Ty,
                           /*AllowUnknown*/ true);
6264       MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : ArgVT;
6265       // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
6266       if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
6267         ArgVT = MVT::i8;
6268       else if (ActualMVT == MVT::i16)
6269         ArgVT = MVT::i16;
6270     }
6271 
6272     CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CalleeCC, UseVarArgCC);
6273     bool Res = AssignFn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
6274     assert(!Res && "Call operand has unhandled type");
6275     (void)Res;
6276   }
6277 }
6278 
6279 bool AArch64TargetLowering::isEligibleForTailCallOptimization(
6280     const CallLoweringInfo &CLI) const {
6281   CallingConv::ID CalleeCC = CLI.CallConv;
6282   if (!mayTailCallThisCC(CalleeCC))
6283     return false;
6284 
6285   SDValue Callee = CLI.Callee;
6286   bool IsVarArg = CLI.IsVarArg;
6287   const SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
6288   const SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
6289   const SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
6290   const SelectionDAG &DAG = CLI.DAG;
6291   MachineFunction &MF = DAG.getMachineFunction();
6292   const Function &CallerF = MF.getFunction();
6293   CallingConv::ID CallerCC = CallerF.getCallingConv();
6294 
6295   // Functions using the C or Fast calling convention that have an SVE signature
6296   // preserve more registers and should assume the SVE_VectorCall CC.
6297   // The check for matching callee-saved regs will determine whether it is
6298   // eligible for TCO.
6299   if ((CallerCC == CallingConv::C || CallerCC == CallingConv::Fast) &&
6300       MF.getInfo<AArch64FunctionInfo>()->isSVECC())
6301     CallerCC = CallingConv::AArch64_SVE_VectorCall;
6302 
6303   bool CCMatch = CallerCC == CalleeCC;
6304 
6305   // When using the Windows calling convention on a non-windows OS, we want
6306   // to back up and restore X18 in such functions; we can't do a tail call
6307   // from those functions.
6308   if (CallerCC == CallingConv::Win64 && !Subtarget->isTargetWindows() &&
6309       CalleeCC != CallingConv::Win64)
6310     return false;
6311 
6312   // Byval parameters hand the function a pointer directly into the stack area
6313   // we want to reuse during a tail call. Working around this *is* possible (see
6314   // X86) but less efficient and uglier in LowerCall.
6315   for (Function::const_arg_iterator i = CallerF.arg_begin(),
6316                                     e = CallerF.arg_end();
6317        i != e; ++i) {
6318     if (i->hasByValAttr())
6319       return false;
6320 
6321     // On Windows, "inreg" attributes signify non-aggregate indirect returns.
6322     // In this case, it is necessary to save/restore X0 in the callee. Tail
6323     // call opt interferes with this. So we disable tail call opt when the
6324     // caller has an argument with "inreg" attribute.
6325 
6326     // FIXME: Check whether the callee also has an "inreg" argument.
6327     if (i->hasInRegAttr())
6328       return false;
6329   }
6330 
  if (canGuaranteeTCO(CalleeCC,
                      getTargetMachine().Options.GuaranteedTailCallOpt))
6332     return CCMatch;
6333 
6334   // Externally-defined functions with weak linkage should not be
6335   // tail-called on AArch64 when the OS does not support dynamic
6336   // pre-emption of symbols, as the AAELF spec requires normal calls
6337   // to undefined weak functions to be replaced with a NOP or jump to the
6338   // next instruction. The behaviour of branch instructions in this
6339   // situation (as used for tail calls) is implementation-defined, so we
6340   // cannot rely on the linker replacing the tail call with a return.
6341   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
6342     const GlobalValue *GV = G->getGlobal();
6343     const Triple &TT = getTargetMachine().getTargetTriple();
6344     if (GV->hasExternalWeakLinkage() &&
6345         (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
6346       return false;
6347   }
6348 
6349   // Now we search for cases where we can use a tail call without changing the
6350   // ABI. Sibcall is used in some places (particularly gcc) to refer to this
6351   // concept.
6352 
6353   // I want anyone implementing a new calling convention to think long and hard
6354   // about this assert.
6355   assert((!IsVarArg || CalleeCC == CallingConv::C) &&
6356          "Unexpected variadic calling convention");
6357 
6358   LLVMContext &C = *DAG.getContext();
6359   // Check that the call results are passed in the same way.
6360   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
6361                                   CCAssignFnForCall(CalleeCC, IsVarArg),
6362                                   CCAssignFnForCall(CallerCC, IsVarArg)))
6363     return false;
6364   // The callee has to preserve all registers the caller needs to preserve.
6365   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
6366   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
6367   if (!CCMatch) {
6368     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
6369     if (Subtarget->hasCustomCallingConv()) {
6370       TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
6371       TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
6372     }
6373     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
6374       return false;
6375   }
6376 
6377   // Nothing more to check if the callee is taking no arguments
6378   if (Outs.empty())
6379     return true;
6380 
6381   SmallVector<CCValAssign, 16> ArgLocs;
6382   CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);
6383 
6384   analyzeCallOperands(*this, Subtarget, CLI, CCInfo);
6385 
6386   if (IsVarArg && !(CLI.CB && CLI.CB->isMustTailCall())) {
    // When we are musttail, additional checks have been done and we can
    // safely ignore this check.
6388     // At least two cases here: if caller is fastcc then we can't have any
6389     // memory arguments (we'd be expected to clean up the stack afterwards). If
6390     // caller is C then we could potentially use its argument area.
6391 
6392     // FIXME: for now we take the most conservative of these in both cases:
6393     // disallow all variadic memory operands.
6394     for (const CCValAssign &ArgLoc : ArgLocs)
6395       if (!ArgLoc.isRegLoc())
6396         return false;
6397   }
6398 
6399   const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
6400 
6401   // If any of the arguments is passed indirectly, it must be SVE, so the
6402   // 'getBytesInStackArgArea' is not sufficient to determine whether we need to
  // allocate space on the stack. That is why we determine explicitly here that
  // the call cannot be a tail call.
6405   if (llvm::any_of(ArgLocs, [](CCValAssign &A) {
6406         assert((A.getLocInfo() != CCValAssign::Indirect ||
6407                 A.getValVT().isScalableVector()) &&
6408                "Expected value to be scalable");
6409         return A.getLocInfo() == CCValAssign::Indirect;
6410       }))
6411     return false;
6412 
6413   // If the stack arguments for this call do not fit into our own save area then
6414   // the call cannot be made tail.
6415   if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
6416     return false;
6417 
6418   const MachineRegisterInfo &MRI = MF.getRegInfo();
6419   if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
6420     return false;
6421 
6422   return true;
6423 }
6424 
6425 SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
6426                                                    SelectionDAG &DAG,
6427                                                    MachineFrameInfo &MFI,
6428                                                    int ClobberedFI) const {
6429   SmallVector<SDValue, 8> ArgChains;
6430   int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
6431   int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
6432 
6433   // Include the original chain at the beginning of the list. When this is
6434   // used by target LowerCall hooks, this helps legalize find the
6435   // CALLSEQ_BEGIN node.
6436   ArgChains.push_back(Chain);
6437 
  // Add a chain value for each incoming stack-argument load that overlaps the
  // area this call is about to clobber.
6439   for (SDNode *U : DAG.getEntryNode().getNode()->uses())
6440     if (LoadSDNode *L = dyn_cast<LoadSDNode>(U))
6441       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
6442         if (FI->getIndex() < 0) {
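          // Negative frame indices correspond to fixed objects, i.e. the
          // incoming stack arguments that this call might overwrite.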
6443           int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
6444           int64_t InLastByte = InFirstByte;
6445           InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;
6446 
6447           if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
6448               (FirstByte <= InFirstByte && InFirstByte <= LastByte))
6449             ArgChains.push_back(SDValue(L, 1));
6450         }
6451 
6452   // Build a tokenfactor for all the chains.
6453   return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
6454 }
6455 
6456 bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
6457                                                    bool TailCallOpt) const {
6458   return (CallCC == CallingConv::Fast && TailCallOpt) ||
6459          CallCC == CallingConv::Tail || CallCC == CallingConv::SwiftTail;
6460 }
6461 
6462 // Check if the value is zero-extended from i1 to i8
6463 static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) {
6464   unsigned SizeInBits = Arg.getValueType().getSizeInBits();
6465   if (SizeInBits < 8)
6466     return false;
6467 
  // AAPCS only requires the low byte to hold a zero-extended i1, so it is
  // enough to know that bits [1, 7] of the argument are zero.
  APInt RequiredZero(SizeInBits, 0xFE);
  KnownBits Bits = DAG.computeKnownBits(Arg, 4);
  bool ZExtBool = (Bits.Zero & RequiredZero) == RequiredZero;
  return ZExtBool;
6472 }
6473 
6474 /// LowerCall - Lower a call to a callseq_start + CALL + callseq_end chain,
6475 /// and add input and output parameter nodes.
6476 SDValue
6477 AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
6478                                  SmallVectorImpl<SDValue> &InVals) const {
6479   SelectionDAG &DAG = CLI.DAG;
6480   SDLoc &DL = CLI.DL;
6481   SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
6482   SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
6483   SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
6484   SDValue Chain = CLI.Chain;
6485   SDValue Callee = CLI.Callee;
6486   bool &IsTailCall = CLI.IsTailCall;
6487   CallingConv::ID &CallConv = CLI.CallConv;
6488   bool IsVarArg = CLI.IsVarArg;
6489 
6490   MachineFunction &MF = DAG.getMachineFunction();
6491   MachineFunction::CallSiteInfo CSInfo;
6492   bool IsThisReturn = false;
6493 
6494   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
6495   bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
6496   bool IsSibCall = false;
6497   bool GuardWithBTI = false;
6498 
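  // Calls to returns_twice functions (e.g. setjmp) may resume via an indirect
  // branch, so when branch target enforcement is enabled the call is lowered
  // to CALL_BTI below, which emits a BTI immediately after the call.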
6499   if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Attribute::ReturnsTwice) &&
6500       !Subtarget->noBTIAtReturnTwice()) {
6501     GuardWithBTI = FuncInfo->branchTargetEnforcement();
6502   }
6503 
6504   // Analyze operands of the call, assigning locations to each operand.
6505   SmallVector<CCValAssign, 16> ArgLocs;
6506   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
6507 
6508   if (IsVarArg) {
6509     unsigned NumArgs = Outs.size();
6510 
6511     for (unsigned i = 0; i != NumArgs; ++i) {
6512       if (!Outs[i].IsFixed && Outs[i].VT.isScalableVector())
6513         report_fatal_error("Passing SVE types to variadic functions is "
6514                            "currently not supported");
6515     }
6516   }
6517 
6518   analyzeCallOperands(*this, Subtarget, CLI, CCInfo);
6519 
6520   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
6521   // Assign locations to each value returned by this call.
6522   SmallVector<CCValAssign, 16> RVLocs;
6523   CCState RetCCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
6524                     *DAG.getContext());
6525   RetCCInfo.AnalyzeCallResult(Ins, RetCC);
6526 
6527   // Check callee args/returns for SVE registers and set calling convention
6528   // accordingly.
6529   if (CallConv == CallingConv::C || CallConv == CallingConv::Fast) {
6530     auto HasSVERegLoc = [](CCValAssign &Loc) {
6531       if (!Loc.isRegLoc())
6532         return false;
6533       return AArch64::ZPRRegClass.contains(Loc.getLocReg()) ||
6534              AArch64::PPRRegClass.contains(Loc.getLocReg());
6535     };
6536     if (any_of(RVLocs, HasSVERegLoc) || any_of(ArgLocs, HasSVERegLoc))
6537       CallConv = CallingConv::AArch64_SVE_VectorCall;
6538   }
6539 
6540   if (IsTailCall) {
6541     // Check if it's really possible to do a tail call.
6542     IsTailCall = isEligibleForTailCallOptimization(CLI);
6543 
6544     // A sibling call is one where we're under the usual C ABI and not planning
6545     // to change that but can still do a tail call:
6546     if (!TailCallOpt && IsTailCall && CallConv != CallingConv::Tail &&
6547         CallConv != CallingConv::SwiftTail)
6548       IsSibCall = true;
6549 
6550     if (IsTailCall)
6551       ++NumTailCalls;
6552   }
6553 
6554   if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
6555     report_fatal_error("failed to perform tail call elimination on a call "
6556                        "site marked musttail");
6557 
6558   // Get a count of how many bytes are to be pushed on the stack.
6559   unsigned NumBytes = CCInfo.getNextStackOffset();
6560 
6561   if (IsSibCall) {
6562     // Since we're not changing the ABI to make this a tail call, the memory
6563     // operands are already available in the caller's incoming argument space.
6564     NumBytes = 0;
6565   }
6566 
6567   // FPDiff is the byte offset of the call's argument area from the callee's.
6568   // Stores to callee stack arguments will be placed in FixedStackSlots offset
6569   // by this amount for a tail call. In a sibling call it must be 0 because the
6570   // caller will deallocate the entire stack and the callee still expects its
6571   // arguments to begin at SP+0. Completely unused for non-tail calls.
6572   int FPDiff = 0;
6573 
6574   if (IsTailCall && !IsSibCall) {
6575     unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
6576 
6577     // Since callee will pop argument stack as a tail call, we must keep the
6578     // popped size 16-byte aligned.
6579     NumBytes = alignTo(NumBytes, 16);
6580 
6581     // FPDiff will be negative if this tail call requires more space than we
6582     // would automatically have in our incoming argument space. Positive if we
6583     // can actually shrink the stack.
6584     FPDiff = NumReusableBytes - NumBytes;
6585 
6586     // Update the required reserved area if this is the tail call requiring the
6587     // most argument stack space.
6588     if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff)
6589       FuncInfo->setTailCallReservedStack(-FPDiff);
6590 
6591     // The stack pointer must be 16-byte aligned at all times it's used for a
6592     // memory operation, which in practice means at *all* times and in
6593     // particular across call boundaries. Therefore our own arguments started at
6594     // a 16-byte aligned SP and the delta applied for the tail call should
6595     // satisfy the same constraint.
6596     assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
6597   }
6598 
6599   // Adjust the stack pointer for the new arguments...
6600   // These operations are automatically eliminated by the prolog/epilog pass
6601   if (!IsSibCall)
6602     Chain = DAG.getCALLSEQ_START(Chain, IsTailCall ? 0 : NumBytes, 0, DL);
6603 
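  // Capture the current stack pointer; addresses of outgoing stack arguments
  // below are formed as SP plus the assigned offset.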
6604   SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, AArch64::SP,
6605                                         getPointerTy(DAG.getDataLayout()));
6606 
6607   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6608   SmallSet<unsigned, 8> RegsUsed;
6609   SmallVector<SDValue, 8> MemOpChains;
6610   auto PtrVT = getPointerTy(DAG.getDataLayout());
6611 
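  // For a musttail call in a variadic function, re-forward the argument
  // registers that were copied into virtual registers on entry (see
  // getForwardedMustTailRegParms) so the callee sees the same register state.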
6612   if (IsVarArg && CLI.CB && CLI.CB->isMustTailCall()) {
6613     const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();
6614     for (const auto &F : Forwards) {
6615       SDValue Val = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
      RegsToPass.emplace_back(F.PReg, Val);
6617     }
6618   }
6619 
6620   // Walk the register/memloc assignments, inserting copies/loads.
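  // Some scalable-vector arguments are split across several Outs entries but
  // share a single CCValAssign; ExtraArgLocs keeps the ArgLocs index in sync
  // when that happens.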
6621   unsigned ExtraArgLocs = 0;
6622   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
6623     CCValAssign &VA = ArgLocs[i - ExtraArgLocs];
6624     SDValue Arg = OutVals[i];
6625     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6626 
6627     // Promote the value if needed.
6628     switch (VA.getLocInfo()) {
6629     default:
6630       llvm_unreachable("Unknown loc info!");
6631     case CCValAssign::Full:
6632       break;
6633     case CCValAssign::SExt:
6634       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
6635       break;
6636     case CCValAssign::ZExt:
6637       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
6638       break;
6639     case CCValAssign::AExt:
6640       if (Outs[i].ArgVT == MVT::i1) {
6641         // AAPCS requires i1 to be zero-extended to 8-bits by the caller.
6642         //
6643         // Check if we actually have to do this, because the value may
6644         // already be zero-extended.
6645         //
6646         // We cannot just emit a (zext i8 (trunc (assert-zext i8)))
6647         // and rely on DAGCombiner to fold this, because the following
6648         // (anyext i32) is combined with (zext i8) in DAG.getNode:
6649         //
6650         //   (ext (zext x)) -> (zext x)
6651         //
6652         // This will give us (zext i32), which we cannot remove, so
6653         // try to check this beforehand.
6654         if (!checkZExtBool(Arg, DAG)) {
6655           Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
6656           Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i8, Arg);
6657         }
6658       }
6659       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
6660       break;
6661     case CCValAssign::AExtUpper:
6662       assert(VA.getValVT() == MVT::i32 && "only expect 32 -> 64 upper bits");
6663       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
6664       Arg = DAG.getNode(ISD::SHL, DL, VA.getLocVT(), Arg,
6665                         DAG.getConstant(32, DL, VA.getLocVT()));
6666       break;
6667     case CCValAssign::BCvt:
6668       Arg = DAG.getBitcast(VA.getLocVT(), Arg);
6669       break;
6670     case CCValAssign::Trunc:
6671       Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
6672       break;
6673     case CCValAssign::FPExt:
6674       Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
6675       break;
6676     case CCValAssign::Indirect:
6677       assert(VA.getValVT().isScalableVector() &&
6678              "Only scalable vectors can be passed indirectly");
6679 
6680       uint64_t StoreSize = VA.getValVT().getStoreSize().getKnownMinSize();
6681       uint64_t PartSize = StoreSize;
6682       unsigned NumParts = 1;
6683       if (Outs[i].Flags.isInConsecutiveRegs()) {
6684         assert(!Outs[i].Flags.isInConsecutiveRegsLast());
6685         while (!Outs[i + NumParts - 1].Flags.isInConsecutiveRegsLast())
6686           ++NumParts;
6687         StoreSize *= NumParts;
6688       }
6689 
6690       MachineFrameInfo &MFI = MF.getFrameInfo();
6691       Type *Ty = EVT(VA.getValVT()).getTypeForEVT(*DAG.getContext());
6692       Align Alignment = DAG.getDataLayout().getPrefTypeAlign(Ty);
6693       int FI = MFI.CreateStackObject(StoreSize, Alignment, false);
6694       MFI.setStackID(FI, TargetStackID::ScalableVector);
6695 
6696       MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6697       SDValue Ptr = DAG.getFrameIndex(
6698           FI, DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout()));
6699       SDValue SpillSlot = Ptr;
6700 
6701       // Ensure we generate all stores for each tuple part, whilst updating the
6702       // pointer after each store correctly using vscale.
6703       while (NumParts) {
6704         Chain = DAG.getStore(Chain, DL, OutVals[i], Ptr, MPI);
6705         NumParts--;
6706         if (NumParts > 0) {
6707           SDValue BytesIncrement = DAG.getVScale(
6708               DL, Ptr.getValueType(),
6709               APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize));
6710           SDNodeFlags Flags;
6711           Flags.setNoUnsignedWrap(true);
6712 
6713           MPI = MachinePointerInfo(MPI.getAddrSpace());
6714           Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
6715                             BytesIncrement, Flags);
6716           ExtraArgLocs++;
6717           i++;
6718         }
6719       }
6720 
6721       Arg = SpillSlot;
6722       break;
6723     }
6724 
6725     if (VA.isRegLoc()) {
6726       if (i == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
6727           Outs[0].VT == MVT::i64) {
6728         assert(VA.getLocVT() == MVT::i64 &&
6729                "unexpected calling convention register assignment");
6730         assert(!Ins.empty() && Ins[0].VT == MVT::i64 &&
6731                "unexpected use of 'returned'");
6732         IsThisReturn = true;
6733       }
6734       if (RegsUsed.count(VA.getLocReg())) {
6735         // If this register has already been used then we're trying to pack
6736         // parts of an [N x i32] into an X-register. The extension type will
6737         // take care of putting the two halves in the right place but we have to
6738         // combine them.
6739         SDValue &Bits =
6740             llvm::find_if(RegsToPass,
6741                           [=](const std::pair<unsigned, SDValue> &Elt) {
6742                             return Elt.first == VA.getLocReg();
6743                           })
6744                 ->second;
6745         Bits = DAG.getNode(ISD::OR, DL, Bits.getValueType(), Bits, Arg);
6746         // Call site info is used for function's parameter entry value
6747         // tracking. For now we track only simple cases when parameter
6748         // is transferred through whole register.
6749         llvm::erase_if(CSInfo, [&VA](MachineFunction::ArgRegPair ArgReg) {
6750           return ArgReg.Reg == VA.getLocReg();
6751         });
6752       } else {
6753         RegsToPass.emplace_back(VA.getLocReg(), Arg);
6754         RegsUsed.insert(VA.getLocReg());
6755         const TargetOptions &Options = DAG.getTarget().Options;
6756         if (Options.EmitCallSiteInfo)
6757           CSInfo.emplace_back(VA.getLocReg(), i);
6758       }
6759     } else {
6760       assert(VA.isMemLoc());
6761 
6762       SDValue DstAddr;
6763       MachinePointerInfo DstInfo;
6764 
      // FIXME: This works on big-endian for composite byvals, which are the
      // common case. It should also work for fundamental types.
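      // On big-endian targets a value smaller than 8 bytes is stored at the
      // high end of its stack slot, so bias the store address by the padding.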
6767       uint32_t BEAlign = 0;
6768       unsigned OpSize;
6769       if (VA.getLocInfo() == CCValAssign::Indirect ||
6770           VA.getLocInfo() == CCValAssign::Trunc)
6771         OpSize = VA.getLocVT().getFixedSizeInBits();
6772       else
6773         OpSize = Flags.isByVal() ? Flags.getByValSize() * 8
6774                                  : VA.getValVT().getSizeInBits();
6775       OpSize = (OpSize + 7) / 8;
6776       if (!Subtarget->isLittleEndian() && !Flags.isByVal() &&
6777           !Flags.isInConsecutiveRegs()) {
6778         if (OpSize < 8)
6779           BEAlign = 8 - OpSize;
6780       }
6781       unsigned LocMemOffset = VA.getLocMemOffset();
6782       int32_t Offset = LocMemOffset + BEAlign;
6783       SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);
6784       PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
6785 
6786       if (IsTailCall) {
6787         Offset = Offset + FPDiff;
6788         int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
6789 
6790         DstAddr = DAG.getFrameIndex(FI, PtrVT);
6791         DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
6792 
6793         // Make sure any stack arguments overlapping with where we're storing
6794         // are loaded before this eventual operation. Otherwise they'll be
6795         // clobbered.
6796         Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
      } else {
        // For a normal call the destination is simply SP + Offset, which was
        // already computed above as PtrOff.
        DstAddr = PtrOff;
        DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
      }
6803 
6804       if (Outs[i].Flags.isByVal()) {
6805         SDValue SizeNode =
6806             DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i64);
6807         SDValue Cpy = DAG.getMemcpy(
6808             Chain, DL, DstAddr, Arg, SizeNode,
6809             Outs[i].Flags.getNonZeroByValAlign(),
6810             /*isVol = */ false, /*AlwaysInline = */ false,
6811             /*isTailCall = */ false, DstInfo, MachinePointerInfo());
6812 
6813         MemOpChains.push_back(Cpy);
6814       } else {
6815         // Since we pass i1/i8/i16 as i1/i8/i16 on stack and Arg is already
6816         // promoted to a legal register type i32, we should truncate Arg back to
6817         // i1/i8/i16.
6818         if (VA.getValVT() == MVT::i1 || VA.getValVT() == MVT::i8 ||
6819             VA.getValVT() == MVT::i16)
6820           Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
6821 
6822         SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo);
6823         MemOpChains.push_back(Store);
6824       }
6825     }
6826   }
6827 
6828   if (!MemOpChains.empty())
6829     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
6830 
6831   // Build a sequence of copy-to-reg nodes chained together with token chain
6832   // and flag operands which copy the outgoing args into the appropriate regs.
6833   SDValue InFlag;
6834   for (auto &RegToPass : RegsToPass) {
6835     Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
6836                              RegToPass.second, InFlag);
6837     InFlag = Chain.getValue(1);
6838   }
6839 
6840   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
6841   // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
6842   // node so that legalize doesn't hack it.
6843   if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
6844     auto GV = G->getGlobal();
6845     unsigned OpFlags =
6846         Subtarget->classifyGlobalFunctionReference(GV, getTargetMachine());
6847     if (OpFlags & AArch64II::MO_GOT) {
6848       Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
6849       Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
    } else {
      Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0);
    }
6854   } else if (auto *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
6855     if (getTargetMachine().getCodeModel() == CodeModel::Large &&
6856         Subtarget->isTargetMachO()) {
6857       const char *Sym = S->getSymbol();
6858       Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, AArch64II::MO_GOT);
6859       Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
6860     } else {
6861       const char *Sym = S->getSymbol();
6862       Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0);
6863     }
6864   }
6865 
6866   // We don't usually want to end the call-sequence here because we would tidy
6867   // the frame up *after* the call, however in the ABI-changing tail-call case
6868   // we've carefully laid out the parameters so that when sp is reset they'll be
6869   // in the correct location.
6870   if (IsTailCall && !IsSibCall) {
6871     Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
6872                                DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
6873     InFlag = Chain.getValue(1);
6874   }
6875 
6876   std::vector<SDValue> Ops;
6877   Ops.push_back(Chain);
6878   Ops.push_back(Callee);
6879 
6880   if (IsTailCall) {
6881     // Each tail call may have to adjust the stack by a different amount, so
6882     // this information must travel along with the operation for eventual
6883     // consumption by emitEpilogue.
6884     Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
6885   }
6886 
6887   // Add argument registers to the end of the list so that they are known live
6888   // into the call.
6889   for (auto &RegToPass : RegsToPass)
6890     Ops.push_back(DAG.getRegister(RegToPass.first,
6891                                   RegToPass.second.getValueType()));
6892 
6893   // Add a register mask operand representing the call-preserved registers.
6894   const uint32_t *Mask;
6895   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
6896   if (IsThisReturn) {
6897     // For 'this' returns, use the X0-preserving mask if applicable
6898     Mask = TRI->getThisReturnPreservedMask(MF, CallConv);
6899     if (!Mask) {
6900       IsThisReturn = false;
6901       Mask = TRI->getCallPreservedMask(MF, CallConv);
6902     }
6903   } else
6904     Mask = TRI->getCallPreservedMask(MF, CallConv);
6905 
6906   if (Subtarget->hasCustomCallingConv())
6907     TRI->UpdateCustomCallPreservedMask(MF, &Mask);
6908 
6909   if (TRI->isAnyArgRegReserved(MF))
6910     TRI->emitReservedArgRegCallError(MF);
6911 
6912   assert(Mask && "Missing call preserved mask for calling convention");
6913   Ops.push_back(DAG.getRegisterMask(Mask));
6914 
6915   if (InFlag.getNode())
6916     Ops.push_back(InFlag);
6917 
6918   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6919 
  // If we're doing a tail call, use a TC_RETURN here rather than an
6921   // actual call instruction.
6922   if (IsTailCall) {
6923     MF.getFrameInfo().setHasTailCall();
6924     SDValue Ret = DAG.getNode(AArch64ISD::TC_RETURN, DL, NodeTys, Ops);
6925     DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
6926     return Ret;
6927   }
6928 
6929   unsigned CallOpc = AArch64ISD::CALL;
6930   // Calls with operand bundle "clang.arc.attachedcall" are special. They should
6931   // be expanded to the call, directly followed by a special marker sequence and
6932   // a call to an ObjC library function.  Use CALL_RVMARKER to do that.
6933   if (CLI.CB && objcarc::hasAttachedCallOpBundle(CLI.CB)) {
6934     assert(!IsTailCall &&
6935            "tail calls cannot be marked with clang.arc.attachedcall");
6936     CallOpc = AArch64ISD::CALL_RVMARKER;
6937 
6938     // Add a target global address for the retainRV/claimRV runtime function
6939     // just before the call target.
6940     Function *ARCFn = *objcarc::getAttachedARCFunction(CLI.CB);
6941     auto GA = DAG.getTargetGlobalAddress(ARCFn, DL, PtrVT);
6942     Ops.insert(Ops.begin() + 1, GA);
6943   } else if (GuardWithBTI)
6944     CallOpc = AArch64ISD::CALL_BTI;
6945 
6946   // Returns a chain and a flag for retval copy to use.
6947   Chain = DAG.getNode(CallOpc, DL, NodeTys, Ops);
6948   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
6949   InFlag = Chain.getValue(1);
6950   DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
6951 
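  // If the callee pops its own stack arguments (see DoesCalleeRestoreStack),
  // CALLSEQ_END must record the 16-byte-aligned number of bytes it will pop;
  // otherwise the caller cleans up and this is zero.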
6952   uint64_t CalleePopBytes =
6953       DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0;
6954 
6955   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
6956                              DAG.getIntPtrConstant(CalleePopBytes, DL, true),
6957                              InFlag, DL);
6958   if (!Ins.empty())
6959     InFlag = Chain.getValue(1);
6960 
6961   // Handle result values, copying them out of physregs into vregs that we
6962   // return.
6963   return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, RVLocs, DL, DAG,
6964                          InVals, IsThisReturn,
6965                          IsThisReturn ? OutVals[0] : SDValue());
6966 }
6967 
6968 bool AArch64TargetLowering::CanLowerReturn(
6969     CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
6970     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
6971   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
6972   SmallVector<CCValAssign, 16> RVLocs;
6973   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
6974   return CCInfo.CheckReturn(Outs, RetCC);
6975 }
6976 
6977 SDValue
6978 AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
6979                                    bool isVarArg,
6980                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
6981                                    const SmallVectorImpl<SDValue> &OutVals,
6982                                    const SDLoc &DL, SelectionDAG &DAG) const {
6983   auto &MF = DAG.getMachineFunction();
6984   auto *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
6985 
6986   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
6987   SmallVector<CCValAssign, 16> RVLocs;
6988   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
6989   CCInfo.AnalyzeReturn(Outs, RetCC);
6990 
6991   // Copy the result values into the output registers.
6992   SDValue Flag;
6993   SmallVector<std::pair<unsigned, SDValue>, 4> RetVals;
6994   SmallSet<unsigned, 4> RegsUsed;
6995   for (unsigned i = 0, realRVLocIdx = 0; i != RVLocs.size();
6996        ++i, ++realRVLocIdx) {
6997     CCValAssign &VA = RVLocs[i];
6998     assert(VA.isRegLoc() && "Can only return in registers!");
6999     SDValue Arg = OutVals[realRVLocIdx];
7000 
7001     switch (VA.getLocInfo()) {
7002     default:
7003       llvm_unreachable("Unknown loc info!");
7004     case CCValAssign::Full:
7005       if (Outs[i].ArgVT == MVT::i1) {
7006         // AAPCS requires i1 to be zero-extended to i8 by the producer of the
7007         // value. This is strictly redundant on Darwin (which uses "zeroext
7008         // i1"), but will be optimised out before ISel.
7009         Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
7010         Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
7011       }
7012       break;
7013     case CCValAssign::BCvt:
7014       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
7015       break;
7016     case CCValAssign::AExt:
7017     case CCValAssign::ZExt:
7018       Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
7019       break;
7020     case CCValAssign::AExtUpper:
7021       assert(VA.getValVT() == MVT::i32 && "only expect 32 -> 64 upper bits");
7022       Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
7023       Arg = DAG.getNode(ISD::SHL, DL, VA.getLocVT(), Arg,
7024                         DAG.getConstant(32, DL, VA.getLocVT()));
7025       break;
7026     }
7027 
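    // If this location reuses a register that already holds part of the return
    // value (an [N x i32] packed into one X register), merge the pieces with
    // an OR, mirroring the handling in LowerCall.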
7028     if (RegsUsed.count(VA.getLocReg())) {
7029       SDValue &Bits =
7030           llvm::find_if(RetVals, [=](const std::pair<unsigned, SDValue> &Elt) {
7031             return Elt.first == VA.getLocReg();
7032           })->second;
7033       Bits = DAG.getNode(ISD::OR, DL, Bits.getValueType(), Bits, Arg);
7034     } else {
7035       RetVals.emplace_back(VA.getLocReg(), Arg);
7036       RegsUsed.insert(VA.getLocReg());
7037     }
7038   }
7039 
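  // Copy each merged return value into its assigned physical register, glue
  // the copies together, and list the registers as operands of the return so
  // they are treated as live-out.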
7040   SmallVector<SDValue, 4> RetOps(1, Chain);
7041   for (auto &RetVal : RetVals) {
7042     Chain = DAG.getCopyToReg(Chain, DL, RetVal.first, RetVal.second, Flag);
7043     Flag = Chain.getValue(1);
7044     RetOps.push_back(
7045         DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
7046   }
7047 
7048   // Windows AArch64 ABIs require that for returning structs by value we copy
7049   // the sret argument into X0 for the return.
7050   // We saved the argument into a virtual register in the entry block,
7051   // so now we copy the value out and into X0.
7052   if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
7053     SDValue Val = DAG.getCopyFromReg(RetOps[0], DL, SRetReg,
7054                                      getPointerTy(MF.getDataLayout()));
7055 
7056     unsigned RetValReg = AArch64::X0;
7057     Chain = DAG.getCopyToReg(Chain, DL, RetValReg, Val, Flag);
7058     Flag = Chain.getValue(1);
7059 
7060     RetOps.push_back(
7061       DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
7062   }
7063 
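  // If callee-saved registers are preserved via explicit copies rather than in
  // the prologue/epilogue, add them as operands of the return so they are also
  // treated as live-out.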
7064   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
7065   const MCPhysReg *I = TRI->getCalleeSavedRegsViaCopy(&MF);
7066   if (I) {
7067     for (; *I; ++I) {
7068       if (AArch64::GPR64RegClass.contains(*I))
7069         RetOps.push_back(DAG.getRegister(*I, MVT::i64));
7070       else if (AArch64::FPR64RegClass.contains(*I))
7071         RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
7072       else
7073         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
7074     }
7075   }
7076 
7077   RetOps[0] = Chain; // Update chain.
7078 
7079   // Add the flag if we have it.
7080   if (Flag.getNode())
7081     RetOps.push_back(Flag);
7082 
7083   return DAG.getNode(AArch64ISD::RET_FLAG, DL, MVT::Other, RetOps);
7084 }
7085 
7086 //===----------------------------------------------------------------------===//
7087 //  Other Lowering Code
7088 //===----------------------------------------------------------------------===//
7089 
7090 SDValue AArch64TargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
7091                                              SelectionDAG &DAG,
7092                                              unsigned Flag) const {
7093   return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty,
7094                                     N->getOffset(), Flag);
7095 }
7096 
7097 SDValue AArch64TargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
7098                                              SelectionDAG &DAG,
7099                                              unsigned Flag) const {
7100   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
7101 }
7102 
7103 SDValue AArch64TargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
7104                                              SelectionDAG &DAG,
7105                                              unsigned Flag) const {
7106   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
7107                                    N->getOffset(), Flag);
7108 }
7109 
7110 SDValue AArch64TargetLowering::getTargetNode(BlockAddressSDNode* N, EVT Ty,
7111                                              SelectionDAG &DAG,
7112                                              unsigned Flag) const {
7113   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
7114 }
7115 
7116 // (loadGOT sym)
7117 template <class NodeTy>
7118 SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG,
7119                                       unsigned Flags) const {
7120   LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getGOT\n");
7121   SDLoc DL(N);
7122   EVT Ty = getPointerTy(DAG.getDataLayout());
7123   SDValue GotAddr = getTargetNode(N, Ty, DAG, AArch64II::MO_GOT | Flags);
7124   // FIXME: Once remat is capable of dealing with instructions with register
7125   // operands, expand this into two nodes instead of using a wrapper node.
7126   return DAG.getNode(AArch64ISD::LOADgot, DL, Ty, GotAddr);
7127 }
7128 
7129 // (wrapper %highest(sym), %higher(sym), %hi(sym), %lo(sym))
7130 template <class NodeTy>
7131 SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG,
7132                                             unsigned Flags) const {
7133   LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrLarge\n");
7134   SDLoc DL(N);
7135   EVT Ty = getPointerTy(DAG.getDataLayout());
7136   const unsigned char MO_NC = AArch64II::MO_NC;
7137   return DAG.getNode(
7138       AArch64ISD::WrapperLarge, DL, Ty,
7139       getTargetNode(N, Ty, DAG, AArch64II::MO_G3 | Flags),
7140       getTargetNode(N, Ty, DAG, AArch64II::MO_G2 | MO_NC | Flags),
7141       getTargetNode(N, Ty, DAG, AArch64II::MO_G1 | MO_NC | Flags),
7142       getTargetNode(N, Ty, DAG, AArch64II::MO_G0 | MO_NC | Flags));
7143 }
7144 
7145 // (addlow (adrp %hi(sym)) %lo(sym))
7146 template <class NodeTy>
7147 SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
7148                                        unsigned Flags) const {
7149   LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddr\n");
7150   SDLoc DL(N);
7151   EVT Ty = getPointerTy(DAG.getDataLayout());
7152   SDValue Hi = getTargetNode(N, Ty, DAG, AArch64II::MO_PAGE | Flags);
7153   SDValue Lo = getTargetNode(N, Ty, DAG,
7154                              AArch64II::MO_PAGEOFF | AArch64II::MO_NC | Flags);
7155   SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, Ty, Hi);
7156   return DAG.getNode(AArch64ISD::ADDlow, DL, Ty, ADRP, Lo);
7157 }
7158 
7159 // (adr sym)
7160 template <class NodeTy>
7161 SDValue AArch64TargetLowering::getAddrTiny(NodeTy *N, SelectionDAG &DAG,
7162                                            unsigned Flags) const {
7163   LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrTiny\n");
7164   SDLoc DL(N);
7165   EVT Ty = getPointerTy(DAG.getDataLayout());
7166   SDValue Sym = getTargetNode(N, Ty, DAG, Flags);
7167   return DAG.getNode(AArch64ISD::ADR, DL, Ty, Sym);
7168 }
7169 
7170 SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
7171                                                   SelectionDAG &DAG) const {
7172   GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
7173   const GlobalValue *GV = GN->getGlobal();
7174   unsigned OpFlags = Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
7175 
7176   if (OpFlags != AArch64II::MO_NO_FLAG)
7177     assert(cast<GlobalAddressSDNode>(Op)->getOffset() == 0 &&
7178            "unexpected offset in global node");
7179 
7180   // This also catches the large code model case for Darwin, and tiny code
7181   // model with got relocations.
7182   if ((OpFlags & AArch64II::MO_GOT) != 0) {
7183     return getGOT(GN, DAG, OpFlags);
7184   }
7185 
7186   SDValue Result;
7187   if (getTargetMachine().getCodeModel() == CodeModel::Large) {
7188     Result = getAddrLarge(GN, DAG, OpFlags);
7189   } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
7190     Result = getAddrTiny(GN, DAG, OpFlags);
7191   } else {
7192     Result = getAddr(GN, DAG, OpFlags);
7193   }
7194   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7195   SDLoc DL(GN);
7196   if (OpFlags & (AArch64II::MO_DLLIMPORT | AArch64II::MO_COFFSTUB))
7197     Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
7198                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
7199   return Result;
7200 }
7201 
7202 /// Convert a TLS address reference into the correct sequence of loads
7203 /// and calls to compute the variable's address (for Darwin, currently) and
7204 /// return an SDValue containing the final node.
7205 
7206 /// Darwin only has one TLS scheme which must be capable of dealing with the
7207 /// fully general situation, in the worst case. This means:
7208 ///     + "extern __thread" declaration.
7209 ///     + Defined in a possibly unknown dynamic library.
7210 ///
7211 /// The general system is that each __thread variable has a [3 x i64] descriptor
7212 /// which contains information used by the runtime to calculate the address. The
7213 /// only part of this the compiler needs to know about is the first xword, which
7214 /// contains a function pointer that must be called with the address of the
7215 /// entire descriptor in "x0".
7216 ///
7217 /// Since this descriptor may be in a different unit, in general even the
7218 /// descriptor must be accessed via an indirect load. The "ideal" code sequence
7219 /// is:
7220 ///     adrp x0, _var@TLVPPAGE
7221 ///     ldr x0, [x0, _var@TLVPPAGEOFF]   ; x0 now contains address of descriptor
7222 ///     ldr x1, [x0]                     ; x1 contains 1st entry of descriptor,
7223 ///                                      ; the function pointer
7224 ///     blr x1                           ; Uses descriptor address in x0
7225 ///     ; Address of _var is now in x0.
7226 ///
7227 /// If the address of _var's descriptor *is* known to the linker, then it can
7228 /// change the first "ldr" instruction to an appropriate "add x0, x0, #imm" for
7229 /// a slight efficiency gain.
7230 SDValue
7231 AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op,
7232                                                    SelectionDAG &DAG) const {
7233   assert(Subtarget->isTargetDarwin() &&
7234          "This function expects a Darwin target");
7235 
7236   SDLoc DL(Op);
7237   MVT PtrVT = getPointerTy(DAG.getDataLayout());
7238   MVT PtrMemVT = getPointerMemTy(DAG.getDataLayout());
7239   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
7240 
7241   SDValue TLVPAddr =
7242       DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
7243   SDValue DescAddr = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TLVPAddr);
7244 
7245   // The first entry in the descriptor is a function pointer that we must call
7246   // to obtain the address of the variable.
7247   SDValue Chain = DAG.getEntryNode();
7248   SDValue FuncTLVGet = DAG.getLoad(
7249       PtrMemVT, DL, Chain, DescAddr,
7250       MachinePointerInfo::getGOT(DAG.getMachineFunction()),
7251       Align(PtrMemVT.getSizeInBits() / 8),
7252       MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
7253   Chain = FuncTLVGet.getValue(1);
7254 
7255   // Extend loaded pointer if necessary (i.e. if ILP32) to DAG pointer.
7256   FuncTLVGet = DAG.getZExtOrTrunc(FuncTLVGet, DL, PtrVT);
7257 
7258   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7259   MFI.setAdjustsStack(true);
7260 
7261   // TLS calls preserve all registers except those that absolutely must be
7262   // trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be
7263   // silly).
7264   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
7265   const uint32_t *Mask = TRI->getTLSCallPreservedMask();
7266   if (Subtarget->hasCustomCallingConv())
7267     TRI->UpdateCustomCallPreservedMask(DAG.getMachineFunction(), &Mask);
7268 
7269   // Finally, we can make the call. This is just a degenerate version of a
7270   // normal AArch64 call node: x0 takes the address of the descriptor, and
7271   // returns the address of the variable in this thread.
7272   Chain = DAG.getCopyToReg(Chain, DL, AArch64::X0, DescAddr, SDValue());
7273   Chain =
7274       DAG.getNode(AArch64ISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
7275                   Chain, FuncTLVGet, DAG.getRegister(AArch64::X0, MVT::i64),
7276                   DAG.getRegisterMask(Mask), Chain.getValue(1));
7277   return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Chain.getValue(1));
7278 }
7279 
7280 /// Convert a thread-local variable reference into a sequence of instructions to
7281 /// compute the variable's address for the local exec TLS model of ELF targets.
7282 /// The sequence depends on the maximum TLS area size.
7283 SDValue AArch64TargetLowering::LowerELFTLSLocalExec(const GlobalValue *GV,
7284                                                     SDValue ThreadBase,
7285                                                     const SDLoc &DL,
7286                                                     SelectionDAG &DAG) const {
7287   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7288   SDValue TPOff, Addr;
7289 
7290   switch (DAG.getTarget().Options.TLSSize) {
7291   default:
7292     llvm_unreachable("Unexpected TLS size");
7293 
7294   case 12: {
7295     // mrs   x0, TPIDR_EL0
7296     // add   x0, x0, :tprel_lo12:a
7297     SDValue Var = DAG.getTargetGlobalAddress(
7298         GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
7299     return SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase,
7300                                       Var,
7301                                       DAG.getTargetConstant(0, DL, MVT::i32)),
7302                    0);
7303   }
7304 
7305   case 24: {
7306     // mrs   x0, TPIDR_EL0
7307     // add   x0, x0, :tprel_hi12:a
7308     // add   x0, x0, :tprel_lo12_nc:a
7309     SDValue HiVar = DAG.getTargetGlobalAddress(
7310         GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
7311     SDValue LoVar = DAG.getTargetGlobalAddress(
7312         GV, DL, PtrVT, 0,
7313         AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
7314     Addr = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase,
7315                                       HiVar,
7316                                       DAG.getTargetConstant(0, DL, MVT::i32)),
7317                    0);
7318     return SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, Addr,
7319                                       LoVar,
7320                                       DAG.getTargetConstant(0, DL, MVT::i32)),
7321                    0);
7322   }
7323 
7324   case 32: {
7325     // mrs   x1, TPIDR_EL0
7326     // movz  x0, #:tprel_g1:a
7327     // movk  x0, #:tprel_g0_nc:a
7328     // add   x0, x1, x0
7329     SDValue HiVar = DAG.getTargetGlobalAddress(
7330         GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G1);
7331     SDValue LoVar = DAG.getTargetGlobalAddress(
7332         GV, DL, PtrVT, 0,
7333         AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC);
7334     TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar,
7335                                        DAG.getTargetConstant(16, DL, MVT::i32)),
7336                     0);
7337     TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, LoVar,
7338                                        DAG.getTargetConstant(0, DL, MVT::i32)),
7339                     0);
7340     return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
7341   }
7342 
7343   case 48: {
7344     // mrs   x1, TPIDR_EL0
7345     // movz  x0, #:tprel_g2:a
7346     // movk  x0, #:tprel_g1_nc:a
7347     // movk  x0, #:tprel_g0_nc:a
7348     // add   x0, x1, x0
7349     SDValue HiVar = DAG.getTargetGlobalAddress(
7350         GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G2);
7351     SDValue MiVar = DAG.getTargetGlobalAddress(
7352         GV, DL, PtrVT, 0,
7353         AArch64II::MO_TLS | AArch64II::MO_G1 | AArch64II::MO_NC);
7354     SDValue LoVar = DAG.getTargetGlobalAddress(
7355         GV, DL, PtrVT, 0,
7356         AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC);
7357     TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar,
7358                                        DAG.getTargetConstant(32, DL, MVT::i32)),
7359                     0);
7360     TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, MiVar,
7361                                        DAG.getTargetConstant(16, DL, MVT::i32)),
7362                     0);
7363     TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, LoVar,
7364                                        DAG.getTargetConstant(0, DL, MVT::i32)),
7365                     0);
7366     return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
7367   }
7368   }
7369 }
7370 
7371 /// When accessing thread-local variables under either the general-dynamic or
7372 /// local-dynamic system, we make a "TLS-descriptor" call. The variable will
7373 /// have a descriptor, accessible via a PC-relative ADRP, and whose first entry
7374 /// is a function pointer to carry out the resolution.
7375 ///
7376 /// The sequence is:
7377 ///    adrp  x0, :tlsdesc:var
7378 ///    ldr   x1, [x0, #:tlsdesc_lo12:var]
7379 ///    add   x0, x0, #:tlsdesc_lo12:var
7380 ///    .tlsdesccall var
7381 ///    blr   x1
7382 ///    (TPIDR_EL0 offset now in x0)
7383 ///
7384 ///  The above sequence must be produced unscheduled, to enable the linker to
7385 ///  optimize/relax this sequence.
7386 ///  Therefore, a pseudo-instruction (TLSDESC_CALLSEQ) is used to represent the
7387 ///  above sequence, and expanded really late in the compilation flow, to ensure
7388 ///  the sequence is produced as per above.
7389 SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr,
7390                                                       const SDLoc &DL,
7391                                                       SelectionDAG &DAG) const {
7392   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7393 
7394   SDValue Chain = DAG.getEntryNode();
7395   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7396 
7397   Chain =
7398       DAG.getNode(AArch64ISD::TLSDESC_CALLSEQ, DL, NodeTys, {Chain, SymAddr});
7399   SDValue Glue = Chain.getValue(1);
7400 
7401   return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
7402 }
7403 
7404 SDValue
7405 AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
7406                                                 SelectionDAG &DAG) const {
7407   assert(Subtarget->isTargetELF() && "This function expects an ELF target");
7408 
7409   const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
7410 
7411   TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());
7412 
7413   if (!EnableAArch64ELFLocalDynamicTLSGeneration) {
7414     if (Model == TLSModel::LocalDynamic)
7415       Model = TLSModel::GeneralDynamic;
7416   }
7417 
7418   if (getTargetMachine().getCodeModel() == CodeModel::Large &&
7419       Model != TLSModel::LocalExec)
7420     report_fatal_error("ELF TLS only supported in small memory model or "
7421                        "in local exec TLS model");
7422   // Different choices can be made for the maximum size of the TLS area for a
7423   // module. For the small address model, the default TLS size is 16MiB and the
7424   // maximum TLS size is 4GiB.
7425   // FIXME: add tiny and large code model support for TLS access models other
7426   // than local exec. We currently generate the same code as small for tiny,
7427   // which may be larger than needed.
7428 
7429   SDValue TPOff;
7430   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7431   SDLoc DL(Op);
7432   const GlobalValue *GV = GA->getGlobal();
7433 
7434   SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);
7435 
7436   if (Model == TLSModel::LocalExec) {
7437     return LowerELFTLSLocalExec(GV, ThreadBase, DL, DAG);
7438   } else if (Model == TLSModel::InitialExec) {
7439     TPOff = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
7440     TPOff = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TPOff);
7441   } else if (Model == TLSModel::LocalDynamic) {
7442     // Local-dynamic accesses proceed in two phases. A general-dynamic TLS
7443     // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate
7444     // the beginning of the module's TLS region, followed by a DTPREL offset
7445     // calculation.
7446 
7447     // These accesses will need deduplicating if there's more than one.
7448     AArch64FunctionInfo *MFI =
7449         DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
7450     MFI->incNumLocalDynamicTLSAccesses();
7451 
7452     // The call needs a relocation too for linker relaxation. It doesn't make
7453     // sense to call it MO_PAGE or MO_PAGEOFF though so we need another copy of
7454     // the address.
7455     SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
7456                                                   AArch64II::MO_TLS);
7457 
7458     // Now we can calculate the offset from TPIDR_EL0 to this module's
7459     // thread-local area.
7460     TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);
7461 
7462     // Now use :dtprel_whatever: operations to calculate this variable's offset
7463     // in its thread-storage area.
7464     SDValue HiVar = DAG.getTargetGlobalAddress(
7465         GV, DL, MVT::i64, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
7466     SDValue LoVar = DAG.getTargetGlobalAddress(
7467         GV, DL, MVT::i64, 0,
7468         AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
7469 
7470     TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, HiVar,
7471                                        DAG.getTargetConstant(0, DL, MVT::i32)),
7472                     0);
7473     TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, LoVar,
7474                                        DAG.getTargetConstant(0, DL, MVT::i32)),
7475                     0);
7476   } else if (Model == TLSModel::GeneralDynamic) {
    // The call needs a relocation too for linker relaxation. It doesn't make
    // sense to call it MO_PAGE or MO_PAGEOFF though, so we need another copy
    // of the address.
7480     SDValue SymAddr =
7481         DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
7482 
7483     // Finally we can make a call to calculate the offset from tpidr_el0.
7484     TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);
7485   } else
7486     llvm_unreachable("Unsupported ELF TLS access model");
7487 
7488   return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
7489 }
7490 
7491 SDValue
7492 AArch64TargetLowering::LowerWindowsGlobalTLSAddress(SDValue Op,
7493                                                     SelectionDAG &DAG) const {
7494   assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
7495 
7496   SDValue Chain = DAG.getEntryNode();
7497   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7498   SDLoc DL(Op);
7499 
7500   SDValue TEB = DAG.getRegister(AArch64::X18, MVT::i64);
7501 
7502   // Load the ThreadLocalStoragePointer from the TEB
7503   // A pointer to the TLS array is located at offset 0x58 from the TEB.
7504   SDValue TLSArray =
7505       DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x58, DL));
7506   TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
7507   Chain = TLSArray.getValue(1);
7508 
  // Load the TLS index from the C runtime.
7510   // This does the same as getAddr(), but without having a GlobalAddressSDNode.
7511   // This also does the same as LOADgot, but using a generic i32 load,
7512   // while LOADgot only loads i64.
7513   SDValue TLSIndexHi =
7514       DAG.getTargetExternalSymbol("_tls_index", PtrVT, AArch64II::MO_PAGE);
7515   SDValue TLSIndexLo = DAG.getTargetExternalSymbol(
7516       "_tls_index", PtrVT, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
7517   SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, TLSIndexHi);
7518   SDValue TLSIndex =
7519       DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, TLSIndexLo);
7520   TLSIndex = DAG.getLoad(MVT::i32, DL, Chain, TLSIndex, MachinePointerInfo());
7521   Chain = TLSIndex.getValue(1);
7522 
  // The pointer to the thread's TLS data area is at offset (_tls_index * 8)
  // into the TLS array.
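  // For illustration, the overall address computed by this function is roughly
  //   *(*(x18 + 0x58) + _tls_index * 8) + <offset of GV within the .tls
  //   section>,
  // with the section offset applied via the MO_TLS-flagged operands below.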
7525   TLSIndex = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TLSIndex);
7526   SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
7527                              DAG.getConstant(3, DL, PtrVT));
7528   SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
7529                             DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
7530                             MachinePointerInfo());
7531   Chain = TLS.getValue(1);
7532 
7533   const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
7534   const GlobalValue *GV = GA->getGlobal();
7535   SDValue TGAHi = DAG.getTargetGlobalAddress(
7536       GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
7537   SDValue TGALo = DAG.getTargetGlobalAddress(
7538       GV, DL, PtrVT, 0,
7539       AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
7540 
7541   // Add the offset from the start of the .tls section (section base).
7542   SDValue Addr =
7543       SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TLS, TGAHi,
7544                                  DAG.getTargetConstant(0, DL, MVT::i32)),
7545               0);
7546   Addr = DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, Addr, TGALo);
7547   return Addr;
7548 }
7549 
7550 SDValue AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
7551                                                      SelectionDAG &DAG) const {
7552   const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
7553   if (DAG.getTarget().useEmulatedTLS())
7554     return LowerToTLSEmulatedModel(GA, DAG);
7555 
7556   if (Subtarget->isTargetDarwin())
7557     return LowerDarwinGlobalTLSAddress(Op, DAG);
7558   if (Subtarget->isTargetELF())
7559     return LowerELFGlobalTLSAddress(Op, DAG);
7560   if (Subtarget->isTargetWindows())
7561     return LowerWindowsGlobalTLSAddress(Op, DAG);
7562 
7563   llvm_unreachable("Unexpected platform trying to use TLS");
7564 }
7565 
7566 // Looks through \param Val to determine the bit that can be used to
7567 // check the sign of the value. It returns the unextended value and
7568 // the sign bit position.
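// For example, (sign_extend_inreg x, i8) yields {x, 7}, (sign_extend x) from
// i32 yields {x, 31}, and a plain i32 value yields {x, 31}.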
7569 std::pair<SDValue, uint64_t> lookThroughSignExtension(SDValue Val) {
7570   if (Val.getOpcode() == ISD::SIGN_EXTEND_INREG)
7571     return {Val.getOperand(0),
7572             cast<VTSDNode>(Val.getOperand(1))->getVT().getFixedSizeInBits() -
7573                 1};
7574 
7575   if (Val.getOpcode() == ISD::SIGN_EXTEND)
7576     return {Val.getOperand(0),
7577             Val.getOperand(0)->getValueType(0).getFixedSizeInBits() - 1};
7578 
7579   return {Val, Val.getValueSizeInBits() - 1};
7580 }
7581 
7582 SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
7583   SDValue Chain = Op.getOperand(0);
7584   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
7585   SDValue LHS = Op.getOperand(2);
7586   SDValue RHS = Op.getOperand(3);
7587   SDValue Dest = Op.getOperand(4);
7588   SDLoc dl(Op);
7589 
7590   MachineFunction &MF = DAG.getMachineFunction();
7591   // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
7592   // will not be produced, as they are conditional branch instructions that do
7593   // not set flags.
7594   bool ProduceNonFlagSettingCondBr =
7595       !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);
7596 
7597   // Handle f128 first, since lowering it will result in comparing the return
7598   // value of a libcall against zero, which is just what the rest of LowerBR_CC
7599   // is expecting to deal with.
7600   if (LHS.getValueType() == MVT::f128) {
7601     softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS);
7602 
7603     // If softenSetCCOperands returned a scalar, we need to compare the result
7604     // against zero to select between true and false values.
7605     if (!RHS.getNode()) {
7606       RHS = DAG.getConstant(0, dl, LHS.getValueType());
7607       CC = ISD::SETNE;
7608     }
7609   }
7610 
7611   // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
7612   // instruction.
7613   if (ISD::isOverflowIntrOpRes(LHS) && isOneConstant(RHS) &&
7614       (CC == ISD::SETEQ || CC == ISD::SETNE)) {
7615     // Only lower legal XALUO ops.
7616     if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0)))
7617       return SDValue();
7618 
7619     // The actual operation with overflow check.
7620     AArch64CC::CondCode OFCC;
7621     SDValue Value, Overflow;
7622     std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, LHS.getValue(0), DAG);
7623 
7624     if (CC == ISD::SETNE)
7625       OFCC = getInvertedCondCode(OFCC);
7626     SDValue CCVal = DAG.getConstant(OFCC, dl, MVT::i32);
7627 
7628     return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
7629                        Overflow);
7630   }
7631 
7632   if (LHS.getValueType().isInteger()) {
7633     assert((LHS.getValueType() == RHS.getValueType()) &&
7634            (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));
7635 
7636     // If the RHS of the comparison is zero, we can potentially fold this
7637     // to a specialized branch.
7638     const ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
7639     if (RHSC && RHSC->getZExtValue() == 0 && ProduceNonFlagSettingCondBr) {
7640       if (CC == ISD::SETEQ) {
7641         // See if we can use a TBZ to fold in an AND as well.
7642         // TBZ has a smaller branch displacement than CBZ.  If the offset is
7643         // out of bounds, a late MI-layer pass rewrites branches.
7644         // 403.gcc is an example that hits this case.
7645         if (LHS.getOpcode() == ISD::AND &&
7646             isa<ConstantSDNode>(LHS.getOperand(1)) &&
7647             isPowerOf2_64(LHS.getConstantOperandVal(1))) {
7648           SDValue Test = LHS.getOperand(0);
7649           uint64_t Mask = LHS.getConstantOperandVal(1);
7650           return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, Test,
7651                              DAG.getConstant(Log2_64(Mask), dl, MVT::i64),
7652                              Dest);
7653         }
7654 
7655         return DAG.getNode(AArch64ISD::CBZ, dl, MVT::Other, Chain, LHS, Dest);
7656       } else if (CC == ISD::SETNE) {
7657         // See if we can use a TBZ to fold in an AND as well.
7658         // TBZ has a smaller branch displacement than CBZ.  If the offset is
7659         // out of bounds, a late MI-layer pass rewrites branches.
7660         // 403.gcc is an example that hits this case.
7661         if (LHS.getOpcode() == ISD::AND &&
7662             isa<ConstantSDNode>(LHS.getOperand(1)) &&
7663             isPowerOf2_64(LHS.getConstantOperandVal(1))) {
7664           SDValue Test = LHS.getOperand(0);
7665           uint64_t Mask = LHS.getConstantOperandVal(1);
7666           return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, Test,
7667                              DAG.getConstant(Log2_64(Mask), dl, MVT::i64),
7668                              Dest);
7669         }
7670 
7671         return DAG.getNode(AArch64ISD::CBNZ, dl, MVT::Other, Chain, LHS, Dest);
7672       } else if (CC == ISD::SETLT && LHS.getOpcode() != ISD::AND) {
7673         // Don't combine AND since emitComparison converts the AND to an ANDS
7674         // (a.k.a. TST) and the test in the test bit and branch instruction
7675         // becomes redundant.  This would also increase register pressure.
7676         uint64_t SignBitPos;
7677         std::tie(LHS, SignBitPos) = lookThroughSignExtension(LHS);
7678         return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, LHS,
7679                            DAG.getConstant(SignBitPos, dl, MVT::i64), Dest);
7680       }
7681     }
7682     if (RHSC && RHSC->getSExtValue() == -1 && CC == ISD::SETGT &&
7683         LHS.getOpcode() != ISD::AND && ProduceNonFlagSettingCondBr) {
7684       // Don't combine AND since emitComparison converts the AND to an ANDS
7685       // (a.k.a. TST) and the test in the test bit and branch instruction
7686       // becomes redundant.  This would also increase register pressure.
7687       uint64_t SignBitPos;
7688       std::tie(LHS, SignBitPos) = lookThroughSignExtension(LHS);
7689       return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, LHS,
7690                          DAG.getConstant(SignBitPos, dl, MVT::i64), Dest);
7691     }
7692 
7693     SDValue CCVal;
7694     SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
7695     return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
7696                        Cmp);
7697   }
7698 
7699   assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::bf16 ||
7700          LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
7701 
7702   // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
7703   // clean.  Some of them require two branches to implement.
7704   SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
7705   AArch64CC::CondCode CC1, CC2;
7706   changeFPCCToAArch64CC(CC, CC1, CC2);
7707   SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
7708   SDValue BR1 =
7709       DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CC1Val, Cmp);
7710   if (CC2 != AArch64CC::AL) {
7711     SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
7712     return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, BR1, Dest, CC2Val,
7713                        Cmp);
7714   }
7715 
7716   return BR1;
7717 }
7718 
7719 SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
7720                                               SelectionDAG &DAG) const {
7721   if (!Subtarget->hasNEON())
7722     return SDValue();
7723 
7724   EVT VT = Op.getValueType();
7725   EVT IntVT = VT.changeTypeToInteger();
7726   SDLoc DL(Op);
7727 
7728   SDValue In1 = Op.getOperand(0);
7729   SDValue In2 = Op.getOperand(1);
7730   EVT SrcVT = In2.getValueType();
7731 
7732   if (SrcVT.bitsLT(VT))
7733     In2 = DAG.getNode(ISD::FP_EXTEND, DL, VT, In2);
7734   else if (SrcVT.bitsGT(VT))
7735     In2 = DAG.getNode(ISD::FP_ROUND, DL, VT, In2, DAG.getIntPtrConstant(0, DL));
7736 
7737   if (VT.isScalableVector())
7738     IntVT =
7739         getPackedSVEVectorVT(VT.getVectorElementType().changeTypeToInteger());
7740 
7741   if (VT != In2.getValueType())
7742     return SDValue();
7743 
7744   auto BitCast = [this](EVT VT, SDValue Op, SelectionDAG &DAG) {
7745     if (VT.isScalableVector())
7746       return getSVESafeBitCast(VT, Op, DAG);
7747 
7748     return DAG.getBitcast(VT, Op);
7749   };
7750 
7751   SDValue VecVal1, VecVal2;
7752   EVT VecVT;
7753   auto SetVecVal = [&](int Idx = -1) {
7754     if (!VT.isVector()) {
7755       VecVal1 =
7756           DAG.getTargetInsertSubreg(Idx, DL, VecVT, DAG.getUNDEF(VecVT), In1);
7757       VecVal2 =
7758           DAG.getTargetInsertSubreg(Idx, DL, VecVT, DAG.getUNDEF(VecVT), In2);
7759     } else {
7760       VecVal1 = BitCast(VecVT, In1, DAG);
7761       VecVal2 = BitCast(VecVT, In2, DAG);
7762     }
7763   };
7764   if (VT.isVector()) {
7765     VecVT = IntVT;
7766     SetVecVal();
7767   } else if (VT == MVT::f64) {
7768     VecVT = MVT::v2i64;
7769     SetVecVal(AArch64::dsub);
7770   } else if (VT == MVT::f32) {
7771     VecVT = MVT::v4i32;
7772     SetVecVal(AArch64::ssub);
7773   } else if (VT == MVT::f16) {
7774     VecVT = MVT::v8i16;
7775     SetVecVal(AArch64::hsub);
7776   } else {
7777     llvm_unreachable("Invalid type for copysign!");
7778   }
7779 
7780   unsigned BitWidth = In1.getScalarValueSizeInBits();
7781   SDValue SignMaskV = DAG.getConstant(~APInt::getSignMask(BitWidth), DL, VecVT);
7782 
7783   // We want to materialize a mask with every bit but the high bit set, but the
7784   // AdvSIMD immediate moves cannot materialize that in a single instruction for
7785   // 64-bit elements. Instead, materialize all bits set and then negate that.
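  // For example, for v2i64 this is expected to become something like
  //   movi v0.2d, #0xffffffffffffffff
  //   fneg v0.2d, v0.2d
  // leaving 0x7fffffffffffffff (all bits except the sign bit) in each lane.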
7786   if (VT == MVT::f64 || VT == MVT::v2f64) {
7787     SignMaskV = DAG.getConstant(APInt::getAllOnes(BitWidth), DL, VecVT);
7788     SignMaskV = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, SignMaskV);
7789     SignMaskV = DAG.getNode(ISD::FNEG, DL, MVT::v2f64, SignMaskV);
7790     SignMaskV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, SignMaskV);
7791   }
7792 
7793   SDValue BSP =
7794       DAG.getNode(AArch64ISD::BSP, DL, VecVT, SignMaskV, VecVal1, VecVal2);
7795   if (VT == MVT::f16)
7796     return DAG.getTargetExtractSubreg(AArch64::hsub, DL, VT, BSP);
7797   if (VT == MVT::f32)
  // TODO: fmov h0, w0 is also legal, however we don't have an isel pattern to
  //       generate that fmov.
7800     return DAG.getTargetExtractSubreg(AArch64::dsub, DL, VT, BSP);
  // If we cannot materialize the immediate in the fmov immediate field, check
  // if the value can be encoded as the immediate operand of a logical
  // instruction. The immediate value will be created with MOVZ, MOVN, or ORR.
7804 
7805 SDValue AArch64TargetLowering::LowerCTPOP_PARITY(SDValue Op,
7806                                                  SelectionDAG &DAG) const {
7807   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
7808           Attribute::NoImplicitFloat))
7809     return SDValue();
7810 
7811   if (!Subtarget->hasNEON())
7812     return SDValue();
7813 
7814   bool IsParity = Op.getOpcode() == ISD::PARITY;
7815 
  // There is no integer popcount instruction, but CTPOP can be lowered
  // efficiently to the following sequence that uses AdvSIMD
  // registers/instructions, as long as the copies to/from the AdvSIMD
  // registers are cheap.
7820   //  FMOV    D0, X0        // copy 64-bit int to vector, high bits zero'd
7821   //  CNT     V0.8B, V0.8B  // 8xbyte pop-counts
7822   //  ADDV    B0, V0.8B     // sum 8xbyte pop-counts
7823   //  UMOV    X0, V0.B[0]   // copy byte result back to integer reg
7824   SDValue Val = Op.getOperand(0);
7825   SDLoc DL(Op);
7826   EVT VT = Op.getValueType();
7827 
7828   if (VT == MVT::i32 || VT == MVT::i64) {
7829     if (VT == MVT::i32)
7830       Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val);
7831     Val = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Val);
7832 
7833     SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v8i8, Val);
7834     SDValue UaddLV = DAG.getNode(
7835         ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
7836         DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop);
7837 
7838     if (IsParity)
7839       UaddLV = DAG.getNode(ISD::AND, DL, MVT::i32, UaddLV,
7840                            DAG.getConstant(1, DL, MVT::i32));
7841 
7842     if (VT == MVT::i64)
7843       UaddLV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, UaddLV);
7844     return UaddLV;
7845   } else if (VT == MVT::i128) {
7846     Val = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Val);
7847 
7848     SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v16i8, Val);
7849     SDValue UaddLV = DAG.getNode(
7850         ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
7851         DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop);
7852 
7853     if (IsParity)
7854       UaddLV = DAG.getNode(ISD::AND, DL, MVT::i32, UaddLV,
7855                            DAG.getConstant(1, DL, MVT::i32));
7856 
7857     return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i128, UaddLV);
7858   }
7859 
7860   assert(!IsParity && "ISD::PARITY of vector types not supported");
7861 
7862   if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT))
7863     return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTPOP_MERGE_PASSTHRU);
7864 
7865   assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
7866           VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
7867          "Unexpected type for custom ctpop lowering");
7868 
7869   EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
7870   Val = DAG.getBitcast(VT8Bit, Val);
7871   Val = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Val);
7872 
7873   // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds.
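  // For example, for a v4i32 result the chain of pairwise adds is:
  //   v16i8 -> (uaddlp) -> v8i16 -> (uaddlp) -> v4i32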
7874   unsigned EltSize = 8;
7875   unsigned NumElts = VT.is64BitVector() ? 8 : 16;
7876   while (EltSize != VT.getScalarSizeInBits()) {
7877     EltSize *= 2;
7878     NumElts /= 2;
7879     MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);
7880     Val = DAG.getNode(
7881         ISD::INTRINSIC_WO_CHAIN, DL, WidenVT,
7882         DAG.getConstant(Intrinsic::aarch64_neon_uaddlp, DL, MVT::i32), Val);
7883   }
7884 
7885   return Val;
7886 }
7887 
7888 SDValue AArch64TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const {
7889   EVT VT = Op.getValueType();
7890   assert(VT.isScalableVector() ||
7891          useSVEForFixedLengthVectorVT(
7892              VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()));
7893 
7894   SDLoc DL(Op);
7895   SDValue RBIT = DAG.getNode(ISD::BITREVERSE, DL, VT, Op.getOperand(0));
7896   return DAG.getNode(ISD::CTLZ, DL, VT, RBIT);
7897 }
7898 
7899 SDValue AArch64TargetLowering::LowerMinMax(SDValue Op,
7900                                            SelectionDAG &DAG) const {
7901 
7902   EVT VT = Op.getValueType();
7903   SDLoc DL(Op);
7904   unsigned Opcode = Op.getOpcode();
7905   ISD::CondCode CC;
7906   switch (Opcode) {
7907   default:
7908     llvm_unreachable("Wrong instruction");
7909   case ISD::SMAX:
7910     CC = ISD::SETGT;
7911     break;
7912   case ISD::SMIN:
7913     CC = ISD::SETLT;
7914     break;
7915   case ISD::UMAX:
7916     CC = ISD::SETUGT;
7917     break;
7918   case ISD::UMIN:
7919     CC = ISD::SETULT;
7920     break;
7921   }
7922 
7923   if (VT.isScalableVector() ||
7924       useSVEForFixedLengthVectorVT(
7925           VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors())) {
7926     switch (Opcode) {
7927     default:
7928       llvm_unreachable("Wrong instruction");
7929     case ISD::SMAX:
7930       return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMAX_PRED);
7931     case ISD::SMIN:
7932       return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMIN_PRED);
7933     case ISD::UMAX:
7934       return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMAX_PRED);
7935     case ISD::UMIN:
7936       return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMIN_PRED);
7937     }
7938   }
7939 
7940   SDValue Op0 = Op.getOperand(0);
7941   SDValue Op1 = Op.getOperand(1);
7942   SDValue Cond = DAG.getSetCC(DL, VT, Op0, Op1, CC);
7943   return DAG.getSelect(DL, VT, Cond, Op0, Op1);
7944 }
7945 
7946 SDValue AArch64TargetLowering::LowerBitreverse(SDValue Op,
7947                                                SelectionDAG &DAG) const {
7948   EVT VT = Op.getValueType();
7949 
7950   if (VT.isScalableVector() ||
7951       useSVEForFixedLengthVectorVT(
7952           VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()))
7953     return LowerToPredicatedOp(Op, DAG, AArch64ISD::BITREVERSE_MERGE_PASSTHRU);
7954 
7955   SDLoc DL(Op);
7956   SDValue REVB;
7957   MVT VST;
7958 
7959   switch (VT.getSimpleVT().SimpleTy) {
7960   default:
7961     llvm_unreachable("Invalid type for bitreverse!");
7962 
7963   case MVT::v2i32: {
7964     VST = MVT::v8i8;
7965     REVB = DAG.getNode(AArch64ISD::REV32, DL, VST, Op.getOperand(0));
7966 
7967     break;
7968   }
7969 
7970   case MVT::v4i32: {
7971     VST = MVT::v16i8;
7972     REVB = DAG.getNode(AArch64ISD::REV32, DL, VST, Op.getOperand(0));
7973 
7974     break;
7975   }
7976 
7977   case MVT::v1i64: {
7978     VST = MVT::v8i8;
7979     REVB = DAG.getNode(AArch64ISD::REV64, DL, VST, Op.getOperand(0));
7980 
7981     break;
7982   }
7983 
7984   case MVT::v2i64: {
7985     VST = MVT::v16i8;
7986     REVB = DAG.getNode(AArch64ISD::REV64, DL, VST, Op.getOperand(0));
7987 
7988     break;
7989   }
7990   }
7991 
7992   return DAG.getNode(AArch64ISD::NVCAST, DL, VT,
7993                      DAG.getNode(ISD::BITREVERSE, DL, VST, REVB));
7994 }
7995 
7996 SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
7997 
7998   if (Op.getValueType().isVector())
7999     return LowerVSETCC(Op, DAG);
8000 
8001   bool IsStrict = Op->isStrictFPOpcode();
8002   bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
8003   unsigned OpNo = IsStrict ? 1 : 0;
8004   SDValue Chain;
8005   if (IsStrict)
8006     Chain = Op.getOperand(0);
8007   SDValue LHS = Op.getOperand(OpNo + 0);
8008   SDValue RHS = Op.getOperand(OpNo + 1);
8009   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(OpNo + 2))->get();
8010   SDLoc dl(Op);
8011 
8012   // We chose ZeroOrOneBooleanContents, so use zero and one.
8013   EVT VT = Op.getValueType();
8014   SDValue TVal = DAG.getConstant(1, dl, VT);
8015   SDValue FVal = DAG.getConstant(0, dl, VT);
8016 
8017   // Handle f128 first, since one possible outcome is a normal integer
8018   // comparison which gets picked up by the next if statement.
8019   if (LHS.getValueType() == MVT::f128) {
8020     softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS, Chain,
8021                         IsSignaling);
8022 
8023     // If softenSetCCOperands returned a scalar, use it.
8024     if (!RHS.getNode()) {
8025       assert(LHS.getValueType() == Op.getValueType() &&
8026              "Unexpected setcc expansion!");
8027       return IsStrict ? DAG.getMergeValues({LHS, Chain}, dl) : LHS;
8028     }
8029   }
8030 
8031   if (LHS.getValueType().isInteger()) {
8032     SDValue CCVal;
8033     SDValue Cmp = getAArch64Cmp(
8034         LHS, RHS, ISD::getSetCCInverse(CC, LHS.getValueType()), CCVal, DAG, dl);
8035 
8036     // Note that we inverted the condition above, so we reverse the order of
8037     // the true and false operands here.  This will allow the setcc to be
8038     // matched to a single CSINC instruction.
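    // For example, (seteq x, y) is expected to end up roughly as
    //   cmp  x, y
    //   cset w0, eq
    // where cset is an alias of csinc with wzr as both sources.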
8039     SDValue Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CCVal, Cmp);
8040     return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
8041   }
8042 
8043   // Now we know we're dealing with FP values.
8044   assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 ||
8045          LHS.getValueType() == MVT::f64);
8046 
  // We'll need to perform an FCMP + CSEL sequence.  Go ahead and do the
  // comparison.
8049   SDValue Cmp;
8050   if (IsStrict)
8051     Cmp = emitStrictFPComparison(LHS, RHS, dl, DAG, Chain, IsSignaling);
8052   else
8053     Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
8054 
8055   AArch64CC::CondCode CC1, CC2;
8056   changeFPCCToAArch64CC(CC, CC1, CC2);
8057   SDValue Res;
8058   if (CC2 == AArch64CC::AL) {
8059     changeFPCCToAArch64CC(ISD::getSetCCInverse(CC, LHS.getValueType()), CC1,
8060                           CC2);
8061     SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
8062 
8063     // Note that we inverted the condition above, so we reverse the order of
8064     // the true and false operands here.  This will allow the setcc to be
8065     // matched to a single CSINC instruction.
8066     Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CC1Val, Cmp);
8067   } else {
8068     // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't
8069     // totally clean.  Some of them require two CSELs to implement.  As is in
8070     // this case, we emit the first CSEL and then emit a second using the output
8071     // of the first as the RHS.  We're effectively OR'ing the two CC's together.
8072 
8073     // FIXME: It would be nice if we could match the two CSELs to two CSINCs.
8074     SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
8075     SDValue CS1 =
8076         DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);
8077 
8078     SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
8079     Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
8080   }
8081   return IsStrict ? DAG.getMergeValues({Res, Cmp.getValue(1)}, dl) : Res;
8082 }
8083 
8084 SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS,
8085                                               SDValue RHS, SDValue TVal,
8086                                               SDValue FVal, const SDLoc &dl,
8087                                               SelectionDAG &DAG) const {
8088   // Handle f128 first, because it will result in a comparison of some RTLIB
8089   // call result against zero.
8090   if (LHS.getValueType() == MVT::f128) {
8091     softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS);
8092 
8093     // If softenSetCCOperands returned a scalar, we need to compare the result
8094     // against zero to select between true and false values.
8095     if (!RHS.getNode()) {
8096       RHS = DAG.getConstant(0, dl, LHS.getValueType());
8097       CC = ISD::SETNE;
8098     }
8099   }
8100 
8101   // Also handle f16, for which we need to do a f32 comparison.
8102   if (LHS.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
8103     LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS);
8104     RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS);
8105   }
8106 
8107   // Next, handle integers.
8108   if (LHS.getValueType().isInteger()) {
8109     assert((LHS.getValueType() == RHS.getValueType()) &&
8110            (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));
8111 
8112     ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
8113     ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
8114     ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
    // Check for sign pattern (SELECT_CC setgt, iN lhs, -1, 1, -1) and transform
    // into (OR (ASR lhs, N-1), 1), which requires fewer instructions for the
    // supported types.
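    // For example, for i32 this turns (x > -1 ? 1 : -1) into ((x asr 31) | 1),
    // which evaluates to 1 when x >= 0 and to -1 when x < 0.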
8118     if (CC == ISD::SETGT && RHSC && RHSC->isAllOnes() && CTVal && CFVal &&
8119         CTVal->isOne() && CFVal->isAllOnes() &&
8120         LHS.getValueType() == TVal.getValueType()) {
8121       EVT VT = LHS.getValueType();
8122       SDValue Shift =
8123           DAG.getNode(ISD::SRA, dl, VT, LHS,
8124                       DAG.getConstant(VT.getSizeInBits() - 1, dl, VT));
8125       return DAG.getNode(ISD::OR, dl, VT, Shift, DAG.getConstant(1, dl, VT));
8126     }
8127 
8128     unsigned Opcode = AArch64ISD::CSEL;
8129 
    // If both the TVal and the FVal are constants, see if we can swap them in
    // order to form a CSINV or CSINC out of them.
8132     if (CTVal && CFVal && CTVal->isAllOnes() && CFVal->isZero()) {
8133       std::swap(TVal, FVal);
8134       std::swap(CTVal, CFVal);
8135       CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8136     } else if (CTVal && CFVal && CTVal->isOne() && CFVal->isZero()) {
8137       std::swap(TVal, FVal);
8138       std::swap(CTVal, CFVal);
8139       CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8140     } else if (TVal.getOpcode() == ISD::XOR) {
8141       // If TVal is a NOT we want to swap TVal and FVal so that we can match
8142       // with a CSINV rather than a CSEL.
8143       if (isAllOnesConstant(TVal.getOperand(1))) {
8144         std::swap(TVal, FVal);
8145         std::swap(CTVal, CFVal);
8146         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8147       }
8148     } else if (TVal.getOpcode() == ISD::SUB) {
8149       // If TVal is a negation (SUB from 0) we want to swap TVal and FVal so
8150       // that we can match with a CSNEG rather than a CSEL.
8151       if (isNullConstant(TVal.getOperand(0))) {
8152         std::swap(TVal, FVal);
8153         std::swap(CTVal, CFVal);
8154         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8155       }
8156     } else if (CTVal && CFVal) {
8157       const int64_t TrueVal = CTVal->getSExtValue();
8158       const int64_t FalseVal = CFVal->getSExtValue();
8159       bool Swap = false;
8160 
8161       // If both TVal and FVal are constants, see if FVal is the
8162       // inverse/negation/increment of TVal and generate a CSINV/CSNEG/CSINC
8163       // instead of a CSEL in that case.
8164       if (TrueVal == ~FalseVal) {
8165         Opcode = AArch64ISD::CSINV;
8166       } else if (FalseVal > std::numeric_limits<int64_t>::min() &&
8167                  TrueVal == -FalseVal) {
8168         Opcode = AArch64ISD::CSNEG;
8169       } else if (TVal.getValueType() == MVT::i32) {
8170         // If our operands are only 32-bit wide, make sure we use 32-bit
8171         // arithmetic for the check whether we can use CSINC. This ensures that
8172         // the addition in the check will wrap around properly in case there is
8173         // an overflow (which would not be the case if we do the check with
8174         // 64-bit arithmetic).
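        // For example, with TVal == 0x7fffffff and FVal == 0x80000000, the
        // 32-bit check sees TrueVal32 + 1 == FalseVal32 and selects CSINC,
        // whereas the sign-extended 64-bit values (INT32_MAX and INT32_MIN)
        // satisfy neither 64-bit condition.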
8175         const uint32_t TrueVal32 = CTVal->getZExtValue();
8176         const uint32_t FalseVal32 = CFVal->getZExtValue();
8177 
8178         if ((TrueVal32 == FalseVal32 + 1) || (TrueVal32 + 1 == FalseVal32)) {
8179           Opcode = AArch64ISD::CSINC;
8180 
8181           if (TrueVal32 > FalseVal32) {
8182             Swap = true;
8183           }
8184         }
8185         // 64-bit check whether we can use CSINC.
8186       } else if ((TrueVal == FalseVal + 1) || (TrueVal + 1 == FalseVal)) {
8187         Opcode = AArch64ISD::CSINC;
8188 
8189         if (TrueVal > FalseVal) {
8190           Swap = true;
8191         }
8192       }
8193 
8194       // Swap TVal and FVal if necessary.
8195       if (Swap) {
8196         std::swap(TVal, FVal);
8197         std::swap(CTVal, CFVal);
8198         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8199       }
8200 
8201       if (Opcode != AArch64ISD::CSEL) {
8202         // Drop FVal since we can get its value by simply inverting/negating
8203         // TVal.
8204         FVal = TVal;
8205       }
8206     }
8207 
8208     // Avoid materializing a constant when possible by reusing a known value in
8209     // a register.  However, don't perform this optimization if the known value
8210     // is one, zero or negative one in the case of a CSEL.  We can always
8211     // materialize these values using CSINC, CSEL and CSINV with wzr/xzr as the
8212     // FVal, respectively.
8213     ConstantSDNode *RHSVal = dyn_cast<ConstantSDNode>(RHS);
8214     if (Opcode == AArch64ISD::CSEL && RHSVal && !RHSVal->isOne() &&
8215         !RHSVal->isZero() && !RHSVal->isAllOnes()) {
8216       AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
8217       // Transform "a == C ? C : x" to "a == C ? a : x" and "a != C ? x : C" to
8218       // "a != C ? x : a" to avoid materializing C.
8219       if (CTVal && CTVal == RHSVal && AArch64CC == AArch64CC::EQ)
8220         TVal = LHS;
8221       else if (CFVal && CFVal == RHSVal && AArch64CC == AArch64CC::NE)
8222         FVal = LHS;
8223     } else if (Opcode == AArch64ISD::CSNEG && RHSVal && RHSVal->isOne()) {
      assert(CTVal && CFVal && "Expected constant operands for CSNEG.");
8225       // Use a CSINV to transform "a == C ? 1 : -1" to "a == C ? a : -1" to
8226       // avoid materializing C.
8227       AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
8228       if (CTVal == RHSVal && AArch64CC == AArch64CC::EQ) {
8229         Opcode = AArch64ISD::CSINV;
8230         TVal = LHS;
8231         FVal = DAG.getConstant(0, dl, FVal.getValueType());
8232       }
8233     }
8234 
8235     SDValue CCVal;
8236     SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
8237     EVT VT = TVal.getValueType();
8238     return DAG.getNode(Opcode, dl, VT, TVal, FVal, CCVal, Cmp);
8239   }
8240 
8241   // Now we know we're dealing with FP values.
8242   assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 ||
8243          LHS.getValueType() == MVT::f64);
8244   assert(LHS.getValueType() == RHS.getValueType());
8245   EVT VT = TVal.getValueType();
8246   SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
8247 
8248   // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
8249   // clean.  Some of them require two CSELs to implement.
8250   AArch64CC::CondCode CC1, CC2;
8251   changeFPCCToAArch64CC(CC, CC1, CC2);
8252 
8253   if (DAG.getTarget().Options.UnsafeFPMath) {
8254     // Transform "a == 0.0 ? 0.0 : x" to "a == 0.0 ? a : x" and
8255     // "a != 0.0 ? x : 0.0" to "a != 0.0 ? x : a" to avoid materializing 0.0.
8256     ConstantFPSDNode *RHSVal = dyn_cast<ConstantFPSDNode>(RHS);
8257     if (RHSVal && RHSVal->isZero()) {
8258       ConstantFPSDNode *CFVal = dyn_cast<ConstantFPSDNode>(FVal);
8259       ConstantFPSDNode *CTVal = dyn_cast<ConstantFPSDNode>(TVal);
8260 
8261       if ((CC == ISD::SETEQ || CC == ISD::SETOEQ || CC == ISD::SETUEQ) &&
8262           CTVal && CTVal->isZero() && TVal.getValueType() == LHS.getValueType())
8263         TVal = LHS;
8264       else if ((CC == ISD::SETNE || CC == ISD::SETONE || CC == ISD::SETUNE) &&
8265                CFVal && CFVal->isZero() &&
8266                FVal.getValueType() == LHS.getValueType())
8267         FVal = LHS;
8268     }
8269   }
8270 
8271   // Emit first, and possibly only, CSEL.
8272   SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
8273   SDValue CS1 = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);
8274 
8275   // If we need a second CSEL, emit it, using the output of the first as the
8276   // RHS.  We're effectively OR'ing the two CC's together.
8277   if (CC2 != AArch64CC::AL) {
8278     SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
8279     return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
8280   }
8281 
8282   // Otherwise, return the output of the first CSEL.
8283   return CS1;
8284 }
8285 
8286 SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
8287                                                   SelectionDAG &DAG) const {
8288   EVT Ty = Op.getValueType();
8289   auto Idx = Op.getConstantOperandAPInt(2);
8290   int64_t IdxVal = Idx.getSExtValue();
8291   assert(Ty.isScalableVector() &&
8292          "Only expect scalable vectors for custom lowering of VECTOR_SPLICE");
8293 
8294   // We can use the splice instruction for certain index values where we are
8295   // able to efficiently generate the correct predicate. The index will be
8296   // inverted and used directly as the input to the ptrue instruction, i.e.
8297   // -1 -> vl1, -2 -> vl2, etc. The predicate will then be reversed to get the
8298   // splice predicate. However, we can only do this if we can guarantee that
8299   // there are enough elements in the vector, hence we check the index <= min
8300   // number of elements.
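  // For example, for nxv4i32 and IdxVal == -2, "ptrue p0.s, vl2" followed by a
  // reverse yields a predicate whose last two elements are active, so the
  // splice produces the last two elements of the first operand followed by
  // elements of the second operand.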
8301   Optional<unsigned> PredPattern;
8302   if (Ty.isScalableVector() && IdxVal < 0 &&
8303       (PredPattern = getSVEPredPatternFromNumElements(std::abs(IdxVal))) !=
8304           None) {
8305     SDLoc DL(Op);
8306 
8307     // Create a predicate where all but the last -IdxVal elements are false.
8308     EVT PredVT = Ty.changeVectorElementType(MVT::i1);
8309     SDValue Pred = getPTrue(DAG, DL, PredVT, *PredPattern);
8310     Pred = DAG.getNode(ISD::VECTOR_REVERSE, DL, PredVT, Pred);
8311 
8312     // Now splice the two inputs together using the predicate.
8313     return DAG.getNode(AArch64ISD::SPLICE, DL, Ty, Pred, Op.getOperand(0),
8314                        Op.getOperand(1));
8315   }
8316 
8317   // This will select to an EXT instruction, which has a maximum immediate
8318   // value of 255, hence 2048-bits is the maximum value we can lower.
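  // For example, with 16-bit elements, indices 0..127 can be handled here: the
  // byte offset 127 * 2 = 254 still fits in the 8-bit EXT immediate.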
8319   if (IdxVal >= 0 &&
8320       IdxVal < int64_t(2048 / Ty.getVectorElementType().getSizeInBits()))
8321     return Op;
8322 
8323   return SDValue();
8324 }
8325 
8326 SDValue AArch64TargetLowering::LowerSELECT_CC(SDValue Op,
8327                                               SelectionDAG &DAG) const {
8328   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
8329   SDValue LHS = Op.getOperand(0);
8330   SDValue RHS = Op.getOperand(1);
8331   SDValue TVal = Op.getOperand(2);
8332   SDValue FVal = Op.getOperand(3);
8333   SDLoc DL(Op);
8334   return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
8335 }
8336 
8337 SDValue AArch64TargetLowering::LowerSELECT(SDValue Op,
8338                                            SelectionDAG &DAG) const {
8339   SDValue CCVal = Op->getOperand(0);
8340   SDValue TVal = Op->getOperand(1);
8341   SDValue FVal = Op->getOperand(2);
8342   SDLoc DL(Op);
8343 
8344   EVT Ty = Op.getValueType();
8345   if (Ty.isScalableVector()) {
8346     SDValue TruncCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, CCVal);
8347     MVT PredVT = MVT::getVectorVT(MVT::i1, Ty.getVectorElementCount());
8348     SDValue SplatPred = DAG.getNode(ISD::SPLAT_VECTOR, DL, PredVT, TruncCC);
8349     return DAG.getNode(ISD::VSELECT, DL, Ty, SplatPred, TVal, FVal);
8350   }
8351 
8352   if (useSVEForFixedLengthVectorVT(Ty)) {
8353     // FIXME: Ideally this would be the same as above using i1 types, however
8354     // for the moment we can't deal with fixed i1 vector types properly, so
8355     // instead extend the predicate to a result type sized integer vector.
8356     MVT SplatValVT = MVT::getIntegerVT(Ty.getScalarSizeInBits());
8357     MVT PredVT = MVT::getVectorVT(SplatValVT, Ty.getVectorElementCount());
8358     SDValue SplatVal = DAG.getSExtOrTrunc(CCVal, DL, SplatValVT);
8359     SDValue SplatPred = DAG.getNode(ISD::SPLAT_VECTOR, DL, PredVT, SplatVal);
8360     return DAG.getNode(ISD::VSELECT, DL, Ty, SplatPred, TVal, FVal);
8361   }
8362 
8363   // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a select
8364   // instruction.
8365   if (ISD::isOverflowIntrOpRes(CCVal)) {
8366     // Only lower legal XALUO ops.
8367     if (!DAG.getTargetLoweringInfo().isTypeLegal(CCVal->getValueType(0)))
8368       return SDValue();
8369 
8370     AArch64CC::CondCode OFCC;
8371     SDValue Value, Overflow;
8372     std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, CCVal.getValue(0), DAG);
8373     SDValue CCVal = DAG.getConstant(OFCC, DL, MVT::i32);
8374 
8375     return DAG.getNode(AArch64ISD::CSEL, DL, Op.getValueType(), TVal, FVal,
8376                        CCVal, Overflow);
8377   }
8378 
8379   // Lower it the same way as we would lower a SELECT_CC node.
8380   ISD::CondCode CC;
8381   SDValue LHS, RHS;
8382   if (CCVal.getOpcode() == ISD::SETCC) {
8383     LHS = CCVal.getOperand(0);
8384     RHS = CCVal.getOperand(1);
8385     CC = cast<CondCodeSDNode>(CCVal.getOperand(2))->get();
8386   } else {
8387     LHS = CCVal;
8388     RHS = DAG.getConstant(0, DL, CCVal.getValueType());
8389     CC = ISD::SETNE;
8390   }
8391   return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
8392 }
8393 
8394 SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op,
8395                                               SelectionDAG &DAG) const {
  // Jump table entries are emitted as PC-relative offsets. No additional
  // tweaking is necessary here; just get the address of the jump table.
8398   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
8399 
8400   if (getTargetMachine().getCodeModel() == CodeModel::Large &&
8401       !Subtarget->isTargetMachO()) {
8402     return getAddrLarge(JT, DAG);
8403   } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
8404     return getAddrTiny(JT, DAG);
8405   }
8406   return getAddr(JT, DAG);
8407 }
8408 
8409 SDValue AArch64TargetLowering::LowerBR_JT(SDValue Op,
8410                                           SelectionDAG &DAG) const {
  // Jump table entries are emitted as PC-relative offsets. No additional
  // tweaking is necessary here; just get the address of the jump table.
8413   SDLoc DL(Op);
8414   SDValue JT = Op.getOperand(1);
8415   SDValue Entry = Op.getOperand(2);
8416   int JTI = cast<JumpTableSDNode>(JT.getNode())->getIndex();
8417 
8418   auto *AFI = DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
8419   AFI->setJumpTableEntryInfo(JTI, 4, nullptr);
8420 
8421   SDNode *Dest =
8422       DAG.getMachineNode(AArch64::JumpTableDest32, DL, MVT::i64, MVT::i64, JT,
8423                          Entry, DAG.getTargetJumpTable(JTI, MVT::i32));
8424   return DAG.getNode(ISD::BRIND, DL, MVT::Other, Op.getOperand(0),
8425                      SDValue(Dest, 0));
8426 }
8427 
8428 SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op,
8429                                                  SelectionDAG &DAG) const {
8430   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
8431 
8432   if (getTargetMachine().getCodeModel() == CodeModel::Large) {
8433     // Use the GOT for the large code model on iOS.
8434     if (Subtarget->isTargetMachO()) {
8435       return getGOT(CP, DAG);
8436     }
8437     return getAddrLarge(CP, DAG);
8438   } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
8439     return getAddrTiny(CP, DAG);
8440   } else {
8441     return getAddr(CP, DAG);
8442   }
8443 }
8444 
8445 SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op,
8446                                                SelectionDAG &DAG) const {
8447   BlockAddressSDNode *BA = cast<BlockAddressSDNode>(Op);
8448   if (getTargetMachine().getCodeModel() == CodeModel::Large &&
8449       !Subtarget->isTargetMachO()) {
8450     return getAddrLarge(BA, DAG);
8451   } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
8452     return getAddrTiny(BA, DAG);
8453   }
8454   return getAddr(BA, DAG);
8455 }
8456 
8457 SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op,
8458                                                  SelectionDAG &DAG) const {
8459   AArch64FunctionInfo *FuncInfo =
8460       DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
8461 
8462   SDLoc DL(Op);
8463   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(),
8464                                  getPointerTy(DAG.getDataLayout()));
8465   FR = DAG.getZExtOrTrunc(FR, DL, getPointerMemTy(DAG.getDataLayout()));
8466   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8467   return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
8468                       MachinePointerInfo(SV));
8469 }
8470 
8471 SDValue AArch64TargetLowering::LowerWin64_VASTART(SDValue Op,
8472                                                   SelectionDAG &DAG) const {
8473   AArch64FunctionInfo *FuncInfo =
8474       DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
8475 
8476   SDLoc DL(Op);
8477   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsGPRSize() > 0
8478                                      ? FuncInfo->getVarArgsGPRIndex()
8479                                      : FuncInfo->getVarArgsStackIndex(),
8480                                  getPointerTy(DAG.getDataLayout()));
8481   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8482   return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
8483                       MachinePointerInfo(SV));
8484 }
8485 
8486 SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
8487                                                   SelectionDAG &DAG) const {
8488   // The layout of the va_list struct is specified in the AArch64 Procedure Call
8489   // Standard, section B.3.
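  // For LP64 targets, the layout written below is:
  //   struct va_list {
  //     void *__stack;   // offset 0
  //     void *__gr_top;  // offset 8
  //     void *__vr_top;  // offset 16
  //     int   __gr_offs; // offset 24
  //     int   __vr_offs; // offset 28
  //   };
  // with the pointer-sized members shrinking to 4 bytes on ILP32.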
8490   MachineFunction &MF = DAG.getMachineFunction();
8491   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
8492   unsigned PtrSize = Subtarget->isTargetILP32() ? 4 : 8;
8493   auto PtrMemVT = getPointerMemTy(DAG.getDataLayout());
8494   auto PtrVT = getPointerTy(DAG.getDataLayout());
8495   SDLoc DL(Op);
8496 
8497   SDValue Chain = Op.getOperand(0);
8498   SDValue VAList = Op.getOperand(1);
8499   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8500   SmallVector<SDValue, 4> MemOps;
8501 
8502   // void *__stack at offset 0
8503   unsigned Offset = 0;
8504   SDValue Stack = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(), PtrVT);
8505   Stack = DAG.getZExtOrTrunc(Stack, DL, PtrMemVT);
8506   MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList,
8507                                 MachinePointerInfo(SV), Align(PtrSize)));
8508 
8509   // void *__gr_top at offset 8 (4 on ILP32)
8510   Offset += PtrSize;
8511   int GPRSize = FuncInfo->getVarArgsGPRSize();
8512   if (GPRSize > 0) {
8513     SDValue GRTop, GRTopAddr;
8514 
8515     GRTopAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8516                             DAG.getConstant(Offset, DL, PtrVT));
8517 
8518     GRTop = DAG.getFrameIndex(FuncInfo->getVarArgsGPRIndex(), PtrVT);
8519     GRTop = DAG.getNode(ISD::ADD, DL, PtrVT, GRTop,
8520                         DAG.getConstant(GPRSize, DL, PtrVT));
8521     GRTop = DAG.getZExtOrTrunc(GRTop, DL, PtrMemVT);
8522 
8523     MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
8524                                   MachinePointerInfo(SV, Offset),
8525                                   Align(PtrSize)));
8526   }
8527 
8528   // void *__vr_top at offset 16 (8 on ILP32)
8529   Offset += PtrSize;
8530   int FPRSize = FuncInfo->getVarArgsFPRSize();
8531   if (FPRSize > 0) {
8532     SDValue VRTop, VRTopAddr;
8533     VRTopAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8534                             DAG.getConstant(Offset, DL, PtrVT));
8535 
8536     VRTop = DAG.getFrameIndex(FuncInfo->getVarArgsFPRIndex(), PtrVT);
8537     VRTop = DAG.getNode(ISD::ADD, DL, PtrVT, VRTop,
8538                         DAG.getConstant(FPRSize, DL, PtrVT));
8539     VRTop = DAG.getZExtOrTrunc(VRTop, DL, PtrMemVT);
8540 
8541     MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
8542                                   MachinePointerInfo(SV, Offset),
8543                                   Align(PtrSize)));
8544   }
8545 
8546   // int __gr_offs at offset 24 (12 on ILP32)
8547   Offset += PtrSize;
8548   SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8549                                    DAG.getConstant(Offset, DL, PtrVT));
8550   MemOps.push_back(
8551       DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, DL, MVT::i32),
8552                    GROffsAddr, MachinePointerInfo(SV, Offset), Align(4)));
8553 
8554   // int __vr_offs at offset 28 (16 on ILP32)
8555   Offset += 4;
8556   SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8557                                    DAG.getConstant(Offset, DL, PtrVT));
8558   MemOps.push_back(
8559       DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, DL, MVT::i32),
8560                    VROffsAddr, MachinePointerInfo(SV, Offset), Align(4)));
8561 
8562   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
8563 }
8564 
8565 SDValue AArch64TargetLowering::LowerVASTART(SDValue Op,
8566                                             SelectionDAG &DAG) const {
8567   MachineFunction &MF = DAG.getMachineFunction();
8568 
8569   if (Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()))
8570     return LowerWin64_VASTART(Op, DAG);
8571   else if (Subtarget->isTargetDarwin())
8572     return LowerDarwin_VASTART(Op, DAG);
8573   else
8574     return LowerAAPCS_VASTART(Op, DAG);
8575 }
8576 
8577 SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op,
8578                                            SelectionDAG &DAG) const {
  // AAPCS has three pointers and two ints (= 32 bytes on LP64, 20 on ILP32);
  // Darwin and Windows use a single pointer.
8581   SDLoc DL(Op);
8582   unsigned PtrSize = Subtarget->isTargetILP32() ? 4 : 8;
8583   unsigned VaListSize =
8584       (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())
8585           ? PtrSize
8586           : Subtarget->isTargetILP32() ? 20 : 32;
8587   const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
8588   const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
8589 
8590   return DAG.getMemcpy(Op.getOperand(0), DL, Op.getOperand(1), Op.getOperand(2),
8591                        DAG.getConstant(VaListSize, DL, MVT::i32),
8592                        Align(PtrSize), false, false, false,
8593                        MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
8594 }
8595 
8596 SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
8597   assert(Subtarget->isTargetDarwin() &&
8598          "automatic va_arg instruction only works on Darwin");
8599 
8600   const Value *V = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8601   EVT VT = Op.getValueType();
8602   SDLoc DL(Op);
8603   SDValue Chain = Op.getOperand(0);
8604   SDValue Addr = Op.getOperand(1);
8605   MaybeAlign Align(Op.getConstantOperandVal(3));
8606   unsigned MinSlotSize = Subtarget->isTargetILP32() ? 4 : 8;
8607   auto PtrVT = getPointerTy(DAG.getDataLayout());
8608   auto PtrMemVT = getPointerMemTy(DAG.getDataLayout());
8609   SDValue VAList =
8610       DAG.getLoad(PtrMemVT, DL, Chain, Addr, MachinePointerInfo(V));
8611   Chain = VAList.getValue(1);
8612   VAList = DAG.getZExtOrTrunc(VAList, DL, PtrVT);
8613 
8614   if (VT.isScalableVector())
8615     report_fatal_error("Passing SVE types to variadic functions is "
8616                        "currently not supported");
8617 
8618   if (Align && *Align > MinSlotSize) {
8619     VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8620                          DAG.getConstant(Align->value() - 1, DL, PtrVT));
8621     VAList = DAG.getNode(ISD::AND, DL, PtrVT, VAList,
8622                          DAG.getConstant(-(int64_t)Align->value(), DL, PtrVT));
8623   }
8624 
8625   Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
8626   unsigned ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
8627 
8628   // Scalar integer and FP values smaller than 64 bits are implicitly extended
8629   // up to 64 bits.  At the very least, we have to increase the striding of the
8630   // vaargs list to match this, and for FP values we need to introduce
8631   // FP_ROUND nodes as well.
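  // For example, a float argument occupies an 8-byte slot here, so it is
  // loaded below as an f64 and then rounded back down to f32.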
8632   if (VT.isInteger() && !VT.isVector())
8633     ArgSize = std::max(ArgSize, MinSlotSize);
8634   bool NeedFPTrunc = false;
8635   if (VT.isFloatingPoint() && !VT.isVector() && VT != MVT::f64) {
8636     ArgSize = 8;
8637     NeedFPTrunc = true;
8638   }
8639 
8640   // Increment the pointer, VAList, to the next vaarg
8641   SDValue VANext = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8642                                DAG.getConstant(ArgSize, DL, PtrVT));
8643   VANext = DAG.getZExtOrTrunc(VANext, DL, PtrMemVT);
8644 
8645   // Store the incremented VAList to the legalized pointer
8646   SDValue APStore =
8647       DAG.getStore(Chain, DL, VANext, Addr, MachinePointerInfo(V));
8648 
8649   // Load the actual argument out of the pointer VAList
8650   if (NeedFPTrunc) {
8651     // Load the value as an f64.
8652     SDValue WideFP =
8653         DAG.getLoad(MVT::f64, DL, APStore, VAList, MachinePointerInfo());
8654     // Round the value down to an f32.
8655     SDValue NarrowFP = DAG.getNode(ISD::FP_ROUND, DL, VT, WideFP.getValue(0),
8656                                    DAG.getIntPtrConstant(1, DL));
8657     SDValue Ops[] = { NarrowFP, WideFP.getValue(1) };
8658     // Merge the rounded value with the chain output of the load.
8659     return DAG.getMergeValues(Ops, DL);
8660   }
8661 
8662   return DAG.getLoad(VT, DL, APStore, VAList, MachinePointerInfo());
8663 }
8664 
8665 SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
8666                                               SelectionDAG &DAG) const {
8667   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8668   MFI.setFrameAddressIsTaken(true);
8669 
8670   EVT VT = Op.getValueType();
8671   SDLoc DL(Op);
8672   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8673   SDValue FrameAddr =
8674       DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, MVT::i64);
8675   while (Depth--)
8676     FrameAddr = DAG.getLoad(VT, DL, DAG.getEntryNode(), FrameAddr,
8677                             MachinePointerInfo());
8678 
8679   if (Subtarget->isTargetILP32())
8680     FrameAddr = DAG.getNode(ISD::AssertZext, DL, MVT::i64, FrameAddr,
8681                             DAG.getValueType(VT));
8682 
8683   return FrameAddr;
8684 }
8685 
8686 SDValue AArch64TargetLowering::LowerSPONENTRY(SDValue Op,
8687                                               SelectionDAG &DAG) const {
8688   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8689 
8690   EVT VT = getPointerTy(DAG.getDataLayout());
8691   SDLoc DL(Op);
8692   int FI = MFI.CreateFixedObject(4, 0, false);
8693   return DAG.getFrameIndex(FI, VT);
8694 }
8695 
8696 #define GET_REGISTER_MATCHER
8697 #include "AArch64GenAsmMatcher.inc"
8698 
8699 // FIXME? Maybe this could be a TableGen attribute on some registers and
8700 // this table could be generated automatically from RegInfo.
8701 Register AArch64TargetLowering::
8702 getRegisterByName(const char* RegName, LLT VT, const MachineFunction &MF) const {
8703   Register Reg = MatchRegisterName(RegName);
8704   if (AArch64::X1 <= Reg && Reg <= AArch64::X28) {
8705     const MCRegisterInfo *MRI = Subtarget->getRegisterInfo();
8706     unsigned DwarfRegNum = MRI->getDwarfRegNum(Reg, false);
8707     if (!Subtarget->isXRegisterReserved(DwarfRegNum))
8708       Reg = 0;
8709   }
8710   if (Reg)
8711     return Reg;
8712   report_fatal_error(Twine("Invalid register name \""
8713                               + StringRef(RegName)  + "\"."));
8714 }
8715 
8716 SDValue AArch64TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
8717                                                      SelectionDAG &DAG) const {
8718   DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
8719 
8720   EVT VT = Op.getValueType();
8721   SDLoc DL(Op);
8722 
8723   SDValue FrameAddr =
8724       DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, VT);
8725   SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout()));
8726 
8727   return DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset);
8728 }
8729 
8730 SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
8731                                                SelectionDAG &DAG) const {
8732   MachineFunction &MF = DAG.getMachineFunction();
8733   MachineFrameInfo &MFI = MF.getFrameInfo();
8734   MFI.setReturnAddressIsTaken(true);
8735 
8736   EVT VT = Op.getValueType();
8737   SDLoc DL(Op);
8738   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8739   SDValue ReturnAddress;
8740   if (Depth) {
8741     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
8742     SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout()));
8743     ReturnAddress = DAG.getLoad(
8744         VT, DL, DAG.getEntryNode(),
8745         DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset), MachinePointerInfo());
8746   } else {
8747     // Return LR, which contains the return address. Mark it an implicit
8748     // live-in.
8749     Register Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass);
8750     ReturnAddress = DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
8751   }
8752 
  // The XPACLRI instruction assembles to a hint-space instruction before
  // Armv8.3-A, so it can safely be used on any pre-Armv8.3-A architecture.
  // On Armv8.3-A and onwards XPACI is available, so use that instead.
8757   SDNode *St;
8758   if (Subtarget->hasPAuth()) {
8759     St = DAG.getMachineNode(AArch64::XPACI, DL, VT, ReturnAddress);
8760   } else {
8761     // XPACLRI operates on LR therefore we must move the operand accordingly.
8762     SDValue Chain =
8763         DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::LR, ReturnAddress);
8764     St = DAG.getMachineNode(AArch64::XPACLRI, DL, VT, Chain);
8765   }
8766   return SDValue(St, 0);
8767 }
8768 
/// LowerShiftParts - Lower SHL_PARTS/SRA_PARTS/SRL_PARTS, which return two
/// i64 values and take a 2 x i64 value to shift plus a shift amount.
8771 SDValue AArch64TargetLowering::LowerShiftParts(SDValue Op,
8772                                                SelectionDAG &DAG) const {
8773   SDValue Lo, Hi;
8774   expandShiftParts(Op.getNode(), Lo, Hi, DAG);
8775   return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
8776 }
8777 
8778 bool AArch64TargetLowering::isOffsetFoldingLegal(
8779     const GlobalAddressSDNode *GA) const {
8780   // Offsets are folded in the DAG combine rather than here so that we can
8781   // intelligently choose an offset based on the uses.
8782   return false;
8783 }
8784 
8785 bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
8786                                          bool OptForSize) const {
8787   bool IsLegal = false;
  // We can materialize #0.0 as fmov $Rd, XZR for the 64-bit and 32-bit cases,
  // and for the 16-bit case when the target has full fp16 support.
8790   // FIXME: We should be able to handle f128 as well with a clever lowering.
8791   const APInt ImmInt = Imm.bitcastToAPInt();
8792   if (VT == MVT::f64)
8793     IsLegal = AArch64_AM::getFP64Imm(ImmInt) != -1 || Imm.isPosZero();
8794   else if (VT == MVT::f32)
8795     IsLegal = AArch64_AM::getFP32Imm(ImmInt) != -1 || Imm.isPosZero();
8796   else if (VT == MVT::f16 && Subtarget->hasFullFP16())
8797     IsLegal = AArch64_AM::getFP16Imm(ImmInt) != -1 || Imm.isPosZero();
  // TODO: fmov h0, w0 is also legal, however we don't have an isel pattern to
  //       generate that fmov.
8800 
  // If we cannot materialize the value in the immediate field of an fmov,
  // check if it can be encoded as the immediate operand of a logical
  // instruction. The immediate value will be created with either MOVZ, MOVN,
  // or ORR.
8804   if (!IsLegal && (VT == MVT::f64 || VT == MVT::f32)) {
    // The cost is actually exactly the same for mov+fmov vs. adrp+ldr;
    // however the mov+fmov sequence is always better because of the reduced
    // cache pressure. The timings are still the same if you consider
    // movw+movk+fmov vs. adrp+ldr (it's one instruction longer, but the
    // movw+movk is fused). So we limit the expansion to at most 2
    // instructions.
8810     SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
    AArch64_IMM::expandMOVImm(ImmInt.getZExtValue(), VT.getSizeInBits(), Insn);
8813     unsigned Limit = (OptForSize ? 1 : (Subtarget->hasFuseLiterals() ? 5 : 2));
8814     IsLegal = Insn.size() <= Limit;
8815   }
8816 
8817   LLVM_DEBUG(dbgs() << (IsLegal ? "Legal " : "Illegal ") << VT.getEVTString()
8818                     << " imm value: "; Imm.dump(););
8819   return IsLegal;
8820 }
8821 
8822 //===----------------------------------------------------------------------===//
8823 //                          AArch64 Optimization Hooks
8824 //===----------------------------------------------------------------------===//
8825 
8826 static SDValue getEstimate(const AArch64Subtarget *ST, unsigned Opcode,
8827                            SDValue Operand, SelectionDAG &DAG,
8828                            int &ExtraSteps) {
8829   EVT VT = Operand.getValueType();
8830   if ((ST->hasNEON() &&
8831        (VT == MVT::f64 || VT == MVT::v1f64 || VT == MVT::v2f64 ||
8832         VT == MVT::f32 || VT == MVT::v1f32 || VT == MVT::v2f32 ||
8833         VT == MVT::v4f32)) ||
8834       (ST->hasSVE() &&
8835        (VT == MVT::nxv8f16 || VT == MVT::nxv4f32 || VT == MVT::nxv2f64))) {
8836     if (ExtraSteps == TargetLoweringBase::ReciprocalEstimate::Unspecified)
8837       // For the reciprocal estimates, convergence is quadratic, so the number
8838       // of digits is doubled after each iteration.  In ARMv8, the accuracy of
8839       // the initial estimate is 2^-8.  Thus the number of extra steps to refine
8840       // the result for float (23 mantissa bits) is 2 and for double (52
8841       // mantissa bits) is 3.
8842       ExtraSteps = VT.getScalarType() == MVT::f64 ? 3 : 2;
8843 
8844     return DAG.getNode(Opcode, SDLoc(Operand), VT, Operand);
8845   }
8846 
8847   return SDValue();
8848 }
8849 
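// Return a comparison of the sqrt input against +/-0.0, used by the generic
// square-root estimate expansion to guard the special case handled by
// getSqrtResultForDenormInput below.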
8850 SDValue
8851 AArch64TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
8852                                         const DenormalMode &Mode) const {
8853   SDLoc DL(Op);
8854   EVT VT = Op.getValueType();
8855   EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
8856   SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
8857   return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
8858 }
8859 
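// The only input special-cased by getSqrtInputTest above is +/-0.0, for which
// the input itself is already the exact square root, so return it unchanged.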
8860 SDValue
8861 AArch64TargetLowering::getSqrtResultForDenormInput(SDValue Op,
8862                                                    SelectionDAG &DAG) const {
8863   return Op;
8864 }
8865 
8866 SDValue AArch64TargetLowering::getSqrtEstimate(SDValue Operand,
8867                                                SelectionDAG &DAG, int Enabled,
8868                                                int &ExtraSteps,
8869                                                bool &UseOneConst,
8870                                                bool Reciprocal) const {
8871   if (Enabled == ReciprocalEstimate::Enabled ||
8872       (Enabled == ReciprocalEstimate::Unspecified && Subtarget->useRSqrt()))
8873     if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRSQRTE, Operand,
8874                                        DAG, ExtraSteps)) {
8875       SDLoc DL(Operand);
8876       EVT VT = Operand.getValueType();
8877 
8878       SDNodeFlags Flags;
8879       Flags.setAllowReassociation(true);
8880 
8881       // Newton reciprocal square root iteration: E * 0.5 * (3 - X * E^2)
8882       // AArch64 reciprocal square root iteration instruction: 0.5 * (3 - M * N)
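      // With E = Estimate and X = Operand, each step below computes
      // FRSQRTS(X, E * E) = 0.5 * (3 - X * E * E) and multiplies it into E.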
8883       for (int i = ExtraSteps; i > 0; --i) {
8884         SDValue Step = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Estimate,
8885                                    Flags);
8886         Step = DAG.getNode(AArch64ISD::FRSQRTS, DL, VT, Operand, Step, Flags);
8887         Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags);
8888       }
8889       if (!Reciprocal)
8890         Estimate = DAG.getNode(ISD::FMUL, DL, VT, Operand, Estimate, Flags);
8891 
8892       ExtraSteps = 0;
8893       return Estimate;
8894     }
8895 
8896   return SDValue();
8897 }
8898 
8899 SDValue AArch64TargetLowering::getRecipEstimate(SDValue Operand,
8900                                                 SelectionDAG &DAG, int Enabled,
8901                                                 int &ExtraSteps) const {
8902   if (Enabled == ReciprocalEstimate::Enabled)
8903     if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRECPE, Operand,
8904                                        DAG, ExtraSteps)) {
8905       SDLoc DL(Operand);
8906       EVT VT = Operand.getValueType();
8907 
8908       SDNodeFlags Flags;
8909       Flags.setAllowReassociation(true);
8910 
8911       // Newton reciprocal iteration: E * (2 - X * E)
8912       // AArch64 reciprocal iteration instruction: (2 - M * N)
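      // With E = Estimate and X = Operand, each step below computes
      // FRECPS(X, E) = 2 - X * E and multiplies it into E.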
8913       for (int i = ExtraSteps; i > 0; --i) {
8914         SDValue Step = DAG.getNode(AArch64ISD::FRECPS, DL, VT, Operand,
8915                                    Estimate, Flags);
8916         Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags);
8917       }
8918 
8919       ExtraSteps = 0;
8920       return Estimate;
8921     }
8922 
8923   return SDValue();
8924 }
8925 
8926 //===----------------------------------------------------------------------===//
8927 //                          AArch64 Inline Assembly Support
8928 //===----------------------------------------------------------------------===//
8929 
8930 // Table of Constraints
8931 // TODO: This is the current set of constraints supported by ARM for the
8932 // compiler, not all of them may make sense.
8933 //
8934 // r - A general register
8935 // w - An FP/SIMD register of some size in the range v0-v31
8936 // x - An FP/SIMD register of some size in the range v0-v15
8937 // I - Constant that can be used with an ADD instruction
8938 // J - Constant that can be used with a SUB instruction
8939 // K - Constant that can be used with a 32-bit logical instruction
8940 // L - Constant that can be used with a 64-bit logical instruction
8941 // M - Constant that can be used as a 32-bit MOV immediate
8942 // N - Constant that can be used as a 64-bit MOV immediate
8943 // Q - A memory reference with base register and no offset
8944 // S - A symbolic address
8945 // Y - Floating point constant zero
8946 // Z - Integer constant zero
8947 //
8948 //   Note that general register operands will be output using their 64-bit x
8949 // register name, whatever the size of the variable, unless the asm operand
8950 // is prefixed by the %w modifier. Floating-point and SIMD register operands
8951 // will be output with the v prefix unless prefixed by the %b, %h, %s, %d or
8952 // %q modifier.
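//
// For example (illustrative only), the "w" constraint below selects FP/SIMD
// registers and the %s modifier prints their 32-bit S-register names:
//   float Res, A, B;
//   asm("fadd %s0, %s1, %s2" : "=w"(Res) : "w"(A), "w"(B));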
8953 const char *AArch64TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
8954   // At this point, we have to lower this constraint to something else, so we
8955   // lower it to an "r" or "w". However, by doing this we will force the result
8956   // to be in register, while the X constraint is much more permissive.
8957   //
8958   // Although we are correct (we are free to emit anything, without
8959   // constraints), we might break use cases that would expect us to be more
8960   // efficient and emit something else.
8961   if (!Subtarget->hasFPARMv8())
8962     return "r";
8963 
8964   if (ConstraintVT.isFloatingPoint())
8965     return "w";
8966 
8967   if (ConstraintVT.isVector() &&
8968      (ConstraintVT.getSizeInBits() == 64 ||
8969       ConstraintVT.getSizeInBits() == 128))
8970     return "w";
8971 
8972   return "r";
8973 }
8974 
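// SVE predicate register constraints:
//   Upa - any SVE predicate register (p0-p15)
//   Upl - a "low" SVE predicate register (p0-p7)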
8975 enum PredicateConstraint {
8976   Upl,
8977   Upa,
8978   Invalid
8979 };
8980 
8981 static PredicateConstraint parsePredicateConstraint(StringRef Constraint) {
8982   PredicateConstraint P = PredicateConstraint::Invalid;
8983   if (Constraint == "Upa")
8984     P = PredicateConstraint::Upa;
8985   if (Constraint == "Upl")
8986     P = PredicateConstraint::Upl;
8987   return P;
8988 }
8989 
8990 /// getConstraintType - Given a constraint letter, return the type of
8991 /// constraint it is for this target.
8992 AArch64TargetLowering::ConstraintType
8993 AArch64TargetLowering::getConstraintType(StringRef Constraint) const {
8994   if (Constraint.size() == 1) {
8995     switch (Constraint[0]) {
8996     default:
8997       break;
8998     case 'x':
8999     case 'w':
9000     case 'y':
9001       return C_RegisterClass;
9002     // An address with a single base register. Due to the way we
9003     // currently handle addresses it is the same as 'r'.
9004     case 'Q':
9005       return C_Memory;
9006     case 'I':
9007     case 'J':
9008     case 'K':
9009     case 'L':
9010     case 'M':
9011     case 'N':
9012     case 'Y':
9013     case 'Z':
9014       return C_Immediate;
9015     case 'z':
9016     case 'S': // A symbolic address
9017       return C_Other;
9018     }
  } else if (parsePredicateConstraint(Constraint) !=
             PredicateConstraint::Invalid)
    return C_RegisterClass;
9022   return TargetLowering::getConstraintType(Constraint);
9023 }
9024 
9025 /// Examine constraint type and operand type and determine a weight value.
9026 /// This object must already have been set up with the operand type
9027 /// and the current alternative constraint selected.
9028 TargetLowering::ConstraintWeight
9029 AArch64TargetLowering::getSingleConstraintMatchWeight(
9030     AsmOperandInfo &info, const char *constraint) const {
9031   ConstraintWeight weight = CW_Invalid;
9032   Value *CallOperandVal = info.CallOperandVal;
9033   // If we don't have a value, we can't do a match,
9034   // but allow it at the lowest weight.
9035   if (!CallOperandVal)
9036     return CW_Default;
9037   Type *type = CallOperandVal->getType();
9038   // Look at the constraint type.
9039   switch (*constraint) {
9040   default:
9041     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
9042     break;
9043   case 'x':
9044   case 'w':
9045   case 'y':
9046     if (type->isFloatingPointTy() || type->isVectorTy())
9047       weight = CW_Register;
9048     break;
9049   case 'z':
9050     weight = CW_Constant;
9051     break;
9052   case 'U':
9053     if (parsePredicateConstraint(constraint) != PredicateConstraint::Invalid)
9054       weight = CW_Register;
9055     break;
9056   }
9057   return weight;
9058 }
9059 
9060 std::pair<unsigned, const TargetRegisterClass *>
9061 AArch64TargetLowering::getRegForInlineAsmConstraint(
9062     const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
9063   if (Constraint.size() == 1) {
9064     switch (Constraint[0]) {
9065     case 'r':
9066       if (VT.isScalableVector())
9067         return std::make_pair(0U, nullptr);
9068       if (Subtarget->hasLS64() && VT.getSizeInBits() == 512)
9069         return std::make_pair(0U, &AArch64::GPR64x8ClassRegClass);
9070       if (VT.getFixedSizeInBits() == 64)
9071         return std::make_pair(0U, &AArch64::GPR64commonRegClass);
9072       return std::make_pair(0U, &AArch64::GPR32commonRegClass);
9073     case 'w': {
9074       if (!Subtarget->hasFPARMv8())
9075         break;
9076       if (VT.isScalableVector()) {
9077         if (VT.getVectorElementType() != MVT::i1)
9078           return std::make_pair(0U, &AArch64::ZPRRegClass);
9079         return std::make_pair(0U, nullptr);
9080       }
9081       uint64_t VTSize = VT.getFixedSizeInBits();
9082       if (VTSize == 16)
9083         return std::make_pair(0U, &AArch64::FPR16RegClass);
9084       if (VTSize == 32)
9085         return std::make_pair(0U, &AArch64::FPR32RegClass);
9086       if (VTSize == 64)
9087         return std::make_pair(0U, &AArch64::FPR64RegClass);
9088       if (VTSize == 128)
9089         return std::make_pair(0U, &AArch64::FPR128RegClass);
9090       break;
9091     }
9092     // The instructions that this constraint is designed for can
9093     // only take 128-bit registers so just use that regclass.
9094     case 'x':
9095       if (!Subtarget->hasFPARMv8())
9096         break;
9097       if (VT.isScalableVector())
9098         return std::make_pair(0U, &AArch64::ZPR_4bRegClass);
9099       if (VT.getSizeInBits() == 128)
9100         return std::make_pair(0U, &AArch64::FPR128_loRegClass);
9101       break;
9102     case 'y':
9103       if (!Subtarget->hasFPARMv8())
9104         break;
9105       if (VT.isScalableVector())
9106         return std::make_pair(0U, &AArch64::ZPR_3bRegClass);
9107       break;
9108     }
9109   } else {
9110     PredicateConstraint PC = parsePredicateConstraint(Constraint);
9111     if (PC != PredicateConstraint::Invalid) {
9112       if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1)
9113         return std::make_pair(0U, nullptr);
9114       bool restricted = (PC == PredicateConstraint::Upl);
9115       return restricted ? std::make_pair(0U, &AArch64::PPR_3bRegClass)
9116                         : std::make_pair(0U, &AArch64::PPRRegClass);
9117     }
9118   }
9119   if (StringRef("{cc}").equals_insensitive(Constraint))
9120     return std::make_pair(unsigned(AArch64::NZCV), &AArch64::CCRRegClass);
9121 
9122   // Use the default implementation in TargetLowering to convert the register
9123   // constraint into a member of a register class.
9124   std::pair<unsigned, const TargetRegisterClass *> Res;
9125   Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
9126 
9127   // Not found as a standard register?
9128   if (!Res.second) {
9129     unsigned Size = Constraint.size();
9130     if ((Size == 4 || Size == 5) && Constraint[0] == '{' &&
9131         tolower(Constraint[1]) == 'v' && Constraint[Size - 1] == '}') {
9132       int RegNo;
9133       bool Failed = Constraint.slice(2, Size - 1).getAsInteger(10, RegNo);
9134       if (!Failed && RegNo >= 0 && RegNo <= 31) {
9135         // v0 - v31 are aliases of q0 - q31 or d0 - d31 depending on size.
9136         // By default we'll emit v0-v31 for this unless there's a modifier where
9137         // we'll emit the correct register as well.
9138         if (VT != MVT::Other && VT.getSizeInBits() == 64) {
9139           Res.first = AArch64::FPR64RegClass.getRegister(RegNo);
9140           Res.second = &AArch64::FPR64RegClass;
9141         } else {
9142           Res.first = AArch64::FPR128RegClass.getRegister(RegNo);
9143           Res.second = &AArch64::FPR128RegClass;
9144         }
9145       }
9146     }
9147   }
9148 
9149   if (Res.second && !Subtarget->hasFPARMv8() &&
9150       !AArch64::GPR32allRegClass.hasSubClassEq(Res.second) &&
9151       !AArch64::GPR64allRegClass.hasSubClassEq(Res.second))
9152     return std::make_pair(0U, nullptr);
9153 
9154   return Res;
9155 }
9156 
9157 EVT AArch64TargetLowering::getAsmOperandValueType(const DataLayout &DL,
9158                                                   llvm::Type *Ty,
9159                                                   bool AllowUnknown) const {
9160   if (Subtarget->hasLS64() && Ty->isIntegerTy(512))
9161     return EVT(MVT::i64x8);
9162 
9163   return TargetLowering::getAsmOperandValueType(DL, Ty, AllowUnknown);
9164 }
9165 
9166 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
9167 /// vector.  If it is invalid, don't add anything to Ops.
9168 void AArch64TargetLowering::LowerAsmOperandForConstraint(
9169     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
9170     SelectionDAG &DAG) const {
9171   SDValue Result;
9172 
9173   // Currently only support length 1 constraints.
9174   if (Constraint.length() != 1)
9175     return;
9176 
9177   char ConstraintLetter = Constraint[0];
9178   switch (ConstraintLetter) {
9179   default:
9180     break;
9181 
  // This set of constraints deals with valid constants for various
  // instructions. Validate and return a target constant for them if we can.
9184   case 'z': {
9185     // 'z' maps to xzr or wzr so it needs an input of 0.
9186     if (!isNullConstant(Op))
9187       return;
9188 
9189     if (Op.getValueType() == MVT::i64)
9190       Result = DAG.getRegister(AArch64::XZR, MVT::i64);
9191     else
9192       Result = DAG.getRegister(AArch64::WZR, MVT::i32);
9193     break;
9194   }
9195   case 'S': {
9196     // An absolute symbolic address or label reference.
9197     if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9198       Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
9199                                           GA->getValueType(0));
9200     } else if (const BlockAddressSDNode *BA =
9201                    dyn_cast<BlockAddressSDNode>(Op)) {
9202       Result =
9203           DAG.getTargetBlockAddress(BA->getBlockAddress(), BA->getValueType(0));
9204     } else
9205       return;
9206     break;
9207   }
9208 
9209   case 'I':
9210   case 'J':
9211   case 'K':
9212   case 'L':
9213   case 'M':
9214   case 'N':
9215     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
9216     if (!C)
9217       return;
9218 
9219     // Grab the value and do some validation.
9220     uint64_t CVal = C->getZExtValue();
9221     switch (ConstraintLetter) {
9222     // The I constraint applies only to simple ADD or SUB immediate operands:
9223     // i.e. 0 to 4095 with optional shift by 12
9224     // The J constraint applies only to ADD or SUB immediates that would be
9225     // valid when negated, i.e. if [an add pattern] were to be output as a SUB
9226     // instruction [or vice versa], in other words -1 to -4095 with optional
9227     // left shift by 12.
9228     case 'I':
9229       if (isUInt<12>(CVal) || isShiftedUInt<12, 12>(CVal))
9230         break;
9231       return;
9232     case 'J': {
9233       uint64_t NVal = -C->getSExtValue();
9234       if (isUInt<12>(NVal) || isShiftedUInt<12, 12>(NVal)) {
9235         CVal = C->getSExtValue();
9236         break;
9237       }
9238       return;
9239     }
9240     // The K and L constraints apply *only* to logical immediates, including
9241     // what used to be the MOVI alias for ORR (though the MOVI alias has now
9242     // been removed and MOV should be used). So these constraints have to
9243     // distinguish between bit patterns that are valid 32-bit or 64-bit
9244     // "bitmask immediates": for example 0xaaaaaaaa is a valid bimm32 (K), but
9245     // not a valid bimm64 (L) where 0xaaaaaaaaaaaaaaaa would be valid, and vice
9246     // versa.
9247     case 'K':
9248       if (AArch64_AM::isLogicalImmediate(CVal, 32))
9249         break;
9250       return;
9251     case 'L':
9252       if (AArch64_AM::isLogicalImmediate(CVal, 64))
9253         break;
9254       return;
9255     // The M and N constraints are a superset of K and L respectively, for use
9256     // with the MOV (immediate) alias. As well as the logical immediates they
9257     // also match 32 or 64-bit immediates that can be loaded either using a
    // *single* MOVZ or MOVN, such as 32-bit 0x12340000, 0x00001234, 0xffffedca
9259     // (M) or 64-bit 0x1234000000000000 (N) etc.
9260     // As a note some of this code is liberally stolen from the asm parser.
9261     case 'M': {
9262       if (!isUInt<32>(CVal))
9263         return;
9264       if (AArch64_AM::isLogicalImmediate(CVal, 32))
9265         break;
9266       if ((CVal & 0xFFFF) == CVal)
9267         break;
9268       if ((CVal & 0xFFFF0000ULL) == CVal)
9269         break;
9270       uint64_t NCVal = ~(uint32_t)CVal;
9271       if ((NCVal & 0xFFFFULL) == NCVal)
9272         break;
9273       if ((NCVal & 0xFFFF0000ULL) == NCVal)
9274         break;
9275       return;
9276     }
9277     case 'N': {
9278       if (AArch64_AM::isLogicalImmediate(CVal, 64))
9279         break;
9280       if ((CVal & 0xFFFFULL) == CVal)
9281         break;
9282       if ((CVal & 0xFFFF0000ULL) == CVal)
9283         break;
9284       if ((CVal & 0xFFFF00000000ULL) == CVal)
9285         break;
9286       if ((CVal & 0xFFFF000000000000ULL) == CVal)
9287         break;
9288       uint64_t NCVal = ~CVal;
9289       if ((NCVal & 0xFFFFULL) == NCVal)
9290         break;
9291       if ((NCVal & 0xFFFF0000ULL) == NCVal)
9292         break;
9293       if ((NCVal & 0xFFFF00000000ULL) == NCVal)
9294         break;
9295       if ((NCVal & 0xFFFF000000000000ULL) == NCVal)
9296         break;
9297       return;
9298     }
9299     default:
9300       return;
9301     }
9302 
9303     // All assembler immediates are 64-bit integers.
9304     Result = DAG.getTargetConstant(CVal, SDLoc(Op), MVT::i64);
9305     break;
9306   }
9307 
9308   if (Result.getNode()) {
9309     Ops.push_back(Result);
9310     return;
9311   }
9312 
9313   return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
9314 }
9315 
9316 //===----------------------------------------------------------------------===//
9317 //                     AArch64 Advanced SIMD Support
9318 //===----------------------------------------------------------------------===//
9319 
9320 /// WidenVector - Given a value in the V64 register class, produce the
9321 /// equivalent value in the V128 register class.
9322 static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG) {
9323   EVT VT = V64Reg.getValueType();
9324   unsigned NarrowSize = VT.getVectorNumElements();
9325   MVT EltTy = VT.getVectorElementType().getSimpleVT();
9326   MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
9327   SDLoc DL(V64Reg);
9328 
9329   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideTy, DAG.getUNDEF(WideTy),
9330                      V64Reg, DAG.getConstant(0, DL, MVT::i64));
9331 }
9332 
9333 /// getExtFactor - Determine the adjustment factor for the position when
9334 /// generating an "extract from vector registers" instruction.
9335 static unsigned getExtFactor(SDValue &V) {
9336   EVT EltType = V.getValueType().getVectorElementType();
9337   return EltType.getSizeInBits() / 8;
9338 }
9339 
9340 /// NarrowVector - Given a value in the V128 register class, produce the
9341 /// equivalent value in the V64 register class.
9342 static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
9343   EVT VT = V128Reg.getValueType();
9344   unsigned WideSize = VT.getVectorNumElements();
9345   MVT EltTy = VT.getVectorElementType().getSimpleVT();
9346   MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
9347   SDLoc DL(V128Reg);
9348 
9349   return DAG.getTargetExtractSubreg(AArch64::dsub, DL, NarrowTy, V128Reg);
9350 }
9351 
9352 // Gather data to see if the operation can be modelled as a
9353 // shuffle in combination with VEXTs.
9354 SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
9355                                                   SelectionDAG &DAG) const {
9356   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
9357   LLVM_DEBUG(dbgs() << "AArch64TargetLowering::ReconstructShuffle\n");
9358   SDLoc dl(Op);
9359   EVT VT = Op.getValueType();
9360   assert(!VT.isScalableVector() &&
9361          "Scalable vectors cannot be used with ISD::BUILD_VECTOR");
9362   unsigned NumElts = VT.getVectorNumElements();
9363 
9364   struct ShuffleSourceInfo {
9365     SDValue Vec;
9366     unsigned MinElt;
9367     unsigned MaxElt;
9368 
9369     // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
9370     // be compatible with the shuffle we intend to construct. As a result
9371     // ShuffleVec will be some sliding window into the original Vec.
9372     SDValue ShuffleVec;
9373 
9374     // Code should guarantee that element i in Vec starts at element "WindowBase
9375     // + i * WindowScale in ShuffleVec".
9376     int WindowBase;
9377     int WindowScale;
9378 
9379     ShuffleSourceInfo(SDValue Vec)
9380       : Vec(Vec), MinElt(std::numeric_limits<unsigned>::max()), MaxElt(0),
9381           ShuffleVec(Vec), WindowBase(0), WindowScale(1) {}
9382 
9383     bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
9384   };
9385 
9386   // First gather all vectors used as an immediate source for this BUILD_VECTOR
9387   // node.
9388   SmallVector<ShuffleSourceInfo, 2> Sources;
9389   for (unsigned i = 0; i < NumElts; ++i) {
9390     SDValue V = Op.getOperand(i);
9391     if (V.isUndef())
9392       continue;
9393     else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9394              !isa<ConstantSDNode>(V.getOperand(1)) ||
9395              V.getOperand(0).getValueType().isScalableVector()) {
9396       LLVM_DEBUG(
9397           dbgs() << "Reshuffle failed: "
9398                     "a shuffle can only come from building a vector from "
9399                     "various elements of other fixed-width vectors, provided "
9400                     "their indices are constant\n");
9401       return SDValue();
9402     }
9403 
9404     // Add this element source to the list if it's not already there.
9405     SDValue SourceVec = V.getOperand(0);
9406     auto Source = find(Sources, SourceVec);
9407     if (Source == Sources.end())
9408       Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));
9409 
9410     // Update the minimum and maximum lane number seen.
9411     unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
9412     Source->MinElt = std::min(Source->MinElt, EltNo);
9413     Source->MaxElt = std::max(Source->MaxElt, EltNo);
9414   }
9415 
9416   // If we have 3 or 4 sources, try to generate a TBL, which will at least be
9417   // better than moving to/from gpr registers for larger vectors.
9418   if ((Sources.size() == 3 || Sources.size() == 4) && NumElts > 4) {
9419     // Construct a mask for the tbl. We may need to adjust the index for types
9420     // larger than i8.
9421     SmallVector<unsigned, 16> Mask;
9422     unsigned OutputFactor = VT.getScalarSizeInBits() / 8;
9423     for (unsigned I = 0; I < NumElts; ++I) {
9424       SDValue V = Op.getOperand(I);
9425       if (V.isUndef()) {
9426         for (unsigned OF = 0; OF < OutputFactor; OF++)
9427           Mask.push_back(-1);
9428         continue;
9429       }
9430       // Set the Mask lanes adjusted for the size of the input and output
9431       // lanes. The Mask is always i8, so it will set OutputFactor lanes per
9432       // output element, adjusted in their positions per input and output types.
9433       unsigned Lane = V.getConstantOperandVal(1);
9434       for (unsigned S = 0; S < Sources.size(); S++) {
9435         if (V.getOperand(0) == Sources[S].Vec) {
9436           unsigned InputSize = Sources[S].Vec.getScalarValueSizeInBits();
9437           unsigned InputBase = 16 * S + Lane * InputSize / 8;
9438           for (unsigned OF = 0; OF < OutputFactor; OF++)
9439             Mask.push_back(InputBase + OF);
9440           break;
9441         }
9442       }
9443     }
9444 
9445     // Construct the tbl3/tbl4 out of an intrinsic, the sources converted to
9446     // v16i8, and the TBLMask
9447     SmallVector<SDValue, 16> TBLOperands;
9448     TBLOperands.push_back(DAG.getConstant(Sources.size() == 3
9449                                               ? Intrinsic::aarch64_neon_tbl3
9450                                               : Intrinsic::aarch64_neon_tbl4,
9451                                           dl, MVT::i32));
9452     for (unsigned i = 0; i < Sources.size(); i++) {
9453       SDValue Src = Sources[i].Vec;
9454       EVT SrcVT = Src.getValueType();
9455       Src = DAG.getBitcast(SrcVT.is64BitVector() ? MVT::v8i8 : MVT::v16i8, Src);
9456       assert((SrcVT.is64BitVector() || SrcVT.is128BitVector()) &&
9457              "Expected a legally typed vector");
9458       if (SrcVT.is64BitVector())
9459         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, Src,
9460                           DAG.getUNDEF(MVT::v8i8));
9461       TBLOperands.push_back(Src);
9462     }
9463 
9464     SmallVector<SDValue, 16> TBLMask;
9465     for (unsigned i = 0; i < Mask.size(); i++)
9466       TBLMask.push_back(DAG.getConstant(Mask[i], dl, MVT::i32));
9467     assert((Mask.size() == 8 || Mask.size() == 16) &&
9468            "Expected a v8i8 or v16i8 Mask");
    TBLOperands.push_back(DAG.getBuildVector(
        Mask.size() == 8 ? MVT::v8i8 : MVT::v16i8, dl, TBLMask));
9471 
9472     SDValue Shuffle =
9473         DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
9474                     Mask.size() == 8 ? MVT::v8i8 : MVT::v16i8, TBLOperands);
9475     return DAG.getBitcast(VT, Shuffle);
9476   }
9477 
9478   if (Sources.size() > 2) {
9479     LLVM_DEBUG(dbgs() << "Reshuffle failed: currently only do something "
9480                       << "sensible when at most two source vectors are "
9481                       << "involved\n");
9482     return SDValue();
9483   }
9484 
9485   // Find out the smallest element size among result and two sources, and use
9486   // it as element size to build the shuffle_vector.
9487   EVT SmallestEltTy = VT.getVectorElementType();
9488   for (auto &Source : Sources) {
9489     EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
9490     if (SrcEltTy.bitsLT(SmallestEltTy)) {
9491       SmallestEltTy = SrcEltTy;
9492     }
9493   }
9494   unsigned ResMultiplier =
9495       VT.getScalarSizeInBits() / SmallestEltTy.getFixedSizeInBits();
9496   uint64_t VTSize = VT.getFixedSizeInBits();
9497   NumElts = VTSize / SmallestEltTy.getFixedSizeInBits();
9498   EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
9499 
9500   // If the source vector is too wide or too narrow, we may nevertheless be able
9501   // to construct a compatible shuffle either by concatenating it with UNDEF or
9502   // extracting a suitable range of elements.
9503   for (auto &Src : Sources) {
9504     EVT SrcVT = Src.ShuffleVec.getValueType();
9505 
9506     TypeSize SrcVTSize = SrcVT.getSizeInBits();
9507     if (SrcVTSize == TypeSize::Fixed(VTSize))
9508       continue;
9509 
9510     // This stage of the search produces a source with the same element type as
9511     // the original, but with a total width matching the BUILD_VECTOR output.
9512     EVT EltVT = SrcVT.getVectorElementType();
9513     unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits();
9514     EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
9515 
9516     if (SrcVTSize.getFixedValue() < VTSize) {
9517       assert(2 * SrcVTSize == VTSize);
9518       // We can pad out the smaller vector for free, so if it's part of a
9519       // shuffle...
9520       Src.ShuffleVec =
9521           DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
9522                       DAG.getUNDEF(Src.ShuffleVec.getValueType()));
9523       continue;
9524     }
9525 
9526     if (SrcVTSize.getFixedValue() != 2 * VTSize) {
9527       LLVM_DEBUG(
9528           dbgs() << "Reshuffle failed: result vector too small to extract\n");
9529       return SDValue();
9530     }
9531 
9532     if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
9533       LLVM_DEBUG(
9534           dbgs() << "Reshuffle failed: span too large for a VEXT to cope\n");
9535       return SDValue();
9536     }
9537 
9538     if (Src.MinElt >= NumSrcElts) {
9539       // The extraction can just take the second half
9540       Src.ShuffleVec =
9541           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
9542                       DAG.getConstant(NumSrcElts, dl, MVT::i64));
9543       Src.WindowBase = -NumSrcElts;
9544     } else if (Src.MaxElt < NumSrcElts) {
9545       // The extraction can just take the first half
9546       Src.ShuffleVec =
9547           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
9548                       DAG.getConstant(0, dl, MVT::i64));
9549     } else {
9550       // An actual VEXT is needed
9551       SDValue VEXTSrc1 =
9552           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
9553                       DAG.getConstant(0, dl, MVT::i64));
9554       SDValue VEXTSrc2 =
9555           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
9556                       DAG.getConstant(NumSrcElts, dl, MVT::i64));
9557       unsigned Imm = Src.MinElt * getExtFactor(VEXTSrc1);
9558 
9559       if (!SrcVT.is64BitVector()) {
9560         LLVM_DEBUG(
9561           dbgs() << "Reshuffle failed: don't know how to lower AArch64ISD::EXT "
9562                     "for SVE vectors.");
9563         return SDValue();
9564       }
9565 
9566       Src.ShuffleVec = DAG.getNode(AArch64ISD::EXT, dl, DestVT, VEXTSrc1,
9567                                    VEXTSrc2,
9568                                    DAG.getConstant(Imm, dl, MVT::i32));
9569       Src.WindowBase = -Src.MinElt;
9570     }
9571   }
9572 
9573   // Another possible incompatibility occurs from the vector element types. We
9574   // can fix this by bitcasting the source vectors to the same type we intend
9575   // for the shuffle.
9576   for (auto &Src : Sources) {
9577     EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
9578     if (SrcEltTy == SmallestEltTy)
9579       continue;
9580     assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
9581     Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec);
9582     Src.WindowScale =
9583         SrcEltTy.getFixedSizeInBits() / SmallestEltTy.getFixedSizeInBits();
9584     Src.WindowBase *= Src.WindowScale;
9585   }
9586 
9587   // Final check before we try to actually produce a shuffle.
9588   LLVM_DEBUG(for (auto Src
9589                   : Sources)
9590                  assert(Src.ShuffleVec.getValueType() == ShuffleVT););
9591 
9592   // The stars all align, our next step is to produce the mask for the shuffle.
9593   SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
9594   int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
9595   for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
9596     SDValue Entry = Op.getOperand(i);
9597     if (Entry.isUndef())
9598       continue;
9599 
9600     auto Src = find(Sources, Entry.getOperand(0));
9601     int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();
9602 
9603     // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
9604     // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
9605     // segment.
9606     EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
9607     int BitsDefined = std::min(OrigEltTy.getScalarSizeInBits(),
9608                                VT.getScalarSizeInBits());
9609     int LanesDefined = BitsDefined / BitsPerShuffleLane;
9610 
9611     // This source is expected to fill ResMultiplier lanes of the final shuffle,
9612     // starting at the appropriate offset.
9613     int *LaneMask = &Mask[i * ResMultiplier];
9614 
9615     int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
9616     ExtractBase += NumElts * (Src - Sources.begin());
9617     for (int j = 0; j < LanesDefined; ++j)
9618       LaneMask[j] = ExtractBase + j;
9619   }
9620 
9621   // Final check before we try to produce nonsense...
9622   if (!isShuffleMaskLegal(Mask, ShuffleVT)) {
9623     LLVM_DEBUG(dbgs() << "Reshuffle failed: illegal shuffle mask\n");
9624     return SDValue();
9625   }
9626 
9627   SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
9628   for (unsigned i = 0; i < Sources.size(); ++i)
9629     ShuffleOps[i] = Sources[i].ShuffleVec;
9630 
9631   SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
9632                                          ShuffleOps[1], Mask);
9633   SDValue V = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
9634 
9635   LLVM_DEBUG(dbgs() << "Reshuffle, creating node: "; Shuffle.dump();
9636              dbgs() << "Reshuffle, creating node: "; V.dump(););
9637 
9638   return V;
9639 }
9640 
9641 // check if an EXT instruction can handle the shuffle mask when the
9642 // vector sources of the shuffle are the same.
9643 static bool isSingletonEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
9644   unsigned NumElts = VT.getVectorNumElements();
9645 
9646   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
9647   if (M[0] < 0)
9648     return false;
9649 
9650   Imm = M[0];
9651 
9652   // If this is a VEXT shuffle, the immediate value is the index of the first
9653   // element.  The other shuffle indices must be the successive elements after
9654   // the first one.
9655   unsigned ExpectedElt = Imm;
9656   for (unsigned i = 1; i < NumElts; ++i) {
9657     // Increment the expected index.  If it wraps around, just follow it
9658     // back to index zero and keep going.
9659     ++ExpectedElt;
9660     if (ExpectedElt == NumElts)
9661       ExpectedElt = 0;
9662 
9663     if (M[i] < 0)
9664       continue; // ignore UNDEF indices
9665     if (ExpectedElt != static_cast<unsigned>(M[i]))
9666       return false;
9667   }
9668 
9669   return true;
9670 }
9671 
9672 // Detect patterns of a0,a1,a2,a3,b0,b1,b2,b3,c0,c1,c2,c3,d0,d1,d2,d3 from
9673 // v4i32s. This is really a truncate, which we can construct out of (legal)
9674 // concats and truncate nodes.
static SDValue ReconstructTruncateFromBuildVector(SDValue V,
                                                  SelectionDAG &DAG) {
9676   if (V.getValueType() != MVT::v16i8)
9677     return SDValue();
9678   assert(V.getNumOperands() == 16 && "Expected 16 operands on the BUILDVECTOR");
9679 
9680   for (unsigned X = 0; X < 4; X++) {
9681     // Check the first item in each group is an extract from lane 0 of a v4i32
9682     // or v4i16.
9683     SDValue BaseExt = V.getOperand(X * 4);
9684     if (BaseExt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9685         (BaseExt.getOperand(0).getValueType() != MVT::v4i16 &&
9686          BaseExt.getOperand(0).getValueType() != MVT::v4i32) ||
9687         !isa<ConstantSDNode>(BaseExt.getOperand(1)) ||
9688         BaseExt.getConstantOperandVal(1) != 0)
9689       return SDValue();
9690     SDValue Base = BaseExt.getOperand(0);
9691     // And check the other items are extracts from the same vector.
9692     for (unsigned Y = 1; Y < 4; Y++) {
9693       SDValue Ext = V.getOperand(X * 4 + Y);
9694       if (Ext.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9695           Ext.getOperand(0) != Base ||
9696           !isa<ConstantSDNode>(Ext.getOperand(1)) ||
9697           Ext.getConstantOperandVal(1) != Y)
9698         return SDValue();
9699     }
9700   }
9701 
  // Turn the buildvector into a series of truncates and concats, which will
  // become uzp1 instructions. Any v4i32 sources are first truncated to v4i16;
  // the four v4i16 values are then concatenated in pairs into two v8i16s,
  // which are both truncated and concatenated together to form the v16i8.
9706   SDLoc DL(V);
9707   SDValue Trunc[4] = {
9708       V.getOperand(0).getOperand(0), V.getOperand(4).getOperand(0),
9709       V.getOperand(8).getOperand(0), V.getOperand(12).getOperand(0)};
9710   for (int I = 0; I < 4; I++)
9711     if (Trunc[I].getValueType() == MVT::v4i32)
9712       Trunc[I] = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i16, Trunc[I]);
9713   SDValue Concat0 =
9714       DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16, Trunc[0], Trunc[1]);
9715   SDValue Concat1 =
9716       DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16, Trunc[2], Trunc[3]);
9717   SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, Concat0);
9718   SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, Concat1);
9719   return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, Trunc0, Trunc1);
9720 }
9721 
9722 /// Check if a vector shuffle corresponds to a DUP instructions with a larger
9723 /// element width than the vector lane type. If that is the case the function
9724 /// returns true and writes the value of the DUP instruction lane operand into
9725 /// DupLaneOp
9726 static bool isWideDUPMask(ArrayRef<int> M, EVT VT, unsigned BlockSize,
9727                           unsigned &DupLaneOp) {
9728   assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
9729          "Only possible block sizes for wide DUP are: 16, 32, 64");
9730 
9731   if (BlockSize <= VT.getScalarSizeInBits())
9732     return false;
9733   if (BlockSize % VT.getScalarSizeInBits() != 0)
9734     return false;
9735   if (VT.getSizeInBits() % BlockSize != 0)
9736     return false;
9737 
9738   size_t SingleVecNumElements = VT.getVectorNumElements();
9739   size_t NumEltsPerBlock = BlockSize / VT.getScalarSizeInBits();
9740   size_t NumBlocks = VT.getSizeInBits() / BlockSize;
9741 
9742   // We are looking for masks like
9743   // [0, 1, 0, 1] or [2, 3, 2, 3] or [4, 5, 6, 7, 4, 5, 6, 7] where any element
9744   // might be replaced by 'undefined'. BlockIndices will eventually contain
9745   // lane indices of the duplicated block (i.e. [0, 1], [2, 3] and [4, 5, 6, 7]
9746   // for the above examples)
9747   SmallVector<int, 8> BlockElts(NumEltsPerBlock, -1);
9748   for (size_t BlockIndex = 0; BlockIndex < NumBlocks; BlockIndex++)
9749     for (size_t I = 0; I < NumEltsPerBlock; I++) {
9750       int Elt = M[BlockIndex * NumEltsPerBlock + I];
9751       if (Elt < 0)
9752         continue;
9753       // For now we don't support shuffles that use the second operand
9754       if ((unsigned)Elt >= SingleVecNumElements)
9755         return false;
9756       if (BlockElts[I] < 0)
9757         BlockElts[I] = Elt;
9758       else if (BlockElts[I] != Elt)
9759         return false;
9760     }
9761 
9762   // We found a candidate block (possibly with some undefs). It must be a
9763   // sequence of consecutive integers starting with a value divisible by
9764   // NumEltsPerBlock with some values possibly replaced by undef-s.
9765 
9766   // Find first non-undef element
9767   auto FirstRealEltIter = find_if(BlockElts, [](int Elt) { return Elt >= 0; });
9768   assert(FirstRealEltIter != BlockElts.end() &&
9769          "Shuffle with all-undefs must have been caught by previous cases, "
9770          "e.g. isSplat()");
9771   if (FirstRealEltIter == BlockElts.end()) {
9772     DupLaneOp = 0;
9773     return true;
9774   }
9775 
9776   // Index of FirstRealElt in BlockElts
9777   size_t FirstRealIndex = FirstRealEltIter - BlockElts.begin();
9778 
9779   if ((unsigned)*FirstRealEltIter < FirstRealIndex)
9780     return false;
9781   // BlockElts[0] must have the following value if it isn't undef:
9782   size_t Elt0 = *FirstRealEltIter - FirstRealIndex;
9783 
9784   // Check the first element
9785   if (Elt0 % NumEltsPerBlock != 0)
9786     return false;
9787   // Check that the sequence indeed consists of consecutive integers (modulo
9788   // undefs)
9789   for (size_t I = 0; I < NumEltsPerBlock; I++)
9790     if (BlockElts[I] >= 0 && (unsigned)BlockElts[I] != Elt0 + I)
9791       return false;
9792 
9793   DupLaneOp = Elt0 / NumEltsPerBlock;
9794   return true;
9795 }
9796 
9797 // check if an EXT instruction can handle the shuffle mask when the
9798 // vector sources of the shuffle are different.
9799 static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
9800                       unsigned &Imm) {
9801   // Look for the first non-undef element.
9802   const int *FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });
9803 
  // Benefit from APInt to handle overflow when calculating expected element.
9805   unsigned NumElts = VT.getVectorNumElements();
9806   unsigned MaskBits = APInt(32, NumElts * 2).logBase2();
9807   APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);
9808   // The following shuffle indices must be the successive elements after the
9809   // first real element.
9810   const int *FirstWrongElt = std::find_if(FirstRealElt + 1, M.end(),
9811       [&](int Elt) {return Elt != ExpectedElt++ && Elt != -1;});
9812   if (FirstWrongElt != M.end())
9813     return false;
9814 
9815   // The index of an EXT is the first element if it is not UNDEF.
9816   // Watch out for the beginning UNDEFs. The EXT index should be the expected
9817   // value of the first element.  E.g.
9818   // <-1, -1, 3, ...> is treated as <1, 2, 3, ...>.
9819   // <-1, -1, 0, 1, ...> is treated as <2*NumElts-2, 2*NumElts-1, 0, 1, ...>.
9820   // ExpectedElt is the last mask index plus 1.
9821   Imm = ExpectedElt.getZExtValue();
9822 
  // There are two different cases that require reversing the input vectors.
9824   // For example, for vector <4 x i32> we have the following cases,
9825   // Case 1: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, -1, 0>)
9826   // Case 2: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, 7, 0>)
9827   // For both cases, we finally use mask <5, 6, 7, 0>, which requires
9828   // to reverse two input vectors.
9829   if (Imm < NumElts)
9830     ReverseEXT = true;
9831   else
9832     Imm -= NumElts;
9833 
9834   return true;
9835 }
9836 
9837 /// isREVMask - Check if a vector shuffle corresponds to a REV
9838 /// instruction with the specified blocksize.  (The order of the elements
9839 /// within each block of the vector is reversed.)
9840 static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
9841   assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
9842          "Only possible block sizes for REV are: 16, 32, 64");
9843 
9844   unsigned EltSz = VT.getScalarSizeInBits();
9845   if (EltSz == 64)
9846     return false;
9847 
9848   unsigned NumElts = VT.getVectorNumElements();
9849   unsigned BlockElts = M[0] + 1;
9850   // If the first shuffle index is UNDEF, be optimistic.
9851   if (M[0] < 0)
9852     BlockElts = BlockSize / EltSz;
9853 
9854   if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
9855     return false;
9856 
9857   for (unsigned i = 0; i < NumElts; ++i) {
9858     if (M[i] < 0)
9859       continue; // ignore UNDEF indices
9860     if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
9861       return false;
9862   }
9863 
9864   return true;
9865 }
9866 
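/// isZIPMask - Check if a vector shuffle corresponds to a ZIP1/ZIP2
/// (interleave) instruction, e.g. <0, 4, 1, 5> (WhichResult = 0) or
/// <2, 6, 3, 7> (WhichResult = 1) for a 4-element vector. Undef entries are
/// ignored.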
9867 static bool isZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9868   unsigned NumElts = VT.getVectorNumElements();
9869   if (NumElts % 2 != 0)
9870     return false;
9871   WhichResult = (M[0] == 0 ? 0 : 1);
9872   unsigned Idx = WhichResult * NumElts / 2;
9873   for (unsigned i = 0; i != NumElts; i += 2) {
9874     if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
9875         (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx + NumElts))
9876       return false;
9877     Idx += 1;
9878   }
9879 
9880   return true;
9881 }
9882 
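/// isUZPMask - Check if a vector shuffle corresponds to a UZP1/UZP2
/// (deinterleave) instruction, e.g. <0, 2, 4, 6> (WhichResult = 0) or
/// <1, 3, 5, 7> (WhichResult = 1) for a 4-element vector. Undef entries are
/// ignored.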
9883 static bool isUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9884   unsigned NumElts = VT.getVectorNumElements();
9885   WhichResult = (M[0] == 0 ? 0 : 1);
9886   for (unsigned i = 0; i != NumElts; ++i) {
9887     if (M[i] < 0)
9888       continue; // ignore UNDEF indices
9889     if ((unsigned)M[i] != 2 * i + WhichResult)
9890       return false;
9891   }
9892 
9893   return true;
9894 }
9895 
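/// isTRNMask - Check if a vector shuffle corresponds to a TRN1/TRN2
/// (transpose) instruction, e.g. <0, 4, 2, 6> (WhichResult = 0) or
/// <1, 5, 3, 7> (WhichResult = 1) for a 4-element vector. Undef entries are
/// ignored.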
9896 static bool isTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9897   unsigned NumElts = VT.getVectorNumElements();
9898   if (NumElts % 2 != 0)
9899     return false;
9900   WhichResult = (M[0] == 0 ? 0 : 1);
9901   for (unsigned i = 0; i < NumElts; i += 2) {
9902     if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) ||
9903         (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + NumElts + WhichResult))
9904       return false;
9905   }
9906   return true;
9907 }
9908 
9909 /// isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of
9910 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
9911 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
9912 static bool isZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9913   unsigned NumElts = VT.getVectorNumElements();
9914   if (NumElts % 2 != 0)
9915     return false;
9916   WhichResult = (M[0] == 0 ? 0 : 1);
9917   unsigned Idx = WhichResult * NumElts / 2;
9918   for (unsigned i = 0; i != NumElts; i += 2) {
9919     if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
9920         (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx))
9921       return false;
9922     Idx += 1;
9923   }
9924 
9925   return true;
9926 }
9927 
9928 /// isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of
9929 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
9930 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>,
9931 static bool isUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9932   unsigned Half = VT.getVectorNumElements() / 2;
9933   WhichResult = (M[0] == 0 ? 0 : 1);
9934   for (unsigned j = 0; j != 2; ++j) {
9935     unsigned Idx = WhichResult;
9936     for (unsigned i = 0; i != Half; ++i) {
9937       int MIdx = M[i + j * Half];
9938       if (MIdx >= 0 && (unsigned)MIdx != Idx)
9939         return false;
9940       Idx += 2;
9941     }
9942   }
9943 
9944   return true;
9945 }
9946 
9947 /// isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of
9948 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
9949 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
9950 static bool isTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9951   unsigned NumElts = VT.getVectorNumElements();
9952   if (NumElts % 2 != 0)
9953     return false;
9954   WhichResult = (M[0] == 0 ? 0 : 1);
9955   for (unsigned i = 0; i < NumElts; i += 2) {
9956     if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) ||
9957         (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + WhichResult))
9958       return false;
9959   }
9960   return true;
9961 }
9962 
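/// isINSMask - Check whether a shuffle mask matches the identity of one of
/// its two sources in all but (at most) one element; such shuffles can be
/// lowered to a single lane insertion. DstIsLeft reports which source
/// provides the identity part and Anomaly the index of the mismatching lane.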
9963 static bool isINSMask(ArrayRef<int> M, int NumInputElements,
9964                       bool &DstIsLeft, int &Anomaly) {
9965   if (M.size() != static_cast<size_t>(NumInputElements))
9966     return false;
9967 
9968   int NumLHSMatch = 0, NumRHSMatch = 0;
9969   int LastLHSMismatch = -1, LastRHSMismatch = -1;
9970 
9971   for (int i = 0; i < NumInputElements; ++i) {
9972     if (M[i] == -1) {
9973       ++NumLHSMatch;
9974       ++NumRHSMatch;
9975       continue;
9976     }
9977 
9978     if (M[i] == i)
9979       ++NumLHSMatch;
9980     else
9981       LastLHSMismatch = i;
9982 
9983     if (M[i] == i + NumInputElements)
9984       ++NumRHSMatch;
9985     else
9986       LastRHSMismatch = i;
9987   }
9988 
9989   if (NumLHSMatch == NumInputElements - 1) {
9990     DstIsLeft = true;
9991     Anomaly = LastLHSMismatch;
9992     return true;
9993   } else if (NumRHSMatch == NumInputElements - 1) {
9994     DstIsLeft = false;
9995     Anomaly = LastRHSMismatch;
9996     return true;
9997   }
9998 
9999   return false;
10000 }
10001 
10002 static bool isConcatMask(ArrayRef<int> Mask, EVT VT, bool SplitLHS) {
10003   if (VT.getSizeInBits() != 128)
10004     return false;
10005 
10006   unsigned NumElts = VT.getVectorNumElements();
10007 
10008   for (int I = 0, E = NumElts / 2; I != E; I++) {
10009     if (Mask[I] != I)
10010       return false;
10011   }
10012 
10013   int Offset = NumElts / 2;
10014   for (int I = NumElts / 2, E = NumElts; I != E; I++) {
10015     if (Mask[I] != I + SplitLHS * Offset)
10016       return false;
10017   }
10018 
10019   return true;
10020 }
10021 
10022 static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) {
10023   SDLoc DL(Op);
10024   EVT VT = Op.getValueType();
10025   SDValue V0 = Op.getOperand(0);
10026   SDValue V1 = Op.getOperand(1);
10027   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
10028 
10029   if (VT.getVectorElementType() != V0.getValueType().getVectorElementType() ||
10030       VT.getVectorElementType() != V1.getValueType().getVectorElementType())
10031     return SDValue();
10032 
10033   bool SplitV0 = V0.getValueSizeInBits() == 128;
10034 
10035   if (!isConcatMask(Mask, VT, SplitV0))
10036     return SDValue();
10037 
10038   EVT CastVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
10039   if (SplitV0) {
10040     V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0,
10041                      DAG.getConstant(0, DL, MVT::i64));
10042   }
10043   if (V1.getValueSizeInBits() == 128) {
10044     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1,
10045                      DAG.getConstant(0, DL, MVT::i64));
10046   }
10047   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, V0, V1);
10048 }
10049 
/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle. ID is the perfect-shuffle
/// ID, V1 and V2 are the original shuffle inputs. PFEntry is the perfect
/// shuffle table entry and LHS/RHS are the immediate inputs for this stage of
/// the shuffle.
10055 static SDValue GeneratePerfectShuffle(unsigned ID, SDValue V1,
10056                                       SDValue V2, unsigned PFEntry, SDValue LHS,
10057                                       SDValue RHS, SelectionDAG &DAG,
10058                                       const SDLoc &dl) {
10059   unsigned OpNum = (PFEntry >> 26) & 0x0F;
10060   unsigned LHSID = (PFEntry >> 13) & ((1 << 13) - 1);
10061   unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1);
10062 
10063   enum {
10064     OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
10065     OP_VREV,
10066     OP_VDUP0,
10067     OP_VDUP1,
10068     OP_VDUP2,
10069     OP_VDUP3,
10070     OP_VEXT1,
10071     OP_VEXT2,
10072     OP_VEXT3,
10073     OP_VUZPL,  // VUZP, left result
10074     OP_VUZPR,  // VUZP, right result
10075     OP_VZIPL,  // VZIP, left result
10076     OP_VZIPR,  // VZIP, right result
10077     OP_VTRNL,  // VTRN, left result
10078     OP_VTRNR,  // VTRN, right result
10079     OP_MOVLANE // Move lane. RHSID is the lane to move into
10080   };
10081 
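  // Perfect-shuffle IDs encode the four result lanes in base 9, most
  // significant digit first, with the digit 8 standing for an undef lane.
  // Under that encoding, (1*9+2)*9+3 is <0,1,2,3> ("copy LHS") and
  // ((4*9+5)*9+6)*9+7 is <4,5,6,7> ("copy RHS").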
10082   if (OpNum == OP_COPY) {
10083     if (LHSID == (1 * 9 + 2) * 9 + 3)
10084       return LHS;
10085     assert(LHSID == ((4 * 9 + 5) * 9 + 6) * 9 + 7 && "Illegal OP_COPY!");
10086     return RHS;
10087   }
10088 
10089   if (OpNum == OP_MOVLANE) {
10090     // Decompose a PerfectShuffle ID to get the Mask for lane Elt
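    // For illustration (not from the original source): an ID of
    // ((0*9+1)*9+4)*9+5 == 122 encodes the lanes <0,1,4,5>, so
    // getPFIDLane(122, 2) returns 4; a digit of 8 decodes to -1 (undef).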
10091     auto getPFIDLane = [](unsigned ID, int Elt) -> int {
10092       assert(Elt < 4 && "Expected Perfect Lanes to be less than 4");
10093       Elt = 3 - Elt;
10094       while (Elt > 0) {
10095         ID /= 9;
10096         Elt--;
10097       }
10098       return (ID % 9 == 8) ? -1 : ID % 9;
10099     };
10100 
10101     // For OP_MOVLANE shuffles, the RHSID represents the lane to move into. The
10102     // lane to move from is taken from the PFID, which always refers to one of
10103     // the original input vectors (V1 or V2).
10104     SDValue OpLHS = GeneratePerfectShuffle(
10105         LHSID, V1, V2, PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
10106     EVT VT = OpLHS.getValueType();
10107     assert(RHSID < 8 && "Expected a lane index for RHSID!");
10108     unsigned ExtLane = 0;
10109     SDValue Input;
10110 
10111     // OP_MOVLANE shuffles are either D movs (if bit 0x4 is set) or S movs. D
10112     // movs are handled by bitcasting to a wider element type below.
10113     if (RHSID & 0x4) {
10114       int MaskElt = getPFIDLane(ID, (RHSID & 0x01) << 1) >> 1;
10115       if (MaskElt == -1)
10116         MaskElt = (getPFIDLane(ID, ((RHSID & 0x01) << 1) + 1) - 1) >> 1;
10117       assert(MaskElt >= 0 && "Didn't expect an undef movlane index!");
10118       ExtLane = MaskElt < 2 ? MaskElt : (MaskElt - 2);
10119       Input = MaskElt < 2 ? V1 : V2;
10120       if (VT.getScalarSizeInBits() == 16) {
10121         Input = DAG.getBitcast(MVT::v2f32, Input);
10122         OpLHS = DAG.getBitcast(MVT::v2f32, OpLHS);
10123       } else {
10124         assert(VT.getScalarSizeInBits() == 32 &&
10125                "Expected 16 or 32 bit shuffle elements");
10126         Input = DAG.getBitcast(MVT::v2f64, Input);
10127         OpLHS = DAG.getBitcast(MVT::v2f64, OpLHS);
10128       }
10129     } else {
10130       int MaskElt = getPFIDLane(ID, RHSID);
10131       assert(MaskElt >= 0 && "Didn't expect an undef movlane index!");
10132       ExtLane = MaskElt < 4 ? MaskElt : (MaskElt - 4);
10133       Input = MaskElt < 4 ? V1 : V2;
10134       // Be careful about creating illegal types. Use f16 instead of i16.
10135       if (VT == MVT::v4i16) {
10136         Input = DAG.getBitcast(MVT::v4f16, Input);
10137         OpLHS = DAG.getBitcast(MVT::v4f16, OpLHS);
10138       }
10139     }
10140     SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
10141                               Input.getValueType().getVectorElementType(),
10142                               Input, DAG.getVectorIdxConstant(ExtLane, dl));
10143     SDValue Ins =
10144         DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, Input.getValueType(), OpLHS,
10145                     Ext, DAG.getVectorIdxConstant(RHSID & 0x3, dl));
10146     return DAG.getBitcast(VT, Ins);
10147   }
10148 
10149   SDValue OpLHS, OpRHS;
10150   OpLHS = GeneratePerfectShuffle(LHSID, V1, V2, PerfectShuffleTable[LHSID], LHS,
10151                                  RHS, DAG, dl);
10152   OpRHS = GeneratePerfectShuffle(RHSID, V1, V2, PerfectShuffleTable[RHSID], LHS,
10153                                  RHS, DAG, dl);
10154   EVT VT = OpLHS.getValueType();
10155 
10156   switch (OpNum) {
10157   default:
10158     llvm_unreachable("Unknown shuffle opcode!");
10159   case OP_VREV:
10160     // VREV divides the vector in half and swaps within the half.
10161     if (VT.getVectorElementType() == MVT::i32 ||
10162         VT.getVectorElementType() == MVT::f32)
10163       return DAG.getNode(AArch64ISD::REV64, dl, VT, OpLHS);
10164     // vrev <4 x i16> -> REV32
10165     if (VT.getVectorElementType() == MVT::i16 ||
10166         VT.getVectorElementType() == MVT::f16 ||
10167         VT.getVectorElementType() == MVT::bf16)
10168       return DAG.getNode(AArch64ISD::REV32, dl, VT, OpLHS);
10169     // vrev <4 x i8> -> REV16
10170     assert(VT.getVectorElementType() == MVT::i8);
10171     return DAG.getNode(AArch64ISD::REV16, dl, VT, OpLHS);
10172   case OP_VDUP0:
10173   case OP_VDUP1:
10174   case OP_VDUP2:
10175   case OP_VDUP3: {
10176     EVT EltTy = VT.getVectorElementType();
10177     unsigned Opcode;
10178     if (EltTy == MVT::i8)
10179       Opcode = AArch64ISD::DUPLANE8;
10180     else if (EltTy == MVT::i16 || EltTy == MVT::f16 || EltTy == MVT::bf16)
10181       Opcode = AArch64ISD::DUPLANE16;
10182     else if (EltTy == MVT::i32 || EltTy == MVT::f32)
10183       Opcode = AArch64ISD::DUPLANE32;
10184     else if (EltTy == MVT::i64 || EltTy == MVT::f64)
10185       Opcode = AArch64ISD::DUPLANE64;
10186     else
10187       llvm_unreachable("Invalid vector element type?");
10188 
10189     if (VT.getSizeInBits() == 64)
10190       OpLHS = WidenVector(OpLHS, DAG);
10191     SDValue Lane = DAG.getConstant(OpNum - OP_VDUP0, dl, MVT::i64);
10192     return DAG.getNode(Opcode, dl, VT, OpLHS, Lane);
10193   }
10194   case OP_VEXT1:
10195   case OP_VEXT2:
10196   case OP_VEXT3: {
10197     unsigned Imm = (OpNum - OP_VEXT1 + 1) * getExtFactor(OpLHS);
10198     return DAG.getNode(AArch64ISD::EXT, dl, VT, OpLHS, OpRHS,
10199                        DAG.getConstant(Imm, dl, MVT::i32));
10200   }
10201   case OP_VUZPL:
10202     return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), OpLHS,
10203                        OpRHS);
10204   case OP_VUZPR:
10205     return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), OpLHS,
10206                        OpRHS);
10207   case OP_VZIPL:
10208     return DAG.getNode(AArch64ISD::ZIP1, dl, DAG.getVTList(VT, VT), OpLHS,
10209                        OpRHS);
10210   case OP_VZIPR:
10211     return DAG.getNode(AArch64ISD::ZIP2, dl, DAG.getVTList(VT, VT), OpLHS,
10212                        OpRHS);
10213   case OP_VTRNL:
10214     return DAG.getNode(AArch64ISD::TRN1, dl, DAG.getVTList(VT, VT), OpLHS,
10215                        OpRHS);
10216   case OP_VTRNR:
10217     return DAG.getNode(AArch64ISD::TRN2, dl, DAG.getVTList(VT, VT), OpLHS,
10218                        OpRHS);
10219   }
10220 }
10221 
10222 static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
10223                            SelectionDAG &DAG) {
10224   // Check to see if we can use the TBL instruction.
10225   SDValue V1 = Op.getOperand(0);
10226   SDValue V2 = Op.getOperand(1);
10227   SDLoc DL(Op);
10228 
10229   EVT EltVT = Op.getValueType().getVectorElementType();
10230   unsigned BytesPerElt = EltVT.getSizeInBits() / 8;
10231 
10232   bool Swap = false;
10233   if (V1.isUndef() || isZerosVector(V1.getNode())) {
10234     std::swap(V1, V2);
10235     Swap = true;
10236   }
10237 
10238   // If the V2 source is undef or zero then we can use a tbl1, as tbl1 will fill
10239   // out of range values with 0s. We do need to make sure that any out-of-range
10240   // values are really out-of-range for a v16i8 vector.
10241   bool IsUndefOrZero = V2.isUndef() || isZerosVector(V2.getNode());
10242   MVT IndexVT = MVT::v8i8;
10243   unsigned IndexLen = 8;
10244   if (Op.getValueSizeInBits() == 128) {
10245     IndexVT = MVT::v16i8;
10246     IndexLen = 16;
10247   }
10248 
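  // Expand the element-level shuffle mask into byte-level TBL indices. As a
  // sketch (not from the original source): for a v4i16 shuffle, BytesPerElt is
  // 2, so a mask value of 5 expands to the byte indices 10 and 11; indices
  // that may legitimately read past the table are forced to 255 so that TBL
  // writes 0 for them.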
10249   SmallVector<SDValue, 8> TBLMask;
10250   for (int Val : ShuffleMask) {
10251     for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) {
10252       unsigned Offset = Byte + Val * BytesPerElt;
10253       if (Swap)
10254         Offset = Offset < IndexLen ? Offset + IndexLen : Offset - IndexLen;
10255       if (IsUndefOrZero && Offset >= IndexLen)
10256         Offset = 255;
10257       TBLMask.push_back(DAG.getConstant(Offset, DL, MVT::i32));
10258     }
10259   }
10260 
10261   SDValue V1Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V1);
10262   SDValue V2Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V2);
10263 
10264   SDValue Shuffle;
10265   if (IsUndefOrZero) {
10266     if (IndexLen == 8)
10267       V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V1Cst);
10268     Shuffle = DAG.getNode(
10269         ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
10270         DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
10271         DAG.getBuildVector(IndexVT, DL,
10272                            makeArrayRef(TBLMask.data(), IndexLen)));
10273   } else {
10274     if (IndexLen == 8) {
10275       V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V2Cst);
10276       Shuffle = DAG.getNode(
10277           ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
10278           DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
10279           DAG.getBuildVector(IndexVT, DL,
10280                              makeArrayRef(TBLMask.data(), IndexLen)));
10281     } else {
10282       // FIXME: We cannot, for the moment, emit a TBL2 instruction because we
10283       // cannot currently represent the register constraints on the input
10284       // table registers.
10285       //  Shuffle = DAG.getNode(AArch64ISD::TBL2, DL, IndexVT, V1Cst, V2Cst,
10286       //                   DAG.getBuildVector(IndexVT, DL, &TBLMask[0],
10287       //                   IndexLen));
10288       Shuffle = DAG.getNode(
10289           ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
10290           DAG.getConstant(Intrinsic::aarch64_neon_tbl2, DL, MVT::i32), V1Cst,
10291           V2Cst, DAG.getBuildVector(IndexVT, DL,
10292                                     makeArrayRef(TBLMask.data(), IndexLen)));
10293     }
10294   }
10295   return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Shuffle);
10296 }
10297 
10298 static unsigned getDUPLANEOp(EVT EltType) {
10299   if (EltType == MVT::i8)
10300     return AArch64ISD::DUPLANE8;
10301   if (EltType == MVT::i16 || EltType == MVT::f16 || EltType == MVT::bf16)
10302     return AArch64ISD::DUPLANE16;
10303   if (EltType == MVT::i32 || EltType == MVT::f32)
10304     return AArch64ISD::DUPLANE32;
10305   if (EltType == MVT::i64 || EltType == MVT::f64)
10306     return AArch64ISD::DUPLANE64;
10307 
10308   llvm_unreachable("Invalid vector element type?");
10309 }
10310 
10311 static SDValue constructDup(SDValue V, int Lane, SDLoc dl, EVT VT,
10312                             unsigned Opcode, SelectionDAG &DAG) {
10313   // Try to eliminate a bitcasted extract subvector before a DUPLANE.
10314   auto getScaledOffsetDup = [](SDValue BitCast, int &LaneC, MVT &CastVT) {
10315     // Match: dup (bitcast (extract_subv X, C)), LaneC
10316     if (BitCast.getOpcode() != ISD::BITCAST ||
10317         BitCast.getOperand(0).getOpcode() != ISD::EXTRACT_SUBVECTOR)
10318       return false;
10319 
10320     // The extract index must align in the destination type. That may not
10321     // happen if the bitcast is from a narrow type to a wide type.
10322     SDValue Extract = BitCast.getOperand(0);
10323     unsigned ExtIdx = Extract.getConstantOperandVal(1);
10324     unsigned SrcEltBitWidth = Extract.getScalarValueSizeInBits();
10325     unsigned ExtIdxInBits = ExtIdx * SrcEltBitWidth;
10326     unsigned CastedEltBitWidth = BitCast.getScalarValueSizeInBits();
10327     if (ExtIdxInBits % CastedEltBitWidth != 0)
10328       return false;
10329 
10330     // Can't handle cases where vector size is not 128-bit
10331     if (!Extract.getOperand(0).getValueType().is128BitVector())
10332       return false;
10333 
10334     // Update the lane value by offsetting with the scaled extract index.
10335     LaneC += ExtIdxInBits / CastedEltBitWidth;
10336 
10337     // Determine the casted vector type of the wide vector input.
10338     // dup (bitcast (extract_subv X, C)), LaneC --> dup (bitcast X), LaneC'
10339     // Examples:
10340     // dup (bitcast (extract_subv v2f64 X, 1) to v2f32), 1 --> dup v4f32 X, 3
10341     // dup (bitcast (extract_subv v16i8 X, 8) to v4i16), 1 --> dup v8i16 X, 5
10342     unsigned SrcVecNumElts =
10343         Extract.getOperand(0).getValueSizeInBits() / CastedEltBitWidth;
10344     CastVT = MVT::getVectorVT(BitCast.getSimpleValueType().getScalarType(),
10345                               SrcVecNumElts);
10346     return true;
10347   };
10348   MVT CastVT;
10349   if (getScaledOffsetDup(V, Lane, CastVT)) {
10350     V = DAG.getBitcast(CastVT, V.getOperand(0).getOperand(0));
10351   } else if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
10352              V.getOperand(0).getValueType().is128BitVector()) {
10353     // The lane is incremented by the index of the extract.
10354     // Example: dup v2f32 (extract v4f32 X, 2), 1 --> dup v4f32 X, 3
10355     Lane += V.getConstantOperandVal(1);
10356     V = V.getOperand(0);
10357   } else if (V.getOpcode() == ISD::CONCAT_VECTORS) {
10358     // The lane is decremented if we are splatting from the 2nd operand.
10359     // Example: dup v4i32 (concat v2i32 X, v2i32 Y), 3 --> dup v4i32 Y, 1
10360     unsigned Idx = Lane >= (int)VT.getVectorNumElements() / 2;
10361     Lane -= Idx * VT.getVectorNumElements() / 2;
10362     V = WidenVector(V.getOperand(Idx), DAG);
10363   } else if (VT.getSizeInBits() == 64) {
10364     // Widen the operand to 128-bit register with undef.
10365     V = WidenVector(V, DAG);
10366   }
10367   return DAG.getNode(Opcode, dl, VT, V, DAG.getConstant(Lane, dl, MVT::i64));
10368 }
10369 
10370 // Return true if the shuffle mask M can be rewritten for elements twice as
10371 // wide: every pair of adjacent mask values must be consecutive and start at
10372 // an even index (undefs allowed); the widened mask is returned in NewMask.
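// For example (illustration only): on a v4i32 shuffle, the mask <0,1,6,7>
// widens to <0,3> for v2i64, while <0,3,4,5> cannot be widened.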
10373 static bool isWideTypeMask(ArrayRef<int> M, EVT VT,
10374                            SmallVectorImpl<int> &NewMask) {
10375   unsigned NumElts = VT.getVectorNumElements();
10376   if (NumElts % 2 != 0)
10377     return false;
10378 
10379   NewMask.clear();
10380   for (unsigned i = 0; i < NumElts; i += 2) {
10381     int M0 = M[i];
10382     int M1 = M[i + 1];
10383 
10384     // If both elements are undef, new mask is undef too.
10385     if (M0 == -1 && M1 == -1) {
10386       NewMask.push_back(-1);
10387       continue;
10388     }
10389 
10390     if (M0 == -1 && M1 != -1 && (M1 % 2) == 1) {
10391       NewMask.push_back(M1 / 2);
10392       continue;
10393     }
10394 
10395     if (M0 != -1 && (M0 % 2) == 0 && ((M0 + 1) == M1 || M1 == -1)) {
10396       NewMask.push_back(M0 / 2);
10397       continue;
10398     }
10399 
10400     NewMask.clear();
10401     return false;
10402   }
10403 
10404   assert(NewMask.size() == NumElts / 2 && "Incorrect size for mask!");
10405   return true;
10406 }
10407 
10408 // Try to widen element type to get a new mask value for a better permutation
10409 // sequence, so that we can use NEON shuffle instructions, such as zip1/2,
10410 // UZP1/2, TRN1/2, REV, INS, etc.
10411 // For example:
10412 //  shufflevector <4 x i32> %a, <4 x i32> %b,
10413 //                <4 x i32> <i32 6, i32 7, i32 2, i32 3>
10414 // is equivalent to:
10415 //  shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1>
10416 // Finally, we can get:
10417 //  mov     v0.d[0], v1.d[1]
10418 static SDValue tryWidenMaskForShuffle(SDValue Op, SelectionDAG &DAG) {
10419   SDLoc DL(Op);
10420   EVT VT = Op.getValueType();
10421   EVT ScalarVT = VT.getVectorElementType();
10422   unsigned ElementSize = ScalarVT.getFixedSizeInBits();
10423   SDValue V0 = Op.getOperand(0);
10424   SDValue V1 = Op.getOperand(1);
10425   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
10426 
10427   // When combining adjacent elements (e.g. two i16s -> i32, two i32s -> i64),
10428   // we need to make sure the wider element type is legal. Thus, ElementSize
10429   // should not be larger than 32 bits, and the i1 type must also be excluded.
10430   if (ElementSize > 32 || ElementSize == 1)
10431     return SDValue();
10432 
10433   SmallVector<int, 8> NewMask;
10434   if (isWideTypeMask(Mask, VT, NewMask)) {
10435     MVT NewEltVT = VT.isFloatingPoint()
10436                        ? MVT::getFloatingPointVT(ElementSize * 2)
10437                        : MVT::getIntegerVT(ElementSize * 2);
10438     MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
10439     if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
10440       V0 = DAG.getBitcast(NewVT, V0);
10441       V1 = DAG.getBitcast(NewVT, V1);
10442       return DAG.getBitcast(VT,
10443                             DAG.getVectorShuffle(NewVT, DL, V0, V1, NewMask));
10444     }
10445   }
10446 
10447   return SDValue();
10448 }
10449 
10450 SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
10451                                                    SelectionDAG &DAG) const {
10452   SDLoc dl(Op);
10453   EVT VT = Op.getValueType();
10454 
10455   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
10456 
10457   if (useSVEForFixedLengthVectorVT(VT))
10458     return LowerFixedLengthVECTOR_SHUFFLEToSVE(Op, DAG);
10459 
10460   // Convert shuffles that are directly supported on NEON to target-specific
10461   // DAG nodes, instead of keeping them as shuffles and matching them again
10462   // during code selection.  This is more efficient and avoids the possibility
10463   // of inconsistencies between legalization and selection.
10464   ArrayRef<int> ShuffleMask = SVN->getMask();
10465 
10466   SDValue V1 = Op.getOperand(0);
10467   SDValue V2 = Op.getOperand(1);
10468 
10469   assert(V1.getValueType() == VT && "Unexpected VECTOR_SHUFFLE type!");
10470   assert(ShuffleMask.size() == VT.getVectorNumElements() &&
10471          "Unexpected VECTOR_SHUFFLE mask size!");
10472 
10473   if (SVN->isSplat()) {
10474     int Lane = SVN->getSplatIndex();
10475     // If this is an undef splat, generate it via "just" vdup, if possible.
10476     if (Lane == -1)
10477       Lane = 0;
10478 
10479     if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR)
10480       return DAG.getNode(AArch64ISD::DUP, dl, V1.getValueType(),
10481                          V1.getOperand(0));
10482     // Test if V1 is a BUILD_VECTOR and the lane being referenced is a non-
10483     // constant. If so, we can just reference the lane's definition directly.
10484     if (V1.getOpcode() == ISD::BUILD_VECTOR &&
10485         !isa<ConstantSDNode>(V1.getOperand(Lane)))
10486       return DAG.getNode(AArch64ISD::DUP, dl, VT, V1.getOperand(Lane));
10487 
10488     // Otherwise, duplicate from the lane of the input vector.
10489     unsigned Opcode = getDUPLANEOp(V1.getValueType().getVectorElementType());
10490     return constructDup(V1, Lane, dl, VT, Opcode, DAG);
10491   }
10492 
10493   // Check if the mask matches a DUP for a wider element
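  // (e.g. a v8i16 mask of <4,5,4,5,4,5,4,5> repeats one 32-bit chunk, so it
  // can be lowered as a DUPLANE32 of lane 2 after bitcasting V1 to v4i32).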
10494   for (unsigned LaneSize : {64U, 32U, 16U}) {
10495     unsigned Lane = 0;
10496     if (isWideDUPMask(ShuffleMask, VT, LaneSize, Lane)) {
10497       unsigned Opcode = LaneSize == 64 ? AArch64ISD::DUPLANE64
10498                                        : LaneSize == 32 ? AArch64ISD::DUPLANE32
10499                                                         : AArch64ISD::DUPLANE16;
10500       // Cast V1 to an integer vector with required lane size
10501       MVT NewEltTy = MVT::getIntegerVT(LaneSize);
10502       unsigned NewEltCount = VT.getSizeInBits() / LaneSize;
10503       MVT NewVecTy = MVT::getVectorVT(NewEltTy, NewEltCount);
10504       V1 = DAG.getBitcast(NewVecTy, V1);
10505       // Construct the DUP instruction
10506       V1 = constructDup(V1, Lane, dl, NewVecTy, Opcode, DAG);
10507       // Cast back to the original type
10508       return DAG.getBitcast(VT, V1);
10509     }
10510   }
10511 
10512   if (isREVMask(ShuffleMask, VT, 64))
10513     return DAG.getNode(AArch64ISD::REV64, dl, V1.getValueType(), V1, V2);
10514   if (isREVMask(ShuffleMask, VT, 32))
10515     return DAG.getNode(AArch64ISD::REV32, dl, V1.getValueType(), V1, V2);
10516   if (isREVMask(ShuffleMask, VT, 16))
10517     return DAG.getNode(AArch64ISD::REV16, dl, V1.getValueType(), V1, V2);
10518 
10519   if (((VT.getVectorNumElements() == 8 && VT.getScalarSizeInBits() == 16) ||
10520        (VT.getVectorNumElements() == 16 && VT.getScalarSizeInBits() == 8)) &&
10521       ShuffleVectorInst::isReverseMask(ShuffleMask)) {
10522     SDValue Rev = DAG.getNode(AArch64ISD::REV64, dl, VT, V1);
10523     return DAG.getNode(AArch64ISD::EXT, dl, VT, Rev, Rev,
10524                        DAG.getConstant(8, dl, MVT::i32));
10525   }
10526 
10527   bool ReverseEXT = false;
10528   unsigned Imm;
10529   if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm)) {
10530     if (ReverseEXT)
10531       std::swap(V1, V2);
10532     Imm *= getExtFactor(V1);
10533     return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V2,
10534                        DAG.getConstant(Imm, dl, MVT::i32));
10535   } else if (V2->isUndef() && isSingletonEXTMask(ShuffleMask, VT, Imm)) {
10536     Imm *= getExtFactor(V1);
10537     return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V1,
10538                        DAG.getConstant(Imm, dl, MVT::i32));
10539   }
10540 
10541   unsigned WhichResult;
10542   if (isZIPMask(ShuffleMask, VT, WhichResult)) {
10543     unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2;
10544     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
10545   }
10546   if (isUZPMask(ShuffleMask, VT, WhichResult)) {
10547     unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
10548     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
10549   }
10550   if (isTRNMask(ShuffleMask, VT, WhichResult)) {
10551     unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
10552     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
10553   }
10554 
10555   if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
10556     unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2;
10557     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
10558   }
10559   if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
10560     unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
10561     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
10562   }
10563   if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
10564     unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
10565     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
10566   }
10567 
10568   if (SDValue Concat = tryFormConcatFromShuffle(Op, DAG))
10569     return Concat;
10570 
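  // Handle shuffles that copy every lane from one input except for a single
  // "anomalous" lane, which is taken from the other (or the same) input. As an
  // illustration (not from the original source): the v4i32 mask <0,1,6,3>
  // keeps V1 except for lane 2, which comes from lane 2 of V2, i.e. a single
  // INS of one element.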
10571   bool DstIsLeft;
10572   int Anomaly;
10573   int NumInputElements = V1.getValueType().getVectorNumElements();
10574   if (isINSMask(ShuffleMask, NumInputElements, DstIsLeft, Anomaly)) {
10575     SDValue DstVec = DstIsLeft ? V1 : V2;
10576     SDValue DstLaneV = DAG.getConstant(Anomaly, dl, MVT::i64);
10577 
10578     SDValue SrcVec = V1;
10579     int SrcLane = ShuffleMask[Anomaly];
10580     if (SrcLane >= NumInputElements) {
10581       SrcVec = V2;
10582       SrcLane -= VT.getVectorNumElements();
10583     }
10584     SDValue SrcLaneV = DAG.getConstant(SrcLane, dl, MVT::i64);
10585 
10586     EVT ScalarVT = VT.getVectorElementType();
10587 
10588     if (ScalarVT.getFixedSizeInBits() < 32 && ScalarVT.isInteger())
10589       ScalarVT = MVT::i32;
10590 
10591     return DAG.getNode(
10592         ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
10593         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, SrcVec, SrcLaneV),
10594         DstLaneV);
10595   }
10596 
10597   if (SDValue NewSD = tryWidenMaskForShuffle(Op, DAG))
10598     return NewSD;
10599 
10600   // If the shuffle is not directly supported and it has 4 elements, use
10601   // the PerfectShuffle-generated table to synthesize it from other shuffles.
10602   unsigned NumElts = VT.getVectorNumElements();
10603   if (NumElts == 4) {
10604     unsigned PFIndexes[4];
10605     for (unsigned i = 0; i != 4; ++i) {
10606       if (ShuffleMask[i] < 0)
10607         PFIndexes[i] = 8;
10608       else
10609         PFIndexes[i] = ShuffleMask[i];
10610     }
10611 
10612     // Compute the index in the perfect shuffle table.
10613     unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
10614                             PFIndexes[2] * 9 + PFIndexes[3];
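    // (For instance, the mask <1,1,3,7> maps to index 1*729 + 1*81 + 3*9 + 7
    //  == 844; undef lanes use the digit 8.)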
10615     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
10616     return GeneratePerfectShuffle(PFTableIndex, V1, V2, PFEntry, V1, V2, DAG,
10617                                   dl);
10618   }
10619 
10620   return GenerateTBL(Op, ShuffleMask, DAG);
10621 }
10622 
10623 SDValue AArch64TargetLowering::LowerSPLAT_VECTOR(SDValue Op,
10624                                                  SelectionDAG &DAG) const {
10625   EVT VT = Op.getValueType();
10626 
10627   if (useSVEForFixedLengthVectorVT(VT))
10628     return LowerToScalableOp(Op, DAG);
10629 
10630   assert(VT.isScalableVector() && VT.getVectorElementType() == MVT::i1 &&
10631          "Unexpected vector type!");
10632 
10633   // We can handle the constant cases during isel.
10634   if (isa<ConstantSDNode>(Op.getOperand(0)))
10635     return Op;
10636 
10637   // There isn't a natural way to handle the general i1 case, so we use some
10638   // trickery with whilelo.
10639   SDLoc DL(Op);
10640   SDValue SplatVal = DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, MVT::i64);
10641   SplatVal = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, SplatVal,
10642                          DAG.getValueType(MVT::i1));
10643   SDValue ID =
10644       DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64);
10645   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
10646   if (VT == MVT::nxv1i1)
10647     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::nxv1i1,
10648                        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::nxv2i1, ID,
10649                                    Zero, SplatVal),
10650                        Zero);
10651   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, ID, Zero, SplatVal);
10652 }
10653 
10654 SDValue AArch64TargetLowering::LowerDUPQLane(SDValue Op,
10655                                              SelectionDAG &DAG) const {
10656   SDLoc DL(Op);
10657 
10658   EVT VT = Op.getValueType();
10659   if (!isTypeLegal(VT) || !VT.isScalableVector())
10660     return SDValue();
10661 
10662   // Current lowering only supports the SVE-ACLE types.
10663   if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock)
10664     return SDValue();
10665 
10666   // The DUPQ operation is independent of element type, so normalise to i64s.
10667   SDValue Idx128 = Op.getOperand(2);
10668 
10669   // DUPQ can be used when idx is in range.
10670   auto *CIdx = dyn_cast<ConstantSDNode>(Idx128);
10671   if (CIdx && (CIdx->getZExtValue() <= 3)) {
10672     SDValue CI = DAG.getTargetConstant(CIdx->getZExtValue(), DL, MVT::i64);
10673     return DAG.getNode(AArch64ISD::DUPLANE128, DL, VT, Op.getOperand(1), CI);
10674   }
10675 
10676   SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::nxv2i64, Op.getOperand(1));
10677 
10678   // The ACLE says this must produce the same result as:
10679   //   svtbl(data, svadd_x(svptrue_b64(),
10680   //                       svand_x(svptrue_b64(), svindex_u64(0, 1), 1),
10681   //                       index * 2))
10682   SDValue One = DAG.getConstant(1, DL, MVT::i64);
10683   SDValue SplatOne = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, One);
10684 
10685   // create the vector 0,1,0,1,...
10686   SDValue SV = DAG.getStepVector(DL, MVT::nxv2i64);
10687   SV = DAG.getNode(ISD::AND, DL, MVT::nxv2i64, SV, SplatOne);
10688 
10689   // create the vector idx64,idx64+1,idx64,idx64+1,...
10690   SDValue Idx64 = DAG.getNode(ISD::ADD, DL, MVT::i64, Idx128, Idx128);
10691   SDValue SplatIdx64 = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, Idx64);
10692   SDValue ShuffleMask = DAG.getNode(ISD::ADD, DL, MVT::nxv2i64, SV, SplatIdx64);
10693 
10694   // create the vector Val[idx64],Val[idx64+1],Val[idx64],Val[idx64+1],...
10695   SDValue TBL = DAG.getNode(AArch64ISD::TBL, DL, MVT::nxv2i64, V, ShuffleMask);
10696   return DAG.getNode(ISD::BITCAST, DL, VT, TBL);
10697 }
10698 
10699 
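// Decompose a constant-splat BUILD_VECTOR into its defined constant bits
// (CnstBits) and a mask derived from its undef bits (UndefBits), each
// replicated across the full width of the vector. Returns false if BVN is not
// a constant splat.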
10700 static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits,
10701                                APInt &UndefBits) {
10702   EVT VT = BVN->getValueType(0);
10703   APInt SplatBits, SplatUndef;
10704   unsigned SplatBitSize;
10705   bool HasAnyUndefs;
10706   if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
10707     unsigned NumSplats = VT.getSizeInBits() / SplatBitSize;
10708 
10709     for (unsigned i = 0; i < NumSplats; ++i) {
10710       CnstBits <<= SplatBitSize;
10711       UndefBits <<= SplatBitSize;
10712       CnstBits |= SplatBits.zextOrTrunc(VT.getSizeInBits());
10713       UndefBits |= (SplatBits ^ SplatUndef).zextOrTrunc(VT.getSizeInBits());
10714     }
10715 
10716     return true;
10717   }
10718 
10719   return false;
10720 }
10721 
10722 // Try 64-bit splatted SIMD immediate.
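// (ModImm type 10 accepts 64-bit values in which every byte is either 0x00 or
// 0xFF, e.g. a v2i64 splat of 0x00FF0000FFFF00FF.)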
10723 static SDValue tryAdvSIMDModImm64(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10724                                  const APInt &Bits) {
10725   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10726     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10727     EVT VT = Op.getValueType();
10728     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v2i64 : MVT::f64;
10729 
10730     if (AArch64_AM::isAdvSIMDModImmType10(Value)) {
10731       Value = AArch64_AM::encodeAdvSIMDModImmType10(Value);
10732 
10733       SDLoc dl(Op);
10734       SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
10735                                 DAG.getConstant(Value, dl, MVT::i32));
10736       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10737     }
10738   }
10739 
10740   return SDValue();
10741 }
10742 
10743 // Try 32-bit splatted SIMD immediate.
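// (Types 1-4 place an 8-bit value at byte position 0, 1, 2 or 3 of each 32-bit
// element; for example, a v4i32 splat of 0x0000AB00 would be emitted as
// "movi v0.4s, #0xab, lsl #8".)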
10744 static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10745                                   const APInt &Bits,
10746                                   const SDValue *LHS = nullptr) {
10747   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10748     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10749     EVT VT = Op.getValueType();
10750     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
10751     bool isAdvSIMDModImm = false;
10752     uint64_t Shift;
10753 
10754     if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType1(Value))) {
10755       Value = AArch64_AM::encodeAdvSIMDModImmType1(Value);
10756       Shift = 0;
10757     }
10758     else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType2(Value))) {
10759       Value = AArch64_AM::encodeAdvSIMDModImmType2(Value);
10760       Shift = 8;
10761     }
10762     else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType3(Value))) {
10763       Value = AArch64_AM::encodeAdvSIMDModImmType3(Value);
10764       Shift = 16;
10765     }
10766     else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType4(Value))) {
10767       Value = AArch64_AM::encodeAdvSIMDModImmType4(Value);
10768       Shift = 24;
10769     }
10770 
10771     if (isAdvSIMDModImm) {
10772       SDLoc dl(Op);
10773       SDValue Mov;
10774 
10775       if (LHS)
10776         Mov = DAG.getNode(NewOp, dl, MovTy, *LHS,
10777                           DAG.getConstant(Value, dl, MVT::i32),
10778                           DAG.getConstant(Shift, dl, MVT::i32));
10779       else
10780         Mov = DAG.getNode(NewOp, dl, MovTy,
10781                           DAG.getConstant(Value, dl, MVT::i32),
10782                           DAG.getConstant(Shift, dl, MVT::i32));
10783 
10784       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10785     }
10786   }
10787 
10788   return SDValue();
10789 }
10790 
10791 // Try 16-bit splatted SIMD immediate.
10792 static SDValue tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10793                                   const APInt &Bits,
10794                                   const SDValue *LHS = nullptr) {
10795   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10796     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10797     EVT VT = Op.getValueType();
10798     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
10799     bool isAdvSIMDModImm = false;
10800     uint64_t Shift;
10801 
10802     if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType5(Value))) {
10803       Value = AArch64_AM::encodeAdvSIMDModImmType5(Value);
10804       Shift = 0;
10805     }
10806     else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType6(Value))) {
10807       Value = AArch64_AM::encodeAdvSIMDModImmType6(Value);
10808       Shift = 8;
10809     }
10810 
10811     if (isAdvSIMDModImm) {
10812       SDLoc dl(Op);
10813       SDValue Mov;
10814 
10815       if (LHS)
10816         Mov = DAG.getNode(NewOp, dl, MovTy, *LHS,
10817                           DAG.getConstant(Value, dl, MVT::i32),
10818                           DAG.getConstant(Shift, dl, MVT::i32));
10819       else
10820         Mov = DAG.getNode(NewOp, dl, MovTy,
10821                           DAG.getConstant(Value, dl, MVT::i32),
10822                           DAG.getConstant(Shift, dl, MVT::i32));
10823 
10824       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10825     }
10826   }
10827 
10828   return SDValue();
10829 }
10830 
10831 // Try 32-bit splatted SIMD immediate with shifted ones.
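// (The "shifted ones" MSL forms: type 7 is 0x0000ggFF and type 8 is 0x00ggFFFF
// per 32-bit element; the 264/272 shift encodings appear to select MSL #8 and
// MSL #16 respectively.)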
10832 static SDValue tryAdvSIMDModImm321s(unsigned NewOp, SDValue Op,
10833                                     SelectionDAG &DAG, const APInt &Bits) {
10834   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10835     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10836     EVT VT = Op.getValueType();
10837     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
10838     bool isAdvSIMDModImm = false;
10839     uint64_t Shift;
10840 
10841     if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType7(Value))) {
10842       Value = AArch64_AM::encodeAdvSIMDModImmType7(Value);
10843       Shift = 264;
10844     }
10845     else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType8(Value))) {
10846       Value = AArch64_AM::encodeAdvSIMDModImmType8(Value);
10847       Shift = 272;
10848     }
10849 
10850     if (isAdvSIMDModImm) {
10851       SDLoc dl(Op);
10852       SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
10853                                 DAG.getConstant(Value, dl, MVT::i32),
10854                                 DAG.getConstant(Shift, dl, MVT::i32));
10855       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10856     }
10857   }
10858 
10859   return SDValue();
10860 }
10861 
10862 // Try 8-bit splatted SIMD immediate.
10863 static SDValue tryAdvSIMDModImm8(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10864                                  const APInt &Bits) {
10865   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10866     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10867     EVT VT = Op.getValueType();
10868     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v16i8 : MVT::v8i8;
10869 
10870     if (AArch64_AM::isAdvSIMDModImmType9(Value)) {
10871       Value = AArch64_AM::encodeAdvSIMDModImmType9(Value);
10872 
10873       SDLoc dl(Op);
10874       SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
10875                                 DAG.getConstant(Value, dl, MVT::i32));
10876       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10877     }
10878   }
10879 
10880   return SDValue();
10881 }
10882 
10883 // Try FP splatted SIMD immediate.
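// (Types 11/12 cover the FMOV-encodable f32/f64 immediates, so e.g. a splat of
// 1.0 can typically be materialized directly with FMOV.)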
10884 static SDValue tryAdvSIMDModImmFP(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10885                                   const APInt &Bits) {
10886   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10887     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10888     EVT VT = Op.getValueType();
10889     bool isWide = (VT.getSizeInBits() == 128);
10890     MVT MovTy;
10891     bool isAdvSIMDModImm = false;
10892 
10893     if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType11(Value))) {
10894       Value = AArch64_AM::encodeAdvSIMDModImmType11(Value);
10895       MovTy = isWide ? MVT::v4f32 : MVT::v2f32;
10896     }
10897     else if (isWide &&
10898              (isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType12(Value))) {
10899       Value = AArch64_AM::encodeAdvSIMDModImmType12(Value);
10900       MovTy = MVT::v2f64;
10901     }
10902 
10903     if (isAdvSIMDModImm) {
10904       SDLoc dl(Op);
10905       SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
10906                                 DAG.getConstant(Value, dl, MVT::i32));
10907       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10908     }
10909   }
10910 
10911   return SDValue();
10912 }
10913 
10914 // Specialized code to quickly check whether PotentialBVec is a BUILD_VECTOR
10915 // whose elements are all the same constant integer; that value is returned
10916 // in the reference argument ConstVal.
10917 static bool isAllConstantBuildVector(const SDValue &PotentialBVec,
10918                                      uint64_t &ConstVal) {
10919   BuildVectorSDNode *Bvec = dyn_cast<BuildVectorSDNode>(PotentialBVec);
10920   if (!Bvec)
10921     return false;
10922   ConstantSDNode *FirstElt = dyn_cast<ConstantSDNode>(Bvec->getOperand(0));
10923   if (!FirstElt)
10924     return false;
10925   EVT VT = Bvec->getValueType(0);
10926   unsigned NumElts = VT.getVectorNumElements();
10927   for (unsigned i = 1; i < NumElts; ++i)
10928     if (dyn_cast<ConstantSDNode>(Bvec->getOperand(i)) != FirstElt)
10929       return false;
10930   ConstVal = FirstElt->getZExtValue();
10931   return true;
10932 }
10933 
10934 // Attempt to form a vector S[LR]I from (or (and X, BvecC1), (lsl Y, C2)),
10935 // to (SLI X, Y, C2), where X and Y have matching vector types, BvecC1 is a
10936 // BUILD_VECTOR with constant element C1, C2 is a constant, and:
10937 //   - for the SLI case: C1 == ~(Ones(ElemSizeInBits) << C2)
10938 //   - for the SRI case: C1 == ~(Ones(ElemSizeInBits) >> C2)
10939 // The (or (lsl Y, C2), (and X, BvecC1)) case is also handled.
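// For illustration (not from the original source): with 32-bit elements and
// C2 == 8, the SLI form requires C1 == 0x000000FF (the 8 low bits the shifted
// Y cannot produce), and the SRI form requires C1 == 0xFF000000.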
10940 static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) {
10941   EVT VT = N->getValueType(0);
10942 
10943   if (!VT.isVector())
10944     return SDValue();
10945 
10946   SDLoc DL(N);
10947 
10948   SDValue And;
10949   SDValue Shift;
10950 
10951   SDValue FirstOp = N->getOperand(0);
10952   unsigned FirstOpc = FirstOp.getOpcode();
10953   SDValue SecondOp = N->getOperand(1);
10954   unsigned SecondOpc = SecondOp.getOpcode();
10955 
10956   // Is one of the operands an AND or a BICi? The AND may have been optimised to
10957   // a BICi in order to use an immediate instead of a register.
10958   // Is the other operand a shl or lshr? This will have been turned into:
10959   // AArch64ISD::VSHL vector, #shift or AArch64ISD::VLSHR vector, #shift.
10960   if ((FirstOpc == ISD::AND || FirstOpc == AArch64ISD::BICi) &&
10961       (SecondOpc == AArch64ISD::VSHL || SecondOpc == AArch64ISD::VLSHR)) {
10962     And = FirstOp;
10963     Shift = SecondOp;
10964 
10965   } else if ((SecondOpc == ISD::AND || SecondOpc == AArch64ISD::BICi) &&
10966              (FirstOpc == AArch64ISD::VSHL || FirstOpc == AArch64ISD::VLSHR)) {
10967     And = SecondOp;
10968     Shift = FirstOp;
10969   } else
10970     return SDValue();
10971 
10972   bool IsAnd = And.getOpcode() == ISD::AND;
10973   bool IsShiftRight = Shift.getOpcode() == AArch64ISD::VLSHR;
10974 
10975   // Is the shift amount constant?
10976   ConstantSDNode *C2node = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
10977   if (!C2node)
10978     return SDValue();
10979 
10980   uint64_t C1;
10981   if (IsAnd) {
10982     // Is the and mask vector all constant?
10983     if (!isAllConstantBuildVector(And.getOperand(1), C1))
10984       return SDValue();
10985   } else {
10986     // Reconstruct the corresponding AND immediate from the two BICi immediates.
10987     ConstantSDNode *C1nodeImm = dyn_cast<ConstantSDNode>(And.getOperand(1));
10988     ConstantSDNode *C1nodeShift = dyn_cast<ConstantSDNode>(And.getOperand(2));
10989     assert(C1nodeImm && C1nodeShift);
10990     C1 = ~(C1nodeImm->getZExtValue() << C1nodeShift->getZExtValue());
10991   }
10992 
10993   // Is C1 == ~(Ones(ElemSizeInBits) << C2) or
10994   // C1 == ~(Ones(ElemSizeInBits) >> C2), taking into account
10995   // how much one can shift elements of a particular size?
10996   uint64_t C2 = C2node->getZExtValue();
10997   unsigned ElemSizeInBits = VT.getScalarSizeInBits();
10998   if (C2 > ElemSizeInBits)
10999     return SDValue();
11000 
11001   APInt C1AsAPInt(ElemSizeInBits, C1);
11002   APInt RequiredC1 = IsShiftRight ? APInt::getHighBitsSet(ElemSizeInBits, C2)
11003                                   : APInt::getLowBitsSet(ElemSizeInBits, C2);
11004   if (C1AsAPInt != RequiredC1)
11005     return SDValue();
11006 
11007   SDValue X = And.getOperand(0);
11008   SDValue Y = Shift.getOperand(0);
11009 
11010   unsigned Inst = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI;
11011   SDValue ResultSLI = DAG.getNode(Inst, DL, VT, X, Y, Shift.getOperand(1));
11012 
11013   LLVM_DEBUG(dbgs() << "aarch64-lower: transformed: \n");
11014   LLVM_DEBUG(N->dump(&DAG));
11015   LLVM_DEBUG(dbgs() << "into: \n");
11016   LLVM_DEBUG(ResultSLI->dump(&DAG));
11017 
11018   ++NumShiftInserts;
11019   return ResultSLI;
11020 }
11021 
11022 SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
11023                                              SelectionDAG &DAG) const {
11024   if (useSVEForFixedLengthVectorVT(Op.getValueType()))
11025     return LowerToScalableOp(Op, DAG);
11026 
11027   // Attempt to form a vector S[LR]I from (or (and X, C1), (lsl Y, C2))
11028   if (SDValue Res = tryLowerToSLI(Op.getNode(), DAG))
11029     return Res;
11030 
11031   EVT VT = Op.getValueType();
11032 
11033   SDValue LHS = Op.getOperand(0);
11034   BuildVectorSDNode *BVN =
11035       dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode());
11036   if (!BVN) {
11037     // OR commutes, so try swapping the operands.
11038     LHS = Op.getOperand(1);
11039     BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode());
11040   }
11041   if (!BVN)
11042     return Op;
11043 
11044   APInt DefBits(VT.getSizeInBits(), 0);
11045   APInt UndefBits(VT.getSizeInBits(), 0);
11046   if (resolveBuildVector(BVN, DefBits, UndefBits)) {
11047     SDValue NewOp;
11048 
11049     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG,
11050                                     DefBits, &LHS)) ||
11051         (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG,
11052                                     DefBits, &LHS)))
11053       return NewOp;
11054 
11055     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG,
11056                                     UndefBits, &LHS)) ||
11057         (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG,
11058                                     UndefBits, &LHS)))
11059       return NewOp;
11060   }
11061 
11062   // We can always fall back to a non-immediate OR.
11063   return Op;
11064 }
11065 
11066 // Normalize the operands of BUILD_VECTOR. The value of constant operands will
11067 // be truncated to fit element width.
11068 static SDValue NormalizeBuildVector(SDValue Op,
11069                                     SelectionDAG &DAG) {
11070   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
11071   SDLoc dl(Op);
11072   EVT VT = Op.getValueType();
11073   EVT EltTy = VT.getVectorElementType();
11074 
11075   if (EltTy.isFloatingPoint() || EltTy.getSizeInBits() > 16)
11076     return Op;
11077 
11078   SmallVector<SDValue, 16> Ops;
11079   for (SDValue Lane : Op->ops()) {
11080     // For integer vectors, type legalization would have promoted the
11081     // operands already. Otherwise, if Op is a floating-point splat
11082     // (with operands cast to integers), then the only possibilities
11083     // are constants and UNDEFs.
11084     if (auto *CstLane = dyn_cast<ConstantSDNode>(Lane)) {
11085       APInt LowBits(EltTy.getSizeInBits(),
11086                     CstLane->getZExtValue());
11087       Lane = DAG.getConstant(LowBits.getZExtValue(), dl, MVT::i32);
11088     } else if (Lane.getNode()->isUndef()) {
11089       Lane = DAG.getUNDEF(MVT::i32);
11090     } else {
11091       assert(Lane.getValueType() == MVT::i32 &&
11092              "Unexpected BUILD_VECTOR operand type");
11093     }
11094     Ops.push_back(Lane);
11095   }
11096   return DAG.getBuildVector(VT, dl, Ops);
11097 }
11098 
11099 static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG) {
11100   EVT VT = Op.getValueType();
11101 
11102   APInt DefBits(VT.getSizeInBits(), 0);
11103   APInt UndefBits(VT.getSizeInBits(), 0);
11104   BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
11105   if (resolveBuildVector(BVN, DefBits, UndefBits)) {
11106     SDValue NewOp;
11107     if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
11108         (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
11109         (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
11110         (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
11111         (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
11112         (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
11113       return NewOp;
11114 
11115     DefBits = ~DefBits;
11116     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) ||
11117         (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) ||
11118         (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits)))
11119       return NewOp;
11120 
11121     DefBits = UndefBits;
11122     if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
11123         (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
11124         (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
11125         (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
11126         (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
11127         (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
11128       return NewOp;
11129 
11130     DefBits = ~UndefBits;
11131     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) ||
11132         (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) ||
11133         (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits)))
11134       return NewOp;
11135   }
11136 
11137   return SDValue();
11138 }
11139 
11140 SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
11141                                                  SelectionDAG &DAG) const {
11142   EVT VT = Op.getValueType();
11143 
11144   if (useSVEForFixedLengthVectorVT(VT)) {
11145     if (auto SeqInfo = cast<BuildVectorSDNode>(Op)->isConstantSequence()) {
11146       SDLoc DL(Op);
11147       EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
11148       SDValue Start = DAG.getConstant(SeqInfo->first, DL, ContainerVT);
11149       SDValue Steps = DAG.getStepVector(DL, ContainerVT, SeqInfo->second);
11150       SDValue Seq = DAG.getNode(ISD::ADD, DL, ContainerVT, Start, Steps);
11151       return convertFromScalableVector(DAG, Op.getValueType(), Seq);
11152     }
11153 
11154     // Revert to common legalisation for all other variants.
11155     return SDValue();
11156   }
11157 
11158   // Try to build a simple constant vector.
11159   Op = NormalizeBuildVector(Op, DAG);
11160   if (VT.isInteger()) {
11161     // Certain vector constants, used to express things like logical NOT and
11162     // arithmetic NEG, are passed through unmodified.  This allows special
11163     // patterns for these operations to match, which will lower these constants
11164     // to whatever is proven necessary.
11165     BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
11166     if (BVN->isConstant())
11167       if (ConstantSDNode *Const = BVN->getConstantSplatNode()) {
11168         unsigned BitSize = VT.getVectorElementType().getSizeInBits();
11169         APInt Val(BitSize,
11170                   Const->getAPIntValue().zextOrTrunc(BitSize).getZExtValue());
11171         if (Val.isZero() || Val.isAllOnes())
11172           return Op;
11173       }
11174   }
11175 
11176   if (SDValue V = ConstantBuildVector(Op, DAG))
11177     return V;
11178 
11179   // Scan through the operands to find some interesting properties we can
11180   // exploit:
11181   //   1) If only one value is used, we can use a DUP, or
11182   //   2) if only the low element is not undef, we can just insert that, or
11183   //   3) if only one constant value is used (w/ some non-constant lanes),
11184   //      we can splat the constant value into the whole vector then fill
11185   //      in the non-constant lanes.
11186   //   4) FIXME: If different constant values are used, but we can intelligently
11187   //             select the values we'll be overwriting for the non-constant
11188   //             lanes such that we can directly materialize the vector
11189   //             some other way (MOVI, e.g.), we can be sneaky.
11190   //   5) if all operands are EXTRACT_VECTOR_ELT, check for VUZP.
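  // As an illustration of case 3 (not from the original source): for the
  // v4i32 vector <7, 7, %x, 7> we first splat the constant 7 and then emit a
  // single INSERT_VECTOR_ELT of %x into lane 2.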
11191   SDLoc dl(Op);
11192   unsigned NumElts = VT.getVectorNumElements();
11193   bool isOnlyLowElement = true;
11194   bool usesOnlyOneValue = true;
11195   bool usesOnlyOneConstantValue = true;
11196   bool isConstant = true;
11197   bool AllLanesExtractElt = true;
11198   unsigned NumConstantLanes = 0;
11199   unsigned NumDifferentLanes = 0;
11200   unsigned NumUndefLanes = 0;
11201   SDValue Value;
11202   SDValue ConstantValue;
11203   for (unsigned i = 0; i < NumElts; ++i) {
11204     SDValue V = Op.getOperand(i);
11205     if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11206       AllLanesExtractElt = false;
11207     if (V.isUndef()) {
11208       ++NumUndefLanes;
11209       continue;
11210     }
11211     if (i > 0)
11212       isOnlyLowElement = false;
11213     if (!isIntOrFPConstant(V))
11214       isConstant = false;
11215 
11216     if (isIntOrFPConstant(V)) {
11217       ++NumConstantLanes;
11218       if (!ConstantValue.getNode())
11219         ConstantValue = V;
11220       else if (ConstantValue != V)
11221         usesOnlyOneConstantValue = false;
11222     }
11223 
11224     if (!Value.getNode())
11225       Value = V;
11226     else if (V != Value) {
11227       usesOnlyOneValue = false;
11228       ++NumDifferentLanes;
11229     }
11230   }
11231 
11232   if (!Value.getNode()) {
11233     LLVM_DEBUG(
11234         dbgs() << "LowerBUILD_VECTOR: value undefined, creating undef node\n");
11235     return DAG.getUNDEF(VT);
11236   }
11237 
11238   // Convert BUILD_VECTOR where all elements but the lowest are undef into
11239   // SCALAR_TO_VECTOR, except for when we have a single-element constant vector
11240   // as SimplifyDemandedBits will just turn that back into BUILD_VECTOR.
11241   if (isOnlyLowElement && !(NumElts == 1 && isIntOrFPConstant(Value))) {
11242     LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: only low element used, creating 1 "
11243                          "SCALAR_TO_VECTOR node\n");
11244     return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
11245   }
11246 
11247   if (AllLanesExtractElt) {
11248     SDNode *Vector = nullptr;
11249     bool Even = false;
11250     bool Odd = false;
11251     // Check whether the extract elements match the Even pattern <0,2,4,...> or
11252     // the Odd pattern <1,3,5,...>.
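    // (For example, a v4i16 BUILD_VECTOR of extracts at indices 0,2,4,6 of one
    // v8i16 source matches the Even pattern and becomes a UZP1.)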
11253     for (unsigned i = 0; i < NumElts; ++i) {
11254       SDValue V = Op.getOperand(i);
11255       const SDNode *N = V.getNode();
11256       if (!isa<ConstantSDNode>(N->getOperand(1)))
11257         break;
11258       SDValue N0 = N->getOperand(0);
11259 
11260       // All elements are extracted from the same vector.
11261       if (!Vector) {
11262         Vector = N0.getNode();
11263         // Check that the type of EXTRACT_VECTOR_ELT matches the type of
11264         // BUILD_VECTOR.
11265         if (VT.getVectorElementType() !=
11266             N0.getValueType().getVectorElementType())
11267           break;
11268       } else if (Vector != N0.getNode()) {
11269         Odd = false;
11270         Even = false;
11271         break;
11272       }
11273 
11274       // Extracted values are either at Even indices <0,2,4,...> or at Odd
11275       // indices <1,3,5,...>.
11276       uint64_t Val = N->getConstantOperandVal(1);
11277       if (Val == 2 * i) {
11278         Even = true;
11279         continue;
11280       }
11281       if (Val - 1 == 2 * i) {
11282         Odd = true;
11283         continue;
11284       }
11285 
11286       // Something does not match: abort.
11287       Odd = false;
11288       Even = false;
11289       break;
11290     }
11291     if (Even || Odd) {
11292       SDValue LHS =
11293           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0),
11294                       DAG.getConstant(0, dl, MVT::i64));
11295       SDValue RHS =
11296           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0),
11297                       DAG.getConstant(NumElts, dl, MVT::i64));
11298 
11299       if (Even && !Odd)
11300         return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), LHS,
11301                            RHS);
11302       if (Odd && !Even)
11303         return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), LHS,
11304                            RHS);
11305     }
11306   }
11307 
11308   // Use DUP for non-constant splats. For f32 constant splats, reduce to
11309   // i32 and try again.
11310   if (usesOnlyOneValue) {
11311     if (!isConstant) {
11312       if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
11313           Value.getValueType() != VT) {
11314         LLVM_DEBUG(
11315             dbgs() << "LowerBUILD_VECTOR: use DUP for non-constant splats\n");
11316         return DAG.getNode(AArch64ISD::DUP, dl, VT, Value);
11317       }
11318 
11319       // This is actually a DUPLANExx operation, which keeps everything vectory.
11320 
11321       SDValue Lane = Value.getOperand(1);
11322       Value = Value.getOperand(0);
11323       if (Value.getValueSizeInBits() == 64) {
11324         LLVM_DEBUG(
11325             dbgs() << "LowerBUILD_VECTOR: DUPLANE works on 128-bit vectors, "
11326                       "widening it\n");
11327         Value = WidenVector(Value, DAG);
11328       }
11329 
11330       unsigned Opcode = getDUPLANEOp(VT.getVectorElementType());
11331       return DAG.getNode(Opcode, dl, VT, Value, Lane);
11332     }
11333 
11334     if (VT.getVectorElementType().isFloatingPoint()) {
11335       SmallVector<SDValue, 8> Ops;
11336       EVT EltTy = VT.getVectorElementType();
11337       assert ((EltTy == MVT::f16 || EltTy == MVT::bf16 || EltTy == MVT::f32 ||
11338                EltTy == MVT::f64) && "Unsupported floating-point vector type");
11339       LLVM_DEBUG(
11340           dbgs() << "LowerBUILD_VECTOR: float constant splats, creating int "
11341                     "BITCASTS, and try again\n");
11342       MVT NewType = MVT::getIntegerVT(EltTy.getSizeInBits());
11343       for (unsigned i = 0; i < NumElts; ++i)
11344         Ops.push_back(DAG.getNode(ISD::BITCAST, dl, NewType, Op.getOperand(i)));
11345       EVT VecVT = EVT::getVectorVT(*DAG.getContext(), NewType, NumElts);
11346       SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
11347       LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: trying to lower new vector: ";
11348                  Val.dump(););
11349       Val = LowerBUILD_VECTOR(Val, DAG);
11350       if (Val.getNode())
11351         return DAG.getNode(ISD::BITCAST, dl, VT, Val);
11352     }
11353   }
11354 
11355   // If we need to insert a small number of different non-constant elements and
11356   // the vector width is sufficiently large, prefer using DUP with the common
11357   // value and INSERT_VECTOR_ELT for the different lanes. If DUP is preferred,
11358   // skip the constant lane handling below.
11359   bool PreferDUPAndInsert =
11360       !isConstant && NumDifferentLanes >= 1 &&
11361       NumDifferentLanes < ((NumElts - NumUndefLanes) / 2) &&
11362       NumDifferentLanes >= NumConstantLanes;
11363 
11364   // If only one constant value was used, and it occupies more than one lane,
11365   // start by splatting that value, then replace the non-constant lanes. This
11366   // is better than the default, which will perform a separate initialization
11367   // for each lane.
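  // For example (illustrative), (v4i32 (build_vector 1, 1, x, 1)) is lowered
  // as a splat of the constant 1 with x inserted into lane 2.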
11368   if (!PreferDUPAndInsert && NumConstantLanes > 0 && usesOnlyOneConstantValue) {
11369     // Firstly, try to materialize the splat constant.
11370     SDValue Vec = DAG.getSplatBuildVector(VT, dl, ConstantValue),
11371             Val = ConstantBuildVector(Vec, DAG);
11372     if (!Val) {
11373       // Otherwise, materialize the constant and splat it.
11374       Val = DAG.getNode(AArch64ISD::DUP, dl, VT, ConstantValue);
11375       DAG.ReplaceAllUsesWith(Vec.getNode(), &Val);
11376     }
11377 
11378     // Now insert the non-constant lanes.
11379     for (unsigned i = 0; i < NumElts; ++i) {
11380       SDValue V = Op.getOperand(i);
11381       SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
11382       if (!isIntOrFPConstant(V))
11383         // Note that type legalization likely mucked about with the VT of the
11384         // source operand, so we may have to convert it here before inserting.
11385         Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, V, LaneIdx);
11386     }
11387     return Val;
11388   }
11389 
11390   // This will generate a load from the constant pool.
11391   if (isConstant) {
11392     LLVM_DEBUG(
11393         dbgs() << "LowerBUILD_VECTOR: all elements are constant, use default "
11394                   "expansion\n");
11395     return SDValue();
11396   }
11397 
11398   // Detect patterns of a0,a1,a2,a3,b0,b1,b2,b3,c0,c1,c2,c3,d0,d1,d2,d3 from
11399   // v4i32s. This is really a truncate, which we can construct out of (legal)
11400   // concats and truncate nodes.
11401   if (SDValue M = ReconstructTruncateFromBuildVector(Op, DAG))
11402     return M;
11403 
11404   // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
11405   if (NumElts >= 4) {
11406     if (SDValue shuffle = ReconstructShuffle(Op, DAG))
11407       return shuffle;
11408   }
11409 
11410   if (PreferDUPAndInsert) {
11411     // First, build a constant vector with the common element.
11412     SmallVector<SDValue, 8> Ops(NumElts, Value);
11413     SDValue NewVector = LowerBUILD_VECTOR(DAG.getBuildVector(VT, dl, Ops), DAG);
11414     // Next, insert the elements that do not match the common value.
11415     for (unsigned I = 0; I < NumElts; ++I)
11416       if (Op.getOperand(I) != Value)
11417         NewVector =
11418             DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, NewVector,
11419                         Op.getOperand(I), DAG.getConstant(I, dl, MVT::i64));
11420 
11421     return NewVector;
11422   }
11423 
11424   // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
11425   // know the default expansion would otherwise fall back on something even
  // worse. For a vector with one or two non-undef values, that default is
  // scalar_to_vector for the elements followed by a shuffle (provided the
  // shuffle is valid for the target); for everything else it is element-wise
  // materialization on the stack followed by a load.
11430   if (!isConstant && !usesOnlyOneValue) {
11431     LLVM_DEBUG(
11432         dbgs() << "LowerBUILD_VECTOR: alternatives failed, creating sequence "
11433                   "of INSERT_VECTOR_ELT\n");
11434 
11435     SDValue Vec = DAG.getUNDEF(VT);
11436     SDValue Op0 = Op.getOperand(0);
11437     unsigned i = 0;
11438 
11439     // Use SCALAR_TO_VECTOR for lane zero to
11440     // a) Avoid a RMW dependency on the full vector register, and
11441     // b) Allow the register coalescer to fold away the copy if the
11442     //    value is already in an S or D register, and we're forced to emit an
11443     //    INSERT_SUBREG that we can't fold anywhere.
11444     //
11445     // We also allow types like i8 and i16 which are illegal scalar but legal
11446     // vector element types. After type-legalization the inserted value is
11447     // extended (i32) and it is safe to cast them to the vector type by ignoring
11448     // the upper bits of the lowest lane (e.g. v8i8, v4i16).
11449     if (!Op0.isUndef()) {
      LLVM_DEBUG(dbgs() << "Creating node for op0; it is not undef:\n");
11451       Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op0);
11452       ++i;
11453     }
11454     LLVM_DEBUG(if (i < NumElts) dbgs()
11455                    << "Creating nodes for the other vector elements:\n";);
11456     for (; i < NumElts; ++i) {
11457       SDValue V = Op.getOperand(i);
11458       if (V.isUndef())
11459         continue;
11460       SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
11461       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
11462     }
11463     return Vec;
11464   }
11465 
11466   LLVM_DEBUG(
11467       dbgs() << "LowerBUILD_VECTOR: use default expansion, failed to find "
11468                 "better alternative\n");
11469   return SDValue();
11470 }
11471 
11472 SDValue AArch64TargetLowering::LowerCONCAT_VECTORS(SDValue Op,
11473                                                    SelectionDAG &DAG) const {
11474   if (useSVEForFixedLengthVectorVT(Op.getValueType()))
11475     return LowerFixedLengthConcatVectorsToSVE(Op, DAG);
11476 
11477   assert(Op.getValueType().isScalableVector() &&
11478          isTypeLegal(Op.getValueType()) &&
11479          "Expected legal scalable vector type!");
11480 
11481   if (isTypeLegal(Op.getOperand(0).getValueType())) {
11482     unsigned NumOperands = Op->getNumOperands();
11483     assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
11484            "Unexpected number of operands in CONCAT_VECTORS");
11485 
11486     if (NumOperands == 2)
11487       return Op;
11488 
11489     // Concat each pair of subvectors and pack into the lower half of the array.
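    // For example (illustrative), with four operands {A, B, C, D} the first
    // pass produces {AB, CD} and the second pass produces the final ABCD.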
11490     SmallVector<SDValue> ConcatOps(Op->op_begin(), Op->op_end());
11491     while (ConcatOps.size() > 1) {
11492       for (unsigned I = 0, E = ConcatOps.size(); I != E; I += 2) {
11493         SDValue V1 = ConcatOps[I];
11494         SDValue V2 = ConcatOps[I + 1];
11495         EVT SubVT = V1.getValueType();
11496         EVT PairVT = SubVT.getDoubleNumVectorElementsVT(*DAG.getContext());
11497         ConcatOps[I / 2] =
11498             DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), PairVT, V1, V2);
11499       }
11500       ConcatOps.resize(ConcatOps.size() / 2);
11501     }
11502     return ConcatOps[0];
11503   }
11504 
11505   return SDValue();
11506 }
11507 
11508 SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
11509                                                       SelectionDAG &DAG) const {
11510   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!");
11511 
11512   if (useSVEForFixedLengthVectorVT(Op.getValueType()))
11513     return LowerFixedLengthInsertVectorElt(Op, DAG);
11514 
11515   // Check for non-constant or out of range lane.
11516   EVT VT = Op.getOperand(0).getValueType();
11517 
11518   if (VT.getScalarType() == MVT::i1) {
11519     EVT VectorVT = getPromotedVTForPredicate(VT);
11520     SDLoc DL(Op);
11521     SDValue ExtendedVector =
11522         DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, VectorVT);
11523     SDValue ExtendedValue =
11524         DAG.getAnyExtOrTrunc(Op.getOperand(1), DL,
11525                              VectorVT.getScalarType().getSizeInBits() < 32
11526                                  ? MVT::i32
11527                                  : VectorVT.getScalarType());
11528     ExtendedVector =
11529         DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VectorVT, ExtendedVector,
11530                     ExtendedValue, Op.getOperand(2));
11531     return DAG.getAnyExtOrTrunc(ExtendedVector, DL, VT);
11532   }
11533 
11534   ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(2));
11535   if (!CI || CI->getZExtValue() >= VT.getVectorNumElements())
11536     return SDValue();
11537 
11538   // Insertion/extraction are legal for V128 types.
11539   if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
11540       VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
11541       VT == MVT::v8f16 || VT == MVT::v8bf16)
11542     return Op;
11543 
11544   if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 &&
11545       VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16 &&
11546       VT != MVT::v4bf16)
11547     return SDValue();
11548 
  // For V64 types, we perform insertion by expanding the value
  // to a V128 type and performing the insertion on that.
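  // For example (illustrative), an insert into v4i16 is widened to an insert
  // into v8i16 and the result is narrowed back to v4i16.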
11551   SDLoc DL(Op);
11552   SDValue WideVec = WidenVector(Op.getOperand(0), DAG);
11553   EVT WideTy = WideVec.getValueType();
11554 
11555   SDValue Node = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideTy, WideVec,
11556                              Op.getOperand(1), Op.getOperand(2));
11557   // Re-narrow the resultant vector.
11558   return NarrowVector(Node, DAG);
11559 }
11560 
11561 SDValue
11562 AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
11563                                                SelectionDAG &DAG) const {
11564   assert(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unknown opcode!");
11565   EVT VT = Op.getOperand(0).getValueType();
11566 
11567   if (VT.getScalarType() == MVT::i1) {
11568     // We can't directly extract from an SVE predicate; extend it first.
11569     // (This isn't the only possible lowering, but it's straightforward.)
11570     EVT VectorVT = getPromotedVTForPredicate(VT);
11571     SDLoc DL(Op);
11572     SDValue Extend =
11573         DAG.getNode(ISD::ANY_EXTEND, DL, VectorVT, Op.getOperand(0));
11574     MVT ExtractTy = VectorVT == MVT::nxv2i64 ? MVT::i64 : MVT::i32;
11575     SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractTy,
11576                                   Extend, Op.getOperand(1));
11577     return DAG.getAnyExtOrTrunc(Extract, DL, Op.getValueType());
11578   }
11579 
11580   if (useSVEForFixedLengthVectorVT(VT))
11581     return LowerFixedLengthExtractVectorElt(Op, DAG);
11582 
11583   // Check for non-constant or out of range lane.
11584   ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(1));
11585   if (!CI || CI->getZExtValue() >= VT.getVectorNumElements())
11586     return SDValue();
11587 
11588   // Insertion/extraction are legal for V128 types.
11589   if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
11590       VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
11591       VT == MVT::v8f16 || VT == MVT::v8bf16)
11592     return Op;
11593 
11594   if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 &&
11595       VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16 &&
11596       VT != MVT::v4bf16)
11597     return SDValue();
11598 
  // For V64 types, we perform extraction by expanding the value
  // to a V128 type and performing the extraction on that.
11601   SDLoc DL(Op);
11602   SDValue WideVec = WidenVector(Op.getOperand(0), DAG);
11603   EVT WideTy = WideVec.getValueType();
11604 
11605   EVT ExtrTy = WideTy.getVectorElementType();
11606   if (ExtrTy == MVT::i16 || ExtrTy == MVT::i8)
11607     ExtrTy = MVT::i32;
11608 
11609   // For extractions, we just return the result directly.
11610   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtrTy, WideVec,
11611                      Op.getOperand(1));
11612 }
11613 
11614 SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
11615                                                       SelectionDAG &DAG) const {
11616   assert(Op.getValueType().isFixedLengthVector() &&
11617          "Only cases that extract a fixed length vector are supported!");
11618 
11619   EVT InVT = Op.getOperand(0).getValueType();
11620   unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
11621   unsigned Size = Op.getValueSizeInBits();
11622 
11623   // If we don't have legal types yet, do nothing
11624   if (!DAG.getTargetLoweringInfo().isTypeLegal(InVT))
11625     return SDValue();
11626 
11627   if (InVT.isScalableVector()) {
11628     // This will be matched by custom code during ISelDAGToDAG.
11629     if (Idx == 0 && isPackedVectorType(InVT, DAG))
11630       return Op;
11631 
11632     return SDValue();
11633   }
11634 
11635   // This will get lowered to an appropriate EXTRACT_SUBREG in ISel.
11636   if (Idx == 0 && InVT.getSizeInBits() <= 128)
11637     return Op;
11638 
11639   // If this is extracting the upper 64-bits of a 128-bit vector, we match
11640   // that directly.
11641   if (Size == 64 && Idx * InVT.getScalarSizeInBits() == 64 &&
11642       InVT.getSizeInBits() == 128)
11643     return Op;
11644 
11645   if (useSVEForFixedLengthVectorVT(InVT)) {
11646     SDLoc DL(Op);
11647 
11648     EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
11649     SDValue NewInVec =
11650         convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
11651 
11652     SDValue Splice = DAG.getNode(ISD::VECTOR_SPLICE, DL, ContainerVT, NewInVec,
11653                                  NewInVec, DAG.getConstant(Idx, DL, MVT::i64));
11654     return convertFromScalableVector(DAG, Op.getValueType(), Splice);
11655   }
11656 
11657   return SDValue();
11658 }
11659 
11660 SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
11661                                                      SelectionDAG &DAG) const {
11662   assert(Op.getValueType().isScalableVector() &&
11663          "Only expect to lower inserts into scalable vectors!");
11664 
11665   EVT InVT = Op.getOperand(1).getValueType();
11666   unsigned Idx = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
11667 
11668   SDValue Vec0 = Op.getOperand(0);
11669   SDValue Vec1 = Op.getOperand(1);
11670   SDLoc DL(Op);
11671   EVT VT = Op.getValueType();
11672 
11673   if (InVT.isScalableVector()) {
11674     if (!isTypeLegal(VT))
11675       return SDValue();
11676 
11677     // Break down insert_subvector into simpler parts.
11678     if (VT.getVectorElementType() == MVT::i1) {
11679       unsigned NumElts = VT.getVectorMinNumElements();
11680       EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
11681 
11682       SDValue Lo, Hi;
11683       Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, Vec0,
11684                        DAG.getVectorIdxConstant(0, DL));
11685       Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, Vec0,
11686                        DAG.getVectorIdxConstant(NumElts / 2, DL));
11687       if (Idx < (NumElts / 2)) {
11688         SDValue NewLo = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, HalfVT, Lo, Vec1,
11689                                     DAG.getVectorIdxConstant(Idx, DL));
11690         return DAG.getNode(AArch64ISD::UZP1, DL, VT, NewLo, Hi);
11691       } else {
11692         SDValue NewHi =
11693             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, HalfVT, Hi, Vec1,
11694                         DAG.getVectorIdxConstant(Idx - (NumElts / 2), DL));
11695         return DAG.getNode(AArch64ISD::UZP1, DL, VT, Lo, NewHi);
11696       }
11697     }
11698 
11699     // Ensure the subvector is half the size of the main vector.
11700     if (VT.getVectorElementCount() != (InVT.getVectorElementCount() * 2))
11701       return SDValue();
11702 
    // Here narrow and wide refer to the vector element types. After "casting",
    // both vectors must have the same bit length, so because the subvector
    // has fewer elements, those elements need to be bigger.
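    // For example (illustrative), when inserting an nxv2f32 subvector into an
    // nxv4f32 vector, NarrowVT is nxv4i32 and WideVT is nxv2i64.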
11706     EVT NarrowVT = getPackedSVEVectorVT(VT.getVectorElementCount());
11707     EVT WideVT = getPackedSVEVectorVT(InVT.getVectorElementCount());
11708 
11709     // NOP cast operands to the largest legal vector of the same element count.
11710     if (VT.isFloatingPoint()) {
11711       Vec0 = getSVESafeBitCast(NarrowVT, Vec0, DAG);
11712       Vec1 = getSVESafeBitCast(WideVT, Vec1, DAG);
11713     } else {
11714       // Legal integer vectors are already their largest so Vec0 is fine as is.
11715       Vec1 = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Vec1);
11716     }
11717 
11718     // To replace the top/bottom half of vector V with vector SubV we widen the
11719     // preserved half of V, concatenate this to SubV (the order depending on the
11720     // half being replaced) and then narrow the result.
11721     SDValue Narrow;
11722     if (Idx == 0) {
11723       SDValue HiVec0 = DAG.getNode(AArch64ISD::UUNPKHI, DL, WideVT, Vec0);
11724       Narrow = DAG.getNode(AArch64ISD::UZP1, DL, NarrowVT, Vec1, HiVec0);
11725     } else {
11726       assert(Idx == InVT.getVectorMinNumElements() &&
11727              "Invalid subvector index!");
11728       SDValue LoVec0 = DAG.getNode(AArch64ISD::UUNPKLO, DL, WideVT, Vec0);
11729       Narrow = DAG.getNode(AArch64ISD::UZP1, DL, NarrowVT, LoVec0, Vec1);
11730     }
11731 
11732     return getSVESafeBitCast(VT, Narrow, DAG);
11733   }
11734 
11735   if (Idx == 0 && isPackedVectorType(VT, DAG)) {
11736     // This will be matched by custom code during ISelDAGToDAG.
11737     if (Vec0.isUndef())
11738       return Op;
11739 
11740     Optional<unsigned> PredPattern =
11741         getSVEPredPatternFromNumElements(InVT.getVectorNumElements());
11742     auto PredTy = VT.changeVectorElementType(MVT::i1);
11743     SDValue PTrue = getPTrue(DAG, DL, PredTy, *PredPattern);
11744     SDValue ScalableVec1 = convertToScalableVector(DAG, VT, Vec1);
11745     return DAG.getNode(ISD::VSELECT, DL, VT, PTrue, ScalableVec1, Vec0);
11746   }
11747 
11748   return SDValue();
11749 }
11750 
11751 static bool isPow2Splat(SDValue Op, uint64_t &SplatVal, bool &Negated) {
11752   if (Op.getOpcode() != AArch64ISD::DUP &&
11753       Op.getOpcode() != ISD::SPLAT_VECTOR &&
11754       Op.getOpcode() != ISD::BUILD_VECTOR)
11755     return false;
11756 
11757   if (Op.getOpcode() == ISD::BUILD_VECTOR &&
11758       !isAllConstantBuildVector(Op, SplatVal))
11759     return false;
11760 
11761   if (Op.getOpcode() != ISD::BUILD_VECTOR &&
11762       !isa<ConstantSDNode>(Op->getOperand(0)))
11763     return false;
11764 
11765   SplatVal = Op->getConstantOperandVal(0);
11766   if (Op.getValueType().getVectorElementType() != MVT::i64)
11767     SplatVal = (int32_t)SplatVal;
11768 
11769   Negated = false;
11770   if (isPowerOf2_64(SplatVal))
11771     return true;
11772 
11773   Negated = true;
11774   if (isPowerOf2_64(-SplatVal)) {
11775     SplatVal = -SplatVal;
11776     return true;
11777   }
11778 
11779   return false;
11780 }
11781 
11782 SDValue AArch64TargetLowering::LowerDIV(SDValue Op, SelectionDAG &DAG) const {
11783   EVT VT = Op.getValueType();
11784   SDLoc dl(Op);
11785 
11786   if (useSVEForFixedLengthVectorVT(VT, /*OverrideNEON=*/true))
11787     return LowerFixedLengthVectorIntDivideToSVE(Op, DAG);
11788 
11789   assert(VT.isScalableVector() && "Expected a scalable vector.");
11790 
11791   bool Signed = Op.getOpcode() == ISD::SDIV;
11792   unsigned PredOpcode = Signed ? AArch64ISD::SDIV_PRED : AArch64ISD::UDIV_PRED;
11793 
11794   bool Negated;
11795   uint64_t SplatVal;
11796   if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated)) {
11797     SDValue Pg = getPredicateForScalableVector(DAG, dl, VT);
11798     SDValue Res =
11799         DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, dl, VT, Pg, Op->getOperand(0),
11800                     DAG.getTargetConstant(Log2_64(SplatVal), dl, MVT::i32));
11801     if (Negated)
11802       Res = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), Res);
11803 
11804     return Res;
11805   }
11806 
11807   if (VT == MVT::nxv4i32 || VT == MVT::nxv2i64)
11808     return LowerToPredicatedOp(Op, DAG, PredOpcode);
11809 
11810   // SVE doesn't have i8 and i16 DIV operations; widen them to 32-bit
11811   // operations, and truncate the result.
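  // For example (illustrative), an nxv8i16 division is performed as two
  // nxv4i32 divisions on the unpacked halves, with the results re-packed
  // into nxv8i16 via UZP1.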
11812   EVT WidenedVT;
11813   if (VT == MVT::nxv16i8)
11814     WidenedVT = MVT::nxv8i16;
11815   else if (VT == MVT::nxv8i16)
11816     WidenedVT = MVT::nxv4i32;
11817   else
11818     llvm_unreachable("Unexpected Custom DIV operation");
11819 
11820   unsigned UnpkLo = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
11821   unsigned UnpkHi = Signed ? AArch64ISD::SUNPKHI : AArch64ISD::UUNPKHI;
11822   SDValue Op0Lo = DAG.getNode(UnpkLo, dl, WidenedVT, Op.getOperand(0));
11823   SDValue Op1Lo = DAG.getNode(UnpkLo, dl, WidenedVT, Op.getOperand(1));
11824   SDValue Op0Hi = DAG.getNode(UnpkHi, dl, WidenedVT, Op.getOperand(0));
11825   SDValue Op1Hi = DAG.getNode(UnpkHi, dl, WidenedVT, Op.getOperand(1));
11826   SDValue ResultLo = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0Lo, Op1Lo);
11827   SDValue ResultHi = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0Hi, Op1Hi);
11828   return DAG.getNode(AArch64ISD::UZP1, dl, VT, ResultLo, ResultHi);
11829 }
11830 
11831 bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
11832   // Currently no fixed length shuffles that require SVE are legal.
11833   if (useSVEForFixedLengthVectorVT(VT))
11834     return false;
11835 
11836   if (VT.getVectorNumElements() == 4 &&
11837       (VT.is128BitVector() || VT.is64BitVector())) {
11838     unsigned Cost = getPerfectShuffleCost(M);
11839     if (Cost <= 1)
11840       return true;
11841   }
11842 
11843   bool DummyBool;
11844   int DummyInt;
11845   unsigned DummyUnsigned;
11846 
11847   return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) || isREVMask(M, VT, 64) ||
11848           isREVMask(M, VT, 32) || isREVMask(M, VT, 16) ||
11849           isEXTMask(M, VT, DummyBool, DummyUnsigned) ||
11850           // isTBLMask(M, VT) || // FIXME: Port TBL support from ARM.
11851           isTRNMask(M, VT, DummyUnsigned) || isUZPMask(M, VT, DummyUnsigned) ||
11852           isZIPMask(M, VT, DummyUnsigned) ||
11853           isTRN_v_undef_Mask(M, VT, DummyUnsigned) ||
11854           isUZP_v_undef_Mask(M, VT, DummyUnsigned) ||
11855           isZIP_v_undef_Mask(M, VT, DummyUnsigned) ||
11856           isINSMask(M, VT.getVectorNumElements(), DummyBool, DummyInt) ||
11857           isConcatMask(M, VT, VT.getSizeInBits() == 128));
11858 }
11859 
11860 bool AArch64TargetLowering::isVectorClearMaskLegal(ArrayRef<int> M,
11861                                                    EVT VT) const {
11862   // Just delegate to the generic legality, clear masks aren't special.
11863   return isShuffleMaskLegal(M, VT);
11864 }
11865 
11866 /// getVShiftImm - Check if this is a valid build_vector for the immediate
11867 /// operand of a vector shift operation, where all the elements of the
11868 /// build_vector must have the same constant integer value.
11869 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
11870   // Ignore bit_converts.
11871   while (Op.getOpcode() == ISD::BITCAST)
11872     Op = Op.getOperand(0);
11873   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
11874   APInt SplatBits, SplatUndef;
11875   unsigned SplatBitSize;
11876   bool HasAnyUndefs;
11877   if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
11878                                     HasAnyUndefs, ElementBits) ||
11879       SplatBitSize > ElementBits)
11880     return false;
11881   Cnt = SplatBits.getSExtValue();
11882   return true;
11883 }
11884 
11885 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
11886 /// operand of a vector shift left operation.  That value must be in the range:
11887 ///   0 <= Value < ElementBits for a left shift; or
11888 ///   0 <= Value <= ElementBits for a long left shift.
11889 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
11890   assert(VT.isVector() && "vector shift count is not a vector type");
11891   int64_t ElementBits = VT.getScalarSizeInBits();
11892   if (!getVShiftImm(Op, ElementBits, Cnt))
11893     return false;
11894   return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
11895 }
11896 
11897 /// isVShiftRImm - Check if this is a valid build_vector for the immediate
11898 /// operand of a vector shift right operation. The value must be in the range:
///   1 <= Value <= ElementBits for a right shift; or
///   1 <= Value <= ElementBits/2 for a narrowing right shift.
11900 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt) {
11901   assert(VT.isVector() && "vector shift count is not a vector type");
11902   int64_t ElementBits = VT.getScalarSizeInBits();
11903   if (!getVShiftImm(Op, ElementBits, Cnt))
11904     return false;
11905   return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
11906 }
11907 
11908 SDValue AArch64TargetLowering::LowerTRUNCATE(SDValue Op,
11909                                              SelectionDAG &DAG) const {
11910   EVT VT = Op.getValueType();
11911 
11912   if (VT.getScalarType() == MVT::i1) {
11913     // Lower i1 truncate to `(x & 1) != 0`.
11914     SDLoc dl(Op);
11915     EVT OpVT = Op.getOperand(0).getValueType();
11916     SDValue Zero = DAG.getConstant(0, dl, OpVT);
11917     SDValue One = DAG.getConstant(1, dl, OpVT);
11918     SDValue And = DAG.getNode(ISD::AND, dl, OpVT, Op.getOperand(0), One);
11919     return DAG.getSetCC(dl, VT, And, Zero, ISD::SETNE);
11920   }
11921 
11922   if (!VT.isVector() || VT.isScalableVector())
11923     return SDValue();
11924 
11925   if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType()))
11926     return LowerFixedLengthVectorTruncateToSVE(Op, DAG);
11927 
11928   return SDValue();
11929 }
11930 
11931 SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
11932                                                       SelectionDAG &DAG) const {
11933   EVT VT = Op.getValueType();
11934   SDLoc DL(Op);
11935   int64_t Cnt;
11936 
11937   if (!Op.getOperand(1).getValueType().isVector())
11938     return Op;
11939   unsigned EltSize = VT.getScalarSizeInBits();
11940 
11941   switch (Op.getOpcode()) {
11942   case ISD::SHL:
11943     if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT))
11944       return LowerToPredicatedOp(Op, DAG, AArch64ISD::SHL_PRED);
11945 
11946     if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize)
11947       return DAG.getNode(AArch64ISD::VSHL, DL, VT, Op.getOperand(0),
11948                          DAG.getConstant(Cnt, DL, MVT::i32));
11949     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
11950                        DAG.getConstant(Intrinsic::aarch64_neon_ushl, DL,
11951                                        MVT::i32),
11952                        Op.getOperand(0), Op.getOperand(1));
11953   case ISD::SRA:
11954   case ISD::SRL:
11955     if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT)) {
11956       unsigned Opc = Op.getOpcode() == ISD::SRA ? AArch64ISD::SRA_PRED
11957                                                 : AArch64ISD::SRL_PRED;
11958       return LowerToPredicatedOp(Op, DAG, Opc);
11959     }
11960 
11961     // Right shift immediate
11962     if (isVShiftRImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize) {
11963       unsigned Opc =
11964           (Op.getOpcode() == ISD::SRA) ? AArch64ISD::VASHR : AArch64ISD::VLSHR;
11965       return DAG.getNode(Opc, DL, VT, Op.getOperand(0),
11966                          DAG.getConstant(Cnt, DL, MVT::i32));
11967     }
11968 
    // Right shift register. Note that there is no shift-right-register
    // instruction; the shift-left-register instruction takes a signed
    // value, where negative amounts specify a right shift.
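    // For example (illustrative), (srl x, y) is emitted as ushl(x, sub(0, y)).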
11972     unsigned Opc = (Op.getOpcode() == ISD::SRA) ? Intrinsic::aarch64_neon_sshl
11973                                                 : Intrinsic::aarch64_neon_ushl;
11974     // negate the shift amount
11975     SDValue NegShift = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
11976                                    Op.getOperand(1));
11977     SDValue NegShiftLeft =
11978         DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
11979                     DAG.getConstant(Opc, DL, MVT::i32), Op.getOperand(0),
11980                     NegShift);
11981     return NegShiftLeft;
11982   }
11983 
11984   llvm_unreachable("unexpected shift opcode");
11985 }
11986 
11987 static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
11988                                     AArch64CC::CondCode CC, bool NoNans, EVT VT,
11989                                     const SDLoc &dl, SelectionDAG &DAG) {
11990   EVT SrcVT = LHS.getValueType();
11991   assert(VT.getSizeInBits() == SrcVT.getSizeInBits() &&
11992          "function only supposed to emit natural comparisons");
11993 
11994   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
11995   APInt CnstBits(VT.getSizeInBits(), 0);
11996   APInt UndefBits(VT.getSizeInBits(), 0);
11997   bool IsCnst = BVN && resolveBuildVector(BVN, CnstBits, UndefBits);
11998   bool IsZero = IsCnst && (CnstBits == 0);
11999 
12000   if (SrcVT.getVectorElementType().isFloatingPoint()) {
12001     switch (CC) {
12002     default:
12003       return SDValue();
12004     case AArch64CC::NE: {
12005       SDValue Fcmeq;
12006       if (IsZero)
12007         Fcmeq = DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS);
12008       else
12009         Fcmeq = DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS);
12010       return DAG.getNOT(dl, Fcmeq, VT);
12011     }
12012     case AArch64CC::EQ:
12013       if (IsZero)
12014         return DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS);
12015       return DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS);
12016     case AArch64CC::GE:
12017       if (IsZero)
12018         return DAG.getNode(AArch64ISD::FCMGEz, dl, VT, LHS);
12019       return DAG.getNode(AArch64ISD::FCMGE, dl, VT, LHS, RHS);
12020     case AArch64CC::GT:
12021       if (IsZero)
12022         return DAG.getNode(AArch64ISD::FCMGTz, dl, VT, LHS);
12023       return DAG.getNode(AArch64ISD::FCMGT, dl, VT, LHS, RHS);
12024     case AArch64CC::LE:
12025       if (!NoNans)
12026         return SDValue();
      // If we ignore NaNs then we can use the LS implementation.
12028       LLVM_FALLTHROUGH;
12029     case AArch64CC::LS:
12030       if (IsZero)
12031         return DAG.getNode(AArch64ISD::FCMLEz, dl, VT, LHS);
12032       return DAG.getNode(AArch64ISD::FCMGE, dl, VT, RHS, LHS);
12033     case AArch64CC::LT:
12034       if (!NoNans)
12035         return SDValue();
      // If we ignore NaNs then we can use the MI implementation.
12037       LLVM_FALLTHROUGH;
12038     case AArch64CC::MI:
12039       if (IsZero)
12040         return DAG.getNode(AArch64ISD::FCMLTz, dl, VT, LHS);
12041       return DAG.getNode(AArch64ISD::FCMGT, dl, VT, RHS, LHS);
12042     }
12043   }
12044 
12045   switch (CC) {
12046   default:
12047     return SDValue();
12048   case AArch64CC::NE: {
12049     SDValue Cmeq;
12050     if (IsZero)
12051       Cmeq = DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS);
12052     else
12053       Cmeq = DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS);
12054     return DAG.getNOT(dl, Cmeq, VT);
12055   }
12056   case AArch64CC::EQ:
12057     if (IsZero)
12058       return DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS);
12059     return DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS);
12060   case AArch64CC::GE:
12061     if (IsZero)
12062       return DAG.getNode(AArch64ISD::CMGEz, dl, VT, LHS);
12063     return DAG.getNode(AArch64ISD::CMGE, dl, VT, LHS, RHS);
12064   case AArch64CC::GT:
12065     if (IsZero)
12066       return DAG.getNode(AArch64ISD::CMGTz, dl, VT, LHS);
12067     return DAG.getNode(AArch64ISD::CMGT, dl, VT, LHS, RHS);
12068   case AArch64CC::LE:
12069     if (IsZero)
12070       return DAG.getNode(AArch64ISD::CMLEz, dl, VT, LHS);
12071     return DAG.getNode(AArch64ISD::CMGE, dl, VT, RHS, LHS);
12072   case AArch64CC::LS:
12073     return DAG.getNode(AArch64ISD::CMHS, dl, VT, RHS, LHS);
12074   case AArch64CC::LO:
12075     return DAG.getNode(AArch64ISD::CMHI, dl, VT, RHS, LHS);
12076   case AArch64CC::LT:
12077     if (IsZero)
12078       return DAG.getNode(AArch64ISD::CMLTz, dl, VT, LHS);
12079     return DAG.getNode(AArch64ISD::CMGT, dl, VT, RHS, LHS);
12080   case AArch64CC::HI:
12081     return DAG.getNode(AArch64ISD::CMHI, dl, VT, LHS, RHS);
12082   case AArch64CC::HS:
12083     return DAG.getNode(AArch64ISD::CMHS, dl, VT, LHS, RHS);
12084   }
12085 }
12086 
12087 SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
12088                                            SelectionDAG &DAG) const {
12089   if (Op.getValueType().isScalableVector())
12090     return LowerToPredicatedOp(Op, DAG, AArch64ISD::SETCC_MERGE_ZERO);
12091 
12092   if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType()))
12093     return LowerFixedLengthVectorSetccToSVE(Op, DAG);
12094 
12095   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
12096   SDValue LHS = Op.getOperand(0);
12097   SDValue RHS = Op.getOperand(1);
12098   EVT CmpVT = LHS.getValueType().changeVectorElementTypeToInteger();
12099   SDLoc dl(Op);
12100 
12101   if (LHS.getValueType().getVectorElementType().isInteger()) {
12102     assert(LHS.getValueType() == RHS.getValueType());
12103     AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
12104     SDValue Cmp =
12105         EmitVectorComparison(LHS, RHS, AArch64CC, false, CmpVT, dl, DAG);
12106     return DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
12107   }
12108 
12109   const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
12110 
  // Make v4f16 (only) fcmp operations utilise vector instructions.
  // v8f16 support will be a little more complicated.
12113   if (!FullFP16 && LHS.getValueType().getVectorElementType() == MVT::f16) {
12114     if (LHS.getValueType().getVectorNumElements() == 4) {
12115       LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, LHS);
12116       RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, RHS);
12117       SDValue NewSetcc = DAG.getSetCC(dl, MVT::v4i16, LHS, RHS, CC);
12118       DAG.ReplaceAllUsesWith(Op, NewSetcc);
12119       CmpVT = MVT::v4i32;
12120     } else
12121       return SDValue();
12122   }
12123 
12124   assert((!FullFP16 && LHS.getValueType().getVectorElementType() != MVT::f16) ||
12125           LHS.getValueType().getVectorElementType() != MVT::f128);
12126 
12127   // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
12128   // clean.  Some of them require two branches to implement.
12129   AArch64CC::CondCode CC1, CC2;
12130   bool ShouldInvert;
12131   changeVectorFPCCToAArch64CC(CC, CC1, CC2, ShouldInvert);
12132 
  bool NoNaNs =
      getTargetMachine().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs();
12134   SDValue Cmp =
12135       EmitVectorComparison(LHS, RHS, CC1, NoNaNs, CmpVT, dl, DAG);
12136   if (!Cmp.getNode())
12137     return SDValue();
12138 
12139   if (CC2 != AArch64CC::AL) {
12140     SDValue Cmp2 =
12141         EmitVectorComparison(LHS, RHS, CC2, NoNaNs, CmpVT, dl, DAG);
12142     if (!Cmp2.getNode())
12143       return SDValue();
12144 
12145     Cmp = DAG.getNode(ISD::OR, dl, CmpVT, Cmp, Cmp2);
12146   }
12147 
12148   Cmp = DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
12149 
12150   if (ShouldInvert)
12151     Cmp = DAG.getNOT(dl, Cmp, Cmp.getValueType());
12152 
12153   return Cmp;
12154 }
12155 
12156 static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp,
12157                                   SelectionDAG &DAG) {
12158   SDValue VecOp = ScalarOp.getOperand(0);
12159   auto Rdx = DAG.getNode(Op, DL, VecOp.getSimpleValueType(), VecOp);
12160   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarOp.getValueType(), Rdx,
12161                      DAG.getConstant(0, DL, MVT::i64));
12162 }
12163 
12164 SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op,
12165                                               SelectionDAG &DAG) const {
12166   SDValue Src = Op.getOperand(0);
12167 
12168   // Try to lower fixed length reductions to SVE.
12169   EVT SrcVT = Src.getValueType();
12170   bool OverrideNEON = Op.getOpcode() == ISD::VECREDUCE_AND ||
12171                       Op.getOpcode() == ISD::VECREDUCE_OR ||
12172                       Op.getOpcode() == ISD::VECREDUCE_XOR ||
12173                       Op.getOpcode() == ISD::VECREDUCE_FADD ||
12174                       (Op.getOpcode() != ISD::VECREDUCE_ADD &&
12175                        SrcVT.getVectorElementType() == MVT::i64);
12176   if (SrcVT.isScalableVector() ||
12177       useSVEForFixedLengthVectorVT(
12178           SrcVT, OverrideNEON && Subtarget->useSVEForFixedLengthVectors())) {
12179 
12180     if (SrcVT.getVectorElementType() == MVT::i1)
12181       return LowerPredReductionToSVE(Op, DAG);
12182 
12183     switch (Op.getOpcode()) {
12184     case ISD::VECREDUCE_ADD:
12185       return LowerReductionToSVE(AArch64ISD::UADDV_PRED, Op, DAG);
12186     case ISD::VECREDUCE_AND:
12187       return LowerReductionToSVE(AArch64ISD::ANDV_PRED, Op, DAG);
12188     case ISD::VECREDUCE_OR:
12189       return LowerReductionToSVE(AArch64ISD::ORV_PRED, Op, DAG);
12190     case ISD::VECREDUCE_SMAX:
12191       return LowerReductionToSVE(AArch64ISD::SMAXV_PRED, Op, DAG);
12192     case ISD::VECREDUCE_SMIN:
12193       return LowerReductionToSVE(AArch64ISD::SMINV_PRED, Op, DAG);
12194     case ISD::VECREDUCE_UMAX:
12195       return LowerReductionToSVE(AArch64ISD::UMAXV_PRED, Op, DAG);
12196     case ISD::VECREDUCE_UMIN:
12197       return LowerReductionToSVE(AArch64ISD::UMINV_PRED, Op, DAG);
12198     case ISD::VECREDUCE_XOR:
12199       return LowerReductionToSVE(AArch64ISD::EORV_PRED, Op, DAG);
12200     case ISD::VECREDUCE_FADD:
12201       return LowerReductionToSVE(AArch64ISD::FADDV_PRED, Op, DAG);
12202     case ISD::VECREDUCE_FMAX:
12203       return LowerReductionToSVE(AArch64ISD::FMAXNMV_PRED, Op, DAG);
12204     case ISD::VECREDUCE_FMIN:
12205       return LowerReductionToSVE(AArch64ISD::FMINNMV_PRED, Op, DAG);
12206     default:
12207       llvm_unreachable("Unhandled fixed length reduction");
12208     }
12209   }
12210 
12211   // Lower NEON reductions.
12212   SDLoc dl(Op);
12213   switch (Op.getOpcode()) {
12214   case ISD::VECREDUCE_ADD:
12215     return getReductionSDNode(AArch64ISD::UADDV, dl, Op, DAG);
12216   case ISD::VECREDUCE_SMAX:
12217     return getReductionSDNode(AArch64ISD::SMAXV, dl, Op, DAG);
12218   case ISD::VECREDUCE_SMIN:
12219     return getReductionSDNode(AArch64ISD::SMINV, dl, Op, DAG);
12220   case ISD::VECREDUCE_UMAX:
12221     return getReductionSDNode(AArch64ISD::UMAXV, dl, Op, DAG);
12222   case ISD::VECREDUCE_UMIN:
12223     return getReductionSDNode(AArch64ISD::UMINV, dl, Op, DAG);
12224   case ISD::VECREDUCE_FMAX: {
12225     return DAG.getNode(
12226         ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(),
12227         DAG.getConstant(Intrinsic::aarch64_neon_fmaxnmv, dl, MVT::i32),
12228         Src);
12229   }
12230   case ISD::VECREDUCE_FMIN: {
12231     return DAG.getNode(
12232         ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(),
12233         DAG.getConstant(Intrinsic::aarch64_neon_fminnmv, dl, MVT::i32),
12234         Src);
12235   }
12236   default:
12237     llvm_unreachable("Unhandled reduction");
12238   }
12239 }
12240 
12241 SDValue AArch64TargetLowering::LowerATOMIC_LOAD_SUB(SDValue Op,
12242                                                     SelectionDAG &DAG) const {
12243   auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
12244   if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics())
12245     return SDValue();
12246 
12247   // LSE has an atomic load-add instruction, but not a load-sub.
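  // For example (illustrative), "atomicrmw sub %p, %v" is rewritten as an
  // atomic load-add of (0 - %v), which can then use LDADD.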
12248   SDLoc dl(Op);
12249   MVT VT = Op.getSimpleValueType();
12250   SDValue RHS = Op.getOperand(2);
12251   AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode());
12252   RHS = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), RHS);
12253   return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, AN->getMemoryVT(),
12254                        Op.getOperand(0), Op.getOperand(1), RHS,
12255                        AN->getMemOperand());
12256 }
12257 
12258 SDValue AArch64TargetLowering::LowerATOMIC_LOAD_AND(SDValue Op,
12259                                                     SelectionDAG &DAG) const {
12260   auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
12261   if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics())
12262     return SDValue();
12263 
12264   // LSE has an atomic load-clear instruction, but not a load-and.
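  // For example (illustrative), "atomicrmw and %p, %v" is rewritten as an
  // atomic load-clear of (%v xor -1), which can then use LDCLR.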
12265   SDLoc dl(Op);
12266   MVT VT = Op.getSimpleValueType();
12267   SDValue RHS = Op.getOperand(2);
12268   AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode());
12269   RHS = DAG.getNode(ISD::XOR, dl, VT, DAG.getConstant(-1ULL, dl, VT), RHS);
12270   return DAG.getAtomic(ISD::ATOMIC_LOAD_CLR, dl, AN->getMemoryVT(),
12271                        Op.getOperand(0), Op.getOperand(1), RHS,
12272                        AN->getMemOperand());
12273 }
12274 
12275 SDValue AArch64TargetLowering::LowerWindowsDYNAMIC_STACKALLOC(
12276     SDValue Op, SDValue Chain, SDValue &Size, SelectionDAG &DAG) const {
12277   SDLoc dl(Op);
12278   EVT PtrVT = getPointerTy(DAG.getDataLayout());
12279   SDValue Callee = DAG.getTargetExternalSymbol("__chkstk", PtrVT, 0);
12280 
12281   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
12282   const uint32_t *Mask = TRI->getWindowsStackProbePreservedMask();
12283   if (Subtarget->hasCustomCallingConv())
12284     TRI->UpdateCustomCallPreservedMask(DAG.getMachineFunction(), &Mask);
12285 
12286   Size = DAG.getNode(ISD::SRL, dl, MVT::i64, Size,
12287                      DAG.getConstant(4, dl, MVT::i64));
12288   Chain = DAG.getCopyToReg(Chain, dl, AArch64::X15, Size, SDValue());
12289   Chain =
12290       DAG.getNode(AArch64ISD::CALL, dl, DAG.getVTList(MVT::Other, MVT::Glue),
12291                   Chain, Callee, DAG.getRegister(AArch64::X15, MVT::i64),
12292                   DAG.getRegisterMask(Mask), Chain.getValue(1));
12293   // To match the actual intent better, we should read the output from X15 here
12294   // again (instead of potentially spilling it to the stack), but rereading Size
  // from X15 here doesn't work at -O0, since X15 is considered undefined
  // at that point.
12297 
12298   Size = DAG.getNode(ISD::SHL, dl, MVT::i64, Size,
12299                      DAG.getConstant(4, dl, MVT::i64));
12300   return Chain;
12301 }
12302 
12303 SDValue
12304 AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
12305                                                SelectionDAG &DAG) const {
12306   assert(Subtarget->isTargetWindows() &&
12307          "Only Windows alloca probing supported");
12308   SDLoc dl(Op);
12309   // Get the inputs.
12310   SDNode *Node = Op.getNode();
12311   SDValue Chain = Op.getOperand(0);
12312   SDValue Size = Op.getOperand(1);
12313   MaybeAlign Align =
12314       cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
12315   EVT VT = Node->getValueType(0);
12316 
12317   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
12318           "no-stack-arg-probe")) {
12319     SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64);
12320     Chain = SP.getValue(1);
12321     SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size);
12322     if (Align)
12323       SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
12324                        DAG.getConstant(-(uint64_t)Align->value(), dl, VT));
12325     Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP);
12326     SDValue Ops[2] = {SP, Chain};
12327     return DAG.getMergeValues(Ops, dl);
12328   }
12329 
12330   Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
12331 
12332   Chain = LowerWindowsDYNAMIC_STACKALLOC(Op, Chain, Size, DAG);
12333 
12334   SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64);
12335   Chain = SP.getValue(1);
12336   SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size);
12337   if (Align)
12338     SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
12339                      DAG.getConstant(-(uint64_t)Align->value(), dl, VT));
12340   Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP);
12341 
12342   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
12343                              DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
12344 
12345   SDValue Ops[2] = {SP, Chain};
12346   return DAG.getMergeValues(Ops, dl);
12347 }
12348 
12349 SDValue AArch64TargetLowering::LowerVSCALE(SDValue Op,
12350                                            SelectionDAG &DAG) const {
12351   EVT VT = Op.getValueType();
12352   assert(VT != MVT::i64 && "Expected illegal VSCALE node");
12353 
12354   SDLoc DL(Op);
12355   APInt MulImm = cast<ConstantSDNode>(Op.getOperand(0))->getAPIntValue();
12356   return DAG.getZExtOrTrunc(DAG.getVScale(DL, MVT::i64, MulImm.sext(64)), DL,
12357                             VT);
12358 }
12359 
12360 /// Set the IntrinsicInfo for the `aarch64_sve_st<N>` intrinsics.
12361 template <unsigned NumVecs>
12362 static bool
12363 setInfoSVEStN(const AArch64TargetLowering &TLI, const DataLayout &DL,
12364               AArch64TargetLowering::IntrinsicInfo &Info, const CallInst &CI) {
12365   Info.opc = ISD::INTRINSIC_VOID;
12366   // Retrieve EC from first vector argument.
12367   const EVT VT = TLI.getMemValueType(DL, CI.getArgOperand(0)->getType());
12368   ElementCount EC = VT.getVectorElementCount();
12369 #ifndef NDEBUG
12370   // Check the assumption that all input vectors are the same type.
12371   for (unsigned I = 0; I < NumVecs; ++I)
12372     assert(VT == TLI.getMemValueType(DL, CI.getArgOperand(I)->getType()) &&
12373            "Invalid type.");
12374 #endif
12375   // memVT is `NumVecs * VT`.
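  // For example (illustrative), an st2 of two nxv4i32 vectors is described
  // with memVT == nxv8i32.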
12376   Info.memVT = EVT::getVectorVT(CI.getType()->getContext(), VT.getScalarType(),
12377                                 EC * NumVecs);
12378   Info.ptrVal = CI.getArgOperand(CI.arg_size() - 1);
12379   Info.offset = 0;
12380   Info.align.reset();
12381   Info.flags = MachineMemOperand::MOStore;
12382   return true;
12383 }
12384 
12385 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
12386 /// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
12387 /// specified in the intrinsic calls.
12388 bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
12389                                                const CallInst &I,
12390                                                MachineFunction &MF,
12391                                                unsigned Intrinsic) const {
12392   auto &DL = I.getModule()->getDataLayout();
12393   switch (Intrinsic) {
12394   case Intrinsic::aarch64_sve_st2:
12395     return setInfoSVEStN<2>(*this, DL, Info, I);
12396   case Intrinsic::aarch64_sve_st3:
12397     return setInfoSVEStN<3>(*this, DL, Info, I);
12398   case Intrinsic::aarch64_sve_st4:
12399     return setInfoSVEStN<4>(*this, DL, Info, I);
12400   case Intrinsic::aarch64_neon_ld2:
12401   case Intrinsic::aarch64_neon_ld3:
12402   case Intrinsic::aarch64_neon_ld4:
12403   case Intrinsic::aarch64_neon_ld1x2:
12404   case Intrinsic::aarch64_neon_ld1x3:
12405   case Intrinsic::aarch64_neon_ld1x4:
12406   case Intrinsic::aarch64_neon_ld2lane:
12407   case Intrinsic::aarch64_neon_ld3lane:
12408   case Intrinsic::aarch64_neon_ld4lane:
12409   case Intrinsic::aarch64_neon_ld2r:
12410   case Intrinsic::aarch64_neon_ld3r:
12411   case Intrinsic::aarch64_neon_ld4r: {
12412     Info.opc = ISD::INTRINSIC_W_CHAIN;
12413     // Conservatively set memVT to the entire set of vectors loaded.
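    // For example (illustrative), an ld2 returning two v4i32 vectors loads
    // 256 bits in total, so memVT is set to v4i64.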
12414     uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
12415     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
12416     Info.ptrVal = I.getArgOperand(I.arg_size() - 1);
12417     Info.offset = 0;
12418     Info.align.reset();
    // Volatile loads with NEON intrinsics are not supported.
12420     Info.flags = MachineMemOperand::MOLoad;
12421     return true;
12422   }
12423   case Intrinsic::aarch64_neon_st2:
12424   case Intrinsic::aarch64_neon_st3:
12425   case Intrinsic::aarch64_neon_st4:
12426   case Intrinsic::aarch64_neon_st1x2:
12427   case Intrinsic::aarch64_neon_st1x3:
12428   case Intrinsic::aarch64_neon_st1x4:
12429   case Intrinsic::aarch64_neon_st2lane:
12430   case Intrinsic::aarch64_neon_st3lane:
12431   case Intrinsic::aarch64_neon_st4lane: {
12432     Info.opc = ISD::INTRINSIC_VOID;
12433     // Conservatively set memVT to the entire set of vectors stored.
12434     unsigned NumElts = 0;
12435     for (const Value *Arg : I.args()) {
12436       Type *ArgTy = Arg->getType();
12437       if (!ArgTy->isVectorTy())
12438         break;
12439       NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
12440     }
12441     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
12442     Info.ptrVal = I.getArgOperand(I.arg_size() - 1);
12443     Info.offset = 0;
12444     Info.align.reset();
    // Volatile stores with NEON intrinsics are not supported.
12446     Info.flags = MachineMemOperand::MOStore;
12447     return true;
12448   }
12449   case Intrinsic::aarch64_ldaxr:
12450   case Intrinsic::aarch64_ldxr: {
12451     Type *ValTy = I.getParamElementType(0);
12452     Info.opc = ISD::INTRINSIC_W_CHAIN;
12453     Info.memVT = MVT::getVT(ValTy);
12454     Info.ptrVal = I.getArgOperand(0);
12455     Info.offset = 0;
12456     Info.align = DL.getABITypeAlign(ValTy);
12457     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
12458     return true;
12459   }
12460   case Intrinsic::aarch64_stlxr:
12461   case Intrinsic::aarch64_stxr: {
12462     Type *ValTy = I.getParamElementType(1);
12463     Info.opc = ISD::INTRINSIC_W_CHAIN;
12464     Info.memVT = MVT::getVT(ValTy);
12465     Info.ptrVal = I.getArgOperand(1);
12466     Info.offset = 0;
12467     Info.align = DL.getABITypeAlign(ValTy);
12468     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
12469     return true;
12470   }
12471   case Intrinsic::aarch64_ldaxp:
12472   case Intrinsic::aarch64_ldxp:
12473     Info.opc = ISD::INTRINSIC_W_CHAIN;
12474     Info.memVT = MVT::i128;
12475     Info.ptrVal = I.getArgOperand(0);
12476     Info.offset = 0;
12477     Info.align = Align(16);
12478     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
12479     return true;
12480   case Intrinsic::aarch64_stlxp:
12481   case Intrinsic::aarch64_stxp:
12482     Info.opc = ISD::INTRINSIC_W_CHAIN;
12483     Info.memVT = MVT::i128;
12484     Info.ptrVal = I.getArgOperand(2);
12485     Info.offset = 0;
12486     Info.align = Align(16);
12487     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
12488     return true;
12489   case Intrinsic::aarch64_sve_ldnt1: {
12490     Type *ElTy = cast<VectorType>(I.getType())->getElementType();
12491     Info.opc = ISD::INTRINSIC_W_CHAIN;
12492     Info.memVT = MVT::getVT(I.getType());
12493     Info.ptrVal = I.getArgOperand(1);
12494     Info.offset = 0;
12495     Info.align = DL.getABITypeAlign(ElTy);
12496     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MONonTemporal;
12497     return true;
12498   }
12499   case Intrinsic::aarch64_sve_stnt1: {
12500     Type *ElTy =
12501         cast<VectorType>(I.getArgOperand(0)->getType())->getElementType();
12502     Info.opc = ISD::INTRINSIC_W_CHAIN;
12503     Info.memVT = MVT::getVT(I.getOperand(0)->getType());
12504     Info.ptrVal = I.getArgOperand(2);
12505     Info.offset = 0;
12506     Info.align = DL.getABITypeAlign(ElTy);
12507     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MONonTemporal;
12508     return true;
12509   }
12510   case Intrinsic::aarch64_mops_memset_tag: {
12511     Value *Dst = I.getArgOperand(0);
12512     Value *Val = I.getArgOperand(1);
12513     Info.opc = ISD::INTRINSIC_W_CHAIN;
12514     Info.memVT = MVT::getVT(Val->getType());
12515     Info.ptrVal = Dst;
12516     Info.offset = 0;
12517     Info.align = I.getParamAlign(0).valueOrOne();
12518     Info.flags = MachineMemOperand::MOStore;
12519     // The size of the memory being operated on is unknown at this point
12520     Info.size = MemoryLocation::UnknownSize;
12521     return true;
12522   }
12523   default:
12524     break;
12525   }
12526 
12527   return false;
12528 }
12529 
12530 bool AArch64TargetLowering::shouldReduceLoadWidth(SDNode *Load,
12531                                                   ISD::LoadExtType ExtTy,
12532                                                   EVT NewVT) const {
12533   // TODO: This may be worth removing. Check regression tests for diffs.
12534   if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT))
12535     return false;
12536 
12537   // If we're reducing the load width in order to avoid having to use an extra
12538   // instruction to do extension then it's probably a good idea.
12539   if (ExtTy != ISD::NON_EXTLOAD)
12540     return true;
12541   // Don't reduce load width if it would prevent us from combining a shift into
12542   // the offset.
12543   MemSDNode *Mem = dyn_cast<MemSDNode>(Load);
12544   assert(Mem);
12545   const SDValue &Base = Mem->getBasePtr();
12546   if (Base.getOpcode() == ISD::ADD &&
12547       Base.getOperand(1).getOpcode() == ISD::SHL &&
12548       Base.getOperand(1).hasOneUse() &&
12549       Base.getOperand(1).getOperand(1).getOpcode() == ISD::Constant) {
12550     // It's unknown whether a scalable vector has a power-of-2 bitwidth.
12551     if (Mem->getMemoryVT().isScalableVector())
12552       return false;
12553     // The shift can be combined if it matches the size of the value being
12554     // loaded (and so reducing the width would make it not match).
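    // For example (illustrative), for an i64 load whose address is
    // (add x, (shl y, 3)), the shift matches the 8-byte access size and can
    // be folded into the scaled register-offset addressing mode; narrowing
    // the load would break that fold.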
12555     uint64_t ShiftAmount = Base.getOperand(1).getConstantOperandVal(1);
    uint64_t LoadBytes = Mem->getMemoryVT().getSizeInBits() / 8;
12557     if (ShiftAmount == Log2_32(LoadBytes))
12558       return false;
12559   }
12560   // We have no reason to disallow reducing the load width, so allow it.
12561   return true;
12562 }
12563 
// Truncations from a 64-bit GPR to a 32-bit GPR are free.
12565 bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
12566   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
12567     return false;
12568   uint64_t NumBits1 = Ty1->getPrimitiveSizeInBits().getFixedSize();
12569   uint64_t NumBits2 = Ty2->getPrimitiveSizeInBits().getFixedSize();
12570   return NumBits1 > NumBits2;
12571 }
12572 bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
12573   if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger())
12574     return false;
12575   uint64_t NumBits1 = VT1.getFixedSizeInBits();
12576   uint64_t NumBits2 = VT2.getFixedSizeInBits();
12577   return NumBits1 > NumBits2;
12578 }
12579 
/// Check if it is profitable to hoist an instruction from a then/else block
/// into the if block. It is not profitable if I and its user can form an FMA
/// instruction, because we prefer FMSUB/FMADD.
12583 bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const {
12584   if (I->getOpcode() != Instruction::FMul)
12585     return true;
12586 
12587   if (!I->hasOneUse())
12588     return true;
12589 
12590   Instruction *User = I->user_back();
12591 
12592   if (!(User->getOpcode() == Instruction::FSub ||
12593         User->getOpcode() == Instruction::FAdd))
12594     return true;
12595 
12596   const TargetOptions &Options = getTargetMachine().Options;
12597   const Function *F = I->getFunction();
12598   const DataLayout &DL = F->getParent()->getDataLayout();
12599   Type *Ty = User->getOperand(0)->getType();
12600 
12601   return !(isFMAFasterThanFMulAndFAdd(*F, Ty) &&
12602            isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
12603            (Options.AllowFPOpFusion == FPOpFusion::Fast ||
12604             Options.UnsafeFPMath));
12605 }
12606 
12607 // All 32-bit GPR operations implicitly zero the high-half of the corresponding
12608 // 64-bit GPR.
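// For example (illustrative), "add w8, w9, w10" clears bits [63:32] of x8, so
// a later zero-extension of the i32 result to i64 needs no extra instruction.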
12609 bool AArch64TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
12610   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
12611     return false;
12612   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
12613   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
12614   return NumBits1 == 32 && NumBits2 == 64;
12615 }
12616 bool AArch64TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
12617   if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger())
12618     return false;
12619   unsigned NumBits1 = VT1.getSizeInBits();
12620   unsigned NumBits2 = VT2.getSizeInBits();
12621   return NumBits1 == 32 && NumBits2 == 64;
12622 }
12623 
12624 bool AArch64TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
12625   EVT VT1 = Val.getValueType();
12626   if (isZExtFree(VT1, VT2)) {
12627     return true;
12628   }
12629 
12630   if (Val.getOpcode() != ISD::LOAD)
12631     return false;
12632 
12633   // 8-, 16-, and 32-bit integer loads all implicitly zero-extend.
12634   return (VT1.isSimple() && !VT1.isVector() && VT1.isInteger() &&
12635           VT2.isSimple() && !VT2.isVector() && VT2.isInteger() &&
12636           VT1.getSizeInBits() <= 32);
12637 }
12638 
12639 bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {
12640   if (isa<FPExtInst>(Ext))
12641     return false;
12642 
12643   // Vector types are not free.
12644   if (Ext->getType()->isVectorTy())
12645     return false;
12646 
12647   for (const Use &U : Ext->uses()) {
12648     // The extension is free if we can fold it with a left shift in an
12649     // addressing mode or an arithmetic operation: add, sub, and cmp.
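    // For example, "add x0, x1, w2, sxtw #2" folds both the sign extension of
    // w2 and a left shift by 2 into a single add.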
12650 
12651     // Is there a shift?
12652     const Instruction *Instr = cast<Instruction>(U.getUser());
12653 
12654     // Is this a constant shift?
12655     switch (Instr->getOpcode()) {
12656     case Instruction::Shl:
12657       if (!isa<ConstantInt>(Instr->getOperand(1)))
12658         return false;
12659       break;
12660     case Instruction::GetElementPtr: {
12661       gep_type_iterator GTI = gep_type_begin(Instr);
12662       auto &DL = Ext->getModule()->getDataLayout();
12663       std::advance(GTI, U.getOperandNo()-1);
12664       Type *IdxTy = GTI.getIndexedType();
12665       // This extension will end up with a shift because of the scaling factor.
12666       // 8-bit sized types have a scaling factor of 1, thus a shift amount of 0.
12667       // Get the shift amount based on the scaling factor:
12668       // log2(sizeof(IdxTy)) - log2(8).
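      // E.g. for an i32 index type: log2(32) - log2(8) = 5 - 3 = 2.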
12669       uint64_t ShiftAmt =
12670         countTrailingZeros(DL.getTypeStoreSizeInBits(IdxTy).getFixedSize()) - 3;
12671       // Is the constant foldable in the shift of the addressing mode?
12672       // I.e., shift amount is between 1 and 4 inclusive.
12673       if (ShiftAmt == 0 || ShiftAmt > 4)
12674         return false;
12675       break;
12676     }
12677     case Instruction::Trunc:
12678       // Check if this is a noop.
12679       // trunc(sext ty1 to ty2) to ty1.
12680       if (Instr->getType() == Ext->getOperand(0)->getType())
12681         continue;
12682       LLVM_FALLTHROUGH;
12683     default:
12684       return false;
12685     }
12686 
12687     // At this point we can use the bfm family, so this extension is free
12688     // for that use.
12689   }
12690   return true;
12691 }
12692 
12693 /// Check if both Op1 and Op2 are shufflevector extracts of either the lower
12694 /// or upper half of the vector elements.
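/// E.g. "shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 2, i32 3>"
/// extracts the upper half of %v.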
12695 static bool areExtractShuffleVectors(Value *Op1, Value *Op2) {
12696   auto areTypesHalfed = [](Value *FullV, Value *HalfV) {
12697     auto *FullTy = FullV->getType();
12698     auto *HalfTy = HalfV->getType();
12699     return FullTy->getPrimitiveSizeInBits().getFixedSize() ==
12700            2 * HalfTy->getPrimitiveSizeInBits().getFixedSize();
12701   };
12702 
12703   auto extractHalf = [](Value *FullV, Value *HalfV) {
12704     auto *FullVT = cast<FixedVectorType>(FullV->getType());
12705     auto *HalfVT = cast<FixedVectorType>(HalfV->getType());
12706     return FullVT->getNumElements() == 2 * HalfVT->getNumElements();
12707   };
12708 
12709   ArrayRef<int> M1, M2;
12710   Value *S1Op1, *S2Op1;
12711   if (!match(Op1, m_Shuffle(m_Value(S1Op1), m_Undef(), m_Mask(M1))) ||
12712       !match(Op2, m_Shuffle(m_Value(S2Op1), m_Undef(), m_Mask(M2))))
12713     return false;
12714 
12715   // Check that the operands are half as wide as the result and we extract
12716   // half of the elements of the input vectors.
12717   if (!areTypesHalfed(S1Op1, Op1) || !areTypesHalfed(S2Op1, Op2) ||
12718       !extractHalf(S1Op1, Op1) || !extractHalf(S2Op1, Op2))
12719     return false;
12720 
12721   // Check the mask extracts either the lower or upper half of vector
12722   // elements.
12723   int M1Start = -1;
12724   int M2Start = -1;
12725   int NumElements = cast<FixedVectorType>(Op1->getType())->getNumElements() * 2;
12726   if (!ShuffleVectorInst::isExtractSubvectorMask(M1, NumElements, M1Start) ||
12727       !ShuffleVectorInst::isExtractSubvectorMask(M2, NumElements, M2Start) ||
12728       M1Start != M2Start || (M1Start != 0 && M2Start != (NumElements / 2)))
12729     return false;
12730 
12731   return true;
12732 }
12733 
12734 /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth
12735 /// of the vector elements.
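/// E.g. "%e = sext <8 x i8> %a to <8 x i16>" doubles each element from 8 bits
/// to 16 bits.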
12736 static bool areExtractExts(Value *Ext1, Value *Ext2) {
12737   auto areExtDoubled = [](Instruction *Ext) {
12738     return Ext->getType()->getScalarSizeInBits() ==
12739            2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
12740   };
12741 
12742   if (!match(Ext1, m_ZExtOrSExt(m_Value())) ||
12743       !match(Ext2, m_ZExtOrSExt(m_Value())) ||
12744       !areExtDoubled(cast<Instruction>(Ext1)) ||
12745       !areExtDoubled(cast<Instruction>(Ext2)))
12746     return false;
12747 
12748   return true;
12749 }
12750 
12751 /// Check if Op could be used with vmull_high_p64 intrinsic.
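/// I.e. an extractelement of lane 1 of a <2 x i64> vector, matching the high
/// half that vmull_high_p64 reads.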
12752 static bool isOperandOfVmullHighP64(Value *Op) {
12753   Value *VectorOperand = nullptr;
12754   ConstantInt *ElementIndex = nullptr;
12755   return match(Op, m_ExtractElt(m_Value(VectorOperand),
12756                                 m_ConstantInt(ElementIndex))) &&
12757          ElementIndex->getValue() == 1 &&
12758          isa<FixedVectorType>(VectorOperand->getType()) &&
12759          cast<FixedVectorType>(VectorOperand->getType())->getNumElements() == 2;
12760 }
12761 
12762 /// Check if Op1 and Op2 could be used with vmull_high_p64 intrinsic.
12763 static bool areOperandsOfVmullHighP64(Value *Op1, Value *Op2) {
12764   return isOperandOfVmullHighP64(Op1) && isOperandOfVmullHighP64(Op2);
12765 }
12766 
12767 static bool isSplatShuffle(Value *V) {
12768   if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V))
12769     return is_splat(Shuf->getShuffleMask());
12770   return false;
12771 }
12772 
12773 /// Check if sinking \p I's operands to I's basic block is profitable, because
12774 /// the operands can be folded into a target instruction, e.g.
12775 /// shufflevectors extracts and/or sext/zext can be folded into (u,s)subl(2).
12776 bool AArch64TargetLowering::shouldSinkOperands(
12777     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
12778   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
12779     switch (II->getIntrinsicID()) {
12780     case Intrinsic::aarch64_neon_smull:
12781     case Intrinsic::aarch64_neon_umull:
12782       if (areExtractShuffleVectors(II->getOperand(0), II->getOperand(1))) {
12783         Ops.push_back(&II->getOperandUse(0));
12784         Ops.push_back(&II->getOperandUse(1));
12785         return true;
12786       }
12787       LLVM_FALLTHROUGH;
12788 
12789     case Intrinsic::fma:
12790       if (isa<VectorType>(I->getType()) &&
12791           cast<VectorType>(I->getType())->getElementType()->isHalfTy() &&
12792           !Subtarget->hasFullFP16())
12793         return false;
12794       LLVM_FALLTHROUGH;
12795     case Intrinsic::aarch64_neon_sqdmull:
12796     case Intrinsic::aarch64_neon_sqdmulh:
12797     case Intrinsic::aarch64_neon_sqrdmulh:
12798       // Sink splats for index lane variants
12799       if (isSplatShuffle(II->getOperand(0)))
12800         Ops.push_back(&II->getOperandUse(0));
12801       if (isSplatShuffle(II->getOperand(1)))
12802         Ops.push_back(&II->getOperandUse(1));
12803       return !Ops.empty();
12804     case Intrinsic::aarch64_sve_ptest_first:
12805     case Intrinsic::aarch64_sve_ptest_last:
12806       if (auto *IIOp = dyn_cast<IntrinsicInst>(II->getOperand(0)))
12807         if (IIOp->getIntrinsicID() == Intrinsic::aarch64_sve_ptrue)
12808           Ops.push_back(&II->getOperandUse(0));
12809       return !Ops.empty();
12810     case Intrinsic::aarch64_sme_write_horiz:
12811     case Intrinsic::aarch64_sme_write_vert:
12812     case Intrinsic::aarch64_sme_writeq_horiz:
12813     case Intrinsic::aarch64_sme_writeq_vert: {
12814       auto *Idx = dyn_cast<Instruction>(II->getOperand(1));
12815       if (!Idx || Idx->getOpcode() != Instruction::Add)
12816         return false;
12817       Ops.push_back(&II->getOperandUse(1));
12818       return true;
12819     }
12820     case Intrinsic::aarch64_sme_read_horiz:
12821     case Intrinsic::aarch64_sme_read_vert:
12822     case Intrinsic::aarch64_sme_readq_horiz:
12823     case Intrinsic::aarch64_sme_readq_vert:
12824     case Intrinsic::aarch64_sme_ld1b_vert:
12825     case Intrinsic::aarch64_sme_ld1h_vert:
12826     case Intrinsic::aarch64_sme_ld1w_vert:
12827     case Intrinsic::aarch64_sme_ld1d_vert:
12828     case Intrinsic::aarch64_sme_ld1q_vert:
12829     case Intrinsic::aarch64_sme_st1b_vert:
12830     case Intrinsic::aarch64_sme_st1h_vert:
12831     case Intrinsic::aarch64_sme_st1w_vert:
12832     case Intrinsic::aarch64_sme_st1d_vert:
12833     case Intrinsic::aarch64_sme_st1q_vert:
12834     case Intrinsic::aarch64_sme_ld1b_horiz:
12835     case Intrinsic::aarch64_sme_ld1h_horiz:
12836     case Intrinsic::aarch64_sme_ld1w_horiz:
12837     case Intrinsic::aarch64_sme_ld1d_horiz:
12838     case Intrinsic::aarch64_sme_ld1q_horiz:
12839     case Intrinsic::aarch64_sme_st1b_horiz:
12840     case Intrinsic::aarch64_sme_st1h_horiz:
12841     case Intrinsic::aarch64_sme_st1w_horiz:
12842     case Intrinsic::aarch64_sme_st1d_horiz:
12843     case Intrinsic::aarch64_sme_st1q_horiz: {
12844       auto *Idx = dyn_cast<Instruction>(II->getOperand(3));
12845       if (!Idx || Idx->getOpcode() != Instruction::Add)
12846         return false;
12847       Ops.push_back(&II->getOperandUse(3));
12848       return true;
12849     }
12850     case Intrinsic::aarch64_neon_pmull:
12851       if (!areExtractShuffleVectors(II->getOperand(0), II->getOperand(1)))
12852         return false;
12853       Ops.push_back(&II->getOperandUse(0));
12854       Ops.push_back(&II->getOperandUse(1));
12855       return true;
12856     case Intrinsic::aarch64_neon_pmull64:
12857       if (!areOperandsOfVmullHighP64(II->getArgOperand(0),
12858                                      II->getArgOperand(1)))
12859         return false;
12860       Ops.push_back(&II->getArgOperandUse(0));
12861       Ops.push_back(&II->getArgOperandUse(1));
12862       return true;
12863     default:
12864       return false;
12865     }
12866   }
12867 
12868   if (!I->getType()->isVectorTy())
12869     return false;
12870 
12871   switch (I->getOpcode()) {
12872   case Instruction::Sub:
12873   case Instruction::Add: {
12874     if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
12875       return false;
12876 
12877     // If the exts' operands extract either the lower or upper elements, we
12878     // can sink them too.
12879     auto Ext1 = cast<Instruction>(I->getOperand(0));
12880     auto Ext2 = cast<Instruction>(I->getOperand(1));
12881     if (areExtractShuffleVectors(Ext1->getOperand(0), Ext2->getOperand(0))) {
12882       Ops.push_back(&Ext1->getOperandUse(0));
12883       Ops.push_back(&Ext2->getOperandUse(0));
12884     }
12885 
12886     Ops.push_back(&I->getOperandUse(0));
12887     Ops.push_back(&I->getOperandUse(1));
12888 
12889     return true;
12890   }
12891   case Instruction::Mul: {
12892     bool IsProfitable = false;
12893     for (auto &Op : I->operands()) {
12894       // Make sure we are not already sinking this operand
12895       if (any_of(Ops, [&](Use *U) { return U->get() == Op; }))
12896         continue;
12897 
12898       ShuffleVectorInst *Shuffle = dyn_cast<ShuffleVectorInst>(Op);
12899       if (!Shuffle || !Shuffle->isZeroEltSplat())
12900         continue;
12901 
12902       Value *ShuffleOperand = Shuffle->getOperand(0);
12903       InsertElementInst *Insert = dyn_cast<InsertElementInst>(ShuffleOperand);
12904       if (!Insert)
12905         continue;
12906 
12907       Instruction *OperandInstr = dyn_cast<Instruction>(Insert->getOperand(1));
12908       if (!OperandInstr)
12909         continue;
12910 
12911       ConstantInt *ElementConstant =
12912           dyn_cast<ConstantInt>(Insert->getOperand(2));
12913       // Check that the insertelement is inserting into element 0
12914       if (!ElementConstant || ElementConstant->getZExtValue() != 0)
12915         continue;
12916 
12917       unsigned Opcode = OperandInstr->getOpcode();
12918       if (Opcode != Instruction::SExt && Opcode != Instruction::ZExt)
12919         continue;
12920 
12921       Ops.push_back(&Shuffle->getOperandUse(0));
12922       Ops.push_back(&Op);
12923       IsProfitable = true;
12924     }
12925 
12926     return IsProfitable;
12927   }
12928   default:
12929     return false;
12930   }
12931   return false;
12932 }
12933 
12934 bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
                                          Align &RequiredAlignment) const {
12936   if (!LoadedType.isSimple() ||
12937       (!LoadedType.isInteger() && !LoadedType.isFloatingPoint()))
12938     return false;
12939   // Cyclone supports unaligned accesses.
  RequiredAlignment = Align(1);
12941   unsigned NumBits = LoadedType.getSizeInBits();
12942   return NumBits == 32 || NumBits == 64;
12943 }
12944 
12945 /// A helper function for determining the number of interleaved accesses we
12946 /// will generate when lowering accesses of the given type.
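/// For example, a <16 x i32> access (512 bits) lowered with 128-bit NEON
/// vectors is generated as 4 interleaved accesses.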
12947 unsigned AArch64TargetLowering::getNumInterleavedAccesses(
12948     VectorType *VecTy, const DataLayout &DL, bool UseScalable) const {
12949   unsigned VecSize = UseScalable ? Subtarget->getMinSVEVectorSizeInBits() : 128;
12950   return std::max<unsigned>(1, (DL.getTypeSizeInBits(VecTy) + 127) / VecSize);
12951 }
12952 
12953 MachineMemOperand::Flags
12954 AArch64TargetLowering::getTargetMMOFlags(const Instruction &I) const {
12955   if (Subtarget->getProcFamily() == AArch64Subtarget::Falkor &&
12956       I.getMetadata(FALKOR_STRIDED_ACCESS_MD) != nullptr)
12957     return MOStridedAccess;
12958   return MachineMemOperand::MONone;
12959 }
12960 
12961 bool AArch64TargetLowering::isLegalInterleavedAccessType(
12962     VectorType *VecTy, const DataLayout &DL, bool &UseScalable) const {
12963 
12964   unsigned VecSize = DL.getTypeSizeInBits(VecTy);
12965   unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());
12966   unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
12967 
12968   UseScalable = false;
12969 
12970   // Ensure the number of vector elements is greater than 1.
12971   if (NumElements < 2)
12972     return false;
12973 
12974   // Ensure the element type is legal.
12975   if (ElSize != 8 && ElSize != 16 && ElSize != 32 && ElSize != 64)
12976     return false;
12977 
12978   if (Subtarget->useSVEForFixedLengthVectors() &&
12979       (VecSize % Subtarget->getMinSVEVectorSizeInBits() == 0 ||
12980        (VecSize < Subtarget->getMinSVEVectorSizeInBits() &&
12981         isPowerOf2_32(NumElements) && VecSize > 128))) {
12982     UseScalable = true;
12983     return true;
12984   }
12985 
  // Ensure the total vector size is 64 bits or a multiple of 128 bits. Types
  // larger than 128 bits will be split into multiple interleaved accesses.
12988   return VecSize == 64 || VecSize % 128 == 0;
12989 }
12990 
12991 static ScalableVectorType *getSVEContainerIRType(FixedVectorType *VTy) {
12992   if (VTy->getElementType() == Type::getDoubleTy(VTy->getContext()))
12993     return ScalableVectorType::get(VTy->getElementType(), 2);
12994 
12995   if (VTy->getElementType() == Type::getFloatTy(VTy->getContext()))
12996     return ScalableVectorType::get(VTy->getElementType(), 4);
12997 
12998   if (VTy->getElementType() == Type::getBFloatTy(VTy->getContext()))
12999     return ScalableVectorType::get(VTy->getElementType(), 8);
13000 
13001   if (VTy->getElementType() == Type::getHalfTy(VTy->getContext()))
13002     return ScalableVectorType::get(VTy->getElementType(), 8);
13003 
13004   if (VTy->getElementType() == Type::getInt64Ty(VTy->getContext()))
13005     return ScalableVectorType::get(VTy->getElementType(), 2);
13006 
13007   if (VTy->getElementType() == Type::getInt32Ty(VTy->getContext()))
13008     return ScalableVectorType::get(VTy->getElementType(), 4);
13009 
13010   if (VTy->getElementType() == Type::getInt16Ty(VTy->getContext()))
13011     return ScalableVectorType::get(VTy->getElementType(), 8);
13012 
13013   if (VTy->getElementType() == Type::getInt8Ty(VTy->getContext()))
13014     return ScalableVectorType::get(VTy->getElementType(), 16);
13015 
13016   llvm_unreachable("Cannot handle input vector type");
13017 }
13018 
13019 /// Lower an interleaved load into a ldN intrinsic.
13020 ///
13021 /// E.g. Lower an interleaved load (Factor = 2):
13022 ///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr
13023 ///        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
13024 ///        %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
13025 ///
13026 ///      Into:
13027 ///        %ld2 = { <4 x i32>, <4 x i32> } call llvm.aarch64.neon.ld2(%ptr)
13028 ///        %vec0 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 0
13029 ///        %vec1 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 1
13030 bool AArch64TargetLowering::lowerInterleavedLoad(
13031     LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
13032     ArrayRef<unsigned> Indices, unsigned Factor) const {
13033   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
13034          "Invalid interleave factor");
13035   assert(!Shuffles.empty() && "Empty shufflevector input");
13036   assert(Shuffles.size() == Indices.size() &&
13037          "Unmatched number of shufflevectors and indices");
13038 
13039   const DataLayout &DL = LI->getModule()->getDataLayout();
13040 
13041   VectorType *VTy = Shuffles[0]->getType();
13042 
  // Skip if we do not have NEON, and skip illegal vector types. We can
  // "legalize" wide vector types into multiple interleaved accesses as long as
  // the vector size is divisible by 128 bits.
13046   bool UseScalable;
13047   if (!Subtarget->hasNEON() ||
13048       !isLegalInterleavedAccessType(VTy, DL, UseScalable))
13049     return false;
13050 
13051   unsigned NumLoads = getNumInterleavedAccesses(VTy, DL, UseScalable);
13052 
13053   auto *FVTy = cast<FixedVectorType>(VTy);
13054 
  // A pointer vector cannot be the return type of the ldN intrinsics. We need
  // to load integer vectors first and then convert to pointer vectors.
13057   Type *EltTy = FVTy->getElementType();
13058   if (EltTy->isPointerTy())
13059     FVTy =
13060         FixedVectorType::get(DL.getIntPtrType(EltTy), FVTy->getNumElements());
13061 
13062   // If we're going to generate more than one load, reset the sub-vector type
13063   // to something legal.
13064   FVTy = FixedVectorType::get(FVTy->getElementType(),
13065                               FVTy->getNumElements() / NumLoads);
13066 
13067   auto *LDVTy =
13068       UseScalable ? cast<VectorType>(getSVEContainerIRType(FVTy)) : FVTy;
13069 
13070   IRBuilder<> Builder(LI);
13071 
13072   // The base address of the load.
13073   Value *BaseAddr = LI->getPointerOperand();
13074 
13075   if (NumLoads > 1) {
13076     // We will compute the pointer operand of each load from the original base
13077     // address using GEPs. Cast the base address to a pointer to the scalar
13078     // element type.
13079     BaseAddr = Builder.CreateBitCast(
13080         BaseAddr,
13081         LDVTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()));
13082   }
13083 
13084   Type *PtrTy =
13085       UseScalable
13086           ? LDVTy->getElementType()->getPointerTo(LI->getPointerAddressSpace())
13087           : LDVTy->getPointerTo(LI->getPointerAddressSpace());
13088   Type *PredTy = VectorType::get(Type::getInt1Ty(LDVTy->getContext()),
13089                                  LDVTy->getElementCount());
13090 
13091   static const Intrinsic::ID SVELoadIntrs[3] = {
13092       Intrinsic::aarch64_sve_ld2_sret, Intrinsic::aarch64_sve_ld3_sret,
13093       Intrinsic::aarch64_sve_ld4_sret};
13094   static const Intrinsic::ID NEONLoadIntrs[3] = {Intrinsic::aarch64_neon_ld2,
13095                                                  Intrinsic::aarch64_neon_ld3,
13096                                                  Intrinsic::aarch64_neon_ld4};
13097   Function *LdNFunc;
13098   if (UseScalable)
13099     LdNFunc = Intrinsic::getDeclaration(LI->getModule(),
13100                                         SVELoadIntrs[Factor - 2], {LDVTy});
13101   else
13102     LdNFunc = Intrinsic::getDeclaration(
13103         LI->getModule(), NEONLoadIntrs[Factor - 2], {LDVTy, PtrTy});
13104 
13105   // Holds sub-vectors extracted from the load intrinsic return values. The
13106   // sub-vectors are associated with the shufflevector instructions they will
13107   // replace.
13108   DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;
13109 
13110   Value *PTrue = nullptr;
13111   if (UseScalable) {
13112     Optional<unsigned> PgPattern =
13113         getSVEPredPatternFromNumElements(FVTy->getNumElements());
13114     if (Subtarget->getMinSVEVectorSizeInBits() ==
13115             Subtarget->getMaxSVEVectorSizeInBits() &&
13116         Subtarget->getMinSVEVectorSizeInBits() == DL.getTypeSizeInBits(FVTy))
13117       PgPattern = AArch64SVEPredPattern::all;
13118 
13119     auto *PTruePat =
13120         ConstantInt::get(Type::getInt32Ty(LDVTy->getContext()), *PgPattern);
13121     PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {PredTy},
13122                                     {PTruePat});
13123   }
13124 
13125   for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
13126 
13127     // If we're generating more than one load, compute the base address of
13128     // subsequent loads as an offset from the previous.
13129     if (LoadCount > 0)
13130       BaseAddr = Builder.CreateConstGEP1_32(LDVTy->getElementType(), BaseAddr,
13131                                             FVTy->getNumElements() * Factor);
13132 
13133     CallInst *LdN;
13134     if (UseScalable)
13135       LdN = Builder.CreateCall(
13136           LdNFunc, {PTrue, Builder.CreateBitCast(BaseAddr, PtrTy)}, "ldN");
13137     else
13138       LdN = Builder.CreateCall(LdNFunc, Builder.CreateBitCast(BaseAddr, PtrTy),
13139                                "ldN");
13140 
13141     // Extract and store the sub-vectors returned by the load intrinsic.
13142     for (unsigned i = 0; i < Shuffles.size(); i++) {
13143       ShuffleVectorInst *SVI = Shuffles[i];
13144       unsigned Index = Indices[i];
13145 
13146       Value *SubVec = Builder.CreateExtractValue(LdN, Index);
13147 
13148       if (UseScalable)
13149         SubVec = Builder.CreateExtractVector(
13150             FVTy, SubVec,
13151             ConstantInt::get(Type::getInt64Ty(VTy->getContext()), 0));
13152 
      // Convert the integer vector to a pointer vector if the element type is
      // a pointer.
13154       if (EltTy->isPointerTy())
13155         SubVec = Builder.CreateIntToPtr(
13156             SubVec, FixedVectorType::get(SVI->getType()->getElementType(),
13157                                          FVTy->getNumElements()));
13158 
13159       SubVecs[SVI].push_back(SubVec);
13160     }
13161   }
13162 
13163   // Replace uses of the shufflevector instructions with the sub-vectors
13164   // returned by the load intrinsic. If a shufflevector instruction is
13165   // associated with more than one sub-vector, those sub-vectors will be
13166   // concatenated into a single wide vector.
13167   for (ShuffleVectorInst *SVI : Shuffles) {
13168     auto &SubVec = SubVecs[SVI];
13169     auto *WideVec =
13170         SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
13171     SVI->replaceAllUsesWith(WideVec);
13172   }
13173 
13174   return true;
13175 }
13176 
13177 /// Lower an interleaved store into a stN intrinsic.
13178 ///
13179 /// E.g. Lower an interleaved store (Factor = 3):
13180 ///        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
13181 ///                 <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
13182 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr
13183 ///
13184 ///      Into:
13185 ///        %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
13186 ///        %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
13187 ///        %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
13188 ///        call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr)
13189 ///
13190 /// Note that the new shufflevectors will be removed and we'll only generate one
13191 /// st3 instruction in CodeGen.
13192 ///
13193 /// Example for a more general valid mask (Factor 3). Lower:
13194 ///        %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
13195 ///                 <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
13196 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr
13197 ///
13198 ///      Into:
13199 ///        %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7>
13200 ///        %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
13201 ///        %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
13202 ///        call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr)
13203 bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
13204                                                   ShuffleVectorInst *SVI,
13205                                                   unsigned Factor) const {
13206   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
13207          "Invalid interleave factor");
13208 
13209   auto *VecTy = cast<FixedVectorType>(SVI->getType());
13210   assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
13211 
13212   unsigned LaneLen = VecTy->getNumElements() / Factor;
13213   Type *EltTy = VecTy->getElementType();
13214   auto *SubVecTy = FixedVectorType::get(EltTy, LaneLen);
13215 
13216   const DataLayout &DL = SI->getModule()->getDataLayout();
13217   bool UseScalable;
13218 
  // Skip if we do not have NEON, and skip illegal vector types. We can
  // "legalize" wide vector types into multiple interleaved accesses as long as
  // the vector size is divisible by 128 bits.
13222   if (!Subtarget->hasNEON() ||
13223       !isLegalInterleavedAccessType(SubVecTy, DL, UseScalable))
13224     return false;
13225 
13226   unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL, UseScalable);
13227 
13228   Value *Op0 = SVI->getOperand(0);
13229   Value *Op1 = SVI->getOperand(1);
13230   IRBuilder<> Builder(SI);
13231 
13232   // StN intrinsics don't support pointer vectors as arguments. Convert pointer
13233   // vectors to integer vectors.
13234   if (EltTy->isPointerTy()) {
13235     Type *IntTy = DL.getIntPtrType(EltTy);
13236     unsigned NumOpElts =
13237         cast<FixedVectorType>(Op0->getType())->getNumElements();
13238 
13239     // Convert to the corresponding integer vector.
13240     auto *IntVecTy = FixedVectorType::get(IntTy, NumOpElts);
13241     Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
13242     Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);
13243 
13244     SubVecTy = FixedVectorType::get(IntTy, LaneLen);
13245   }
13246 
13247   // If we're going to generate more than one store, reset the lane length
13248   // and sub-vector type to something legal.
13249   LaneLen /= NumStores;
13250   SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen);
13251 
13252   auto *STVTy = UseScalable ? cast<VectorType>(getSVEContainerIRType(SubVecTy))
13253                             : SubVecTy;
13254 
13255   // The base address of the store.
13256   Value *BaseAddr = SI->getPointerOperand();
13257 
13258   if (NumStores > 1) {
13259     // We will compute the pointer operand of each store from the original base
13260     // address using GEPs. Cast the base address to a pointer to the scalar
13261     // element type.
13262     BaseAddr = Builder.CreateBitCast(
13263         BaseAddr,
13264         SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace()));
13265   }
13266 
13267   auto Mask = SVI->getShuffleMask();
13268 
13269   Type *PtrTy =
13270       UseScalable
13271           ? STVTy->getElementType()->getPointerTo(SI->getPointerAddressSpace())
13272           : STVTy->getPointerTo(SI->getPointerAddressSpace());
13273   Type *PredTy = VectorType::get(Type::getInt1Ty(STVTy->getContext()),
13274                                  STVTy->getElementCount());
13275 
13276   static const Intrinsic::ID SVEStoreIntrs[3] = {Intrinsic::aarch64_sve_st2,
13277                                                  Intrinsic::aarch64_sve_st3,
13278                                                  Intrinsic::aarch64_sve_st4};
13279   static const Intrinsic::ID NEONStoreIntrs[3] = {Intrinsic::aarch64_neon_st2,
13280                                                   Intrinsic::aarch64_neon_st3,
13281                                                   Intrinsic::aarch64_neon_st4};
13282   Function *StNFunc;
13283   if (UseScalable)
13284     StNFunc = Intrinsic::getDeclaration(SI->getModule(),
13285                                         SVEStoreIntrs[Factor - 2], {STVTy});
13286   else
13287     StNFunc = Intrinsic::getDeclaration(
13288         SI->getModule(), NEONStoreIntrs[Factor - 2], {STVTy, PtrTy});
13289 
13290   Value *PTrue = nullptr;
13291   if (UseScalable) {
13292     Optional<unsigned> PgPattern =
13293         getSVEPredPatternFromNumElements(SubVecTy->getNumElements());
13294     if (Subtarget->getMinSVEVectorSizeInBits() ==
13295             Subtarget->getMaxSVEVectorSizeInBits() &&
13296         Subtarget->getMinSVEVectorSizeInBits() ==
13297             DL.getTypeSizeInBits(SubVecTy))
13298       PgPattern = AArch64SVEPredPattern::all;
13299 
13300     auto *PTruePat =
13301         ConstantInt::get(Type::getInt32Ty(STVTy->getContext()), *PgPattern);
13302     PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {PredTy},
13303                                     {PTruePat});
13304   }
13305 
13306   for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
13307 
13308     SmallVector<Value *, 5> Ops;
13309 
13310     // Split the shufflevector operands into sub vectors for the new stN call.
13311     for (unsigned i = 0; i < Factor; i++) {
13312       Value *Shuffle;
13313       unsigned IdxI = StoreCount * LaneLen * Factor + i;
13314       if (Mask[IdxI] >= 0) {
13315         Shuffle = Builder.CreateShuffleVector(
13316             Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0));
13317       } else {
13318         unsigned StartMask = 0;
13319         for (unsigned j = 1; j < LaneLen; j++) {
13320           unsigned IdxJ = StoreCount * LaneLen * Factor + j;
13321           if (Mask[IdxJ * Factor + IdxI] >= 0) {
13322             StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
13323             break;
13324           }
13325         }
        // Note: Filling undef gaps with arbitrary elements is OK, since those
        // elements were being written anyway (with undefs). In the case of all
        // undefs we default to using elements starting from 0.
        // Note: StartMask cannot be negative; that is checked in
        // isReInterleaveMask.
13331         Shuffle = Builder.CreateShuffleVector(
13332             Op0, Op1, createSequentialMask(StartMask, LaneLen, 0));
13333       }
13334 
13335       if (UseScalable)
13336         Shuffle = Builder.CreateInsertVector(
13337             STVTy, UndefValue::get(STVTy), Shuffle,
13338             ConstantInt::get(Type::getInt64Ty(STVTy->getContext()), 0));
13339 
13340       Ops.push_back(Shuffle);
13341     }
13342 
13343     if (UseScalable)
13344       Ops.push_back(PTrue);
13345 
    // If we're generating more than one store, compute the base address of
13347     // subsequent stores as an offset from the previous.
13348     if (StoreCount > 0)
13349       BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(),
13350                                             BaseAddr, LaneLen * Factor);
13351 
13352     Ops.push_back(Builder.CreateBitCast(BaseAddr, PtrTy));
13353     Builder.CreateCall(StNFunc, Ops);
13354   }
13355   return true;
13356 }
13357 
13358 // Lower an SVE structured load intrinsic returning a tuple type to target
13359 // specific intrinsic taking the same input but returning a multi-result value
13360 // of the split tuple type.
13361 //
13362 // E.g. Lowering an LD3:
13363 //
13364 //  call <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32(
13365 //                                                    <vscale x 4 x i1> %pred,
13366 //                                                    <vscale x 4 x i32>* %addr)
13367 //
13368 //  Output DAG:
13369 //
13370 //    t0: ch = EntryToken
13371 //        t2: nxv4i1,ch = CopyFromReg t0, Register:nxv4i1 %0
13372 //        t4: i64,ch = CopyFromReg t0, Register:i64 %1
13373 //    t5: nxv4i32,nxv4i32,nxv4i32,ch = AArch64ISD::SVE_LD3 t0, t2, t4
13374 //    t6: nxv12i32 = concat_vectors t5, t5:1, t5:2
13375 //
13376 // This is called pre-legalization to avoid widening/splitting issues with
13377 // non-power-of-2 tuple types used for LD3, such as nxv12i32.
13378 SDValue AArch64TargetLowering::LowerSVEStructLoad(unsigned Intrinsic,
13379                                                   ArrayRef<SDValue> LoadOps,
13380                                                   EVT VT, SelectionDAG &DAG,
13381                                                   const SDLoc &DL) const {
13382   assert(VT.isScalableVector() && "Can only lower scalable vectors");
13383 
13384   unsigned N, Opcode;
13385   static const std::pair<unsigned, std::pair<unsigned, unsigned>>
13386       IntrinsicMap[] = {
13387           {Intrinsic::aarch64_sve_ld2, {2, AArch64ISD::SVE_LD2_MERGE_ZERO}},
13388           {Intrinsic::aarch64_sve_ld3, {3, AArch64ISD::SVE_LD3_MERGE_ZERO}},
13389           {Intrinsic::aarch64_sve_ld4, {4, AArch64ISD::SVE_LD4_MERGE_ZERO}}};
13390 
13391   std::tie(N, Opcode) = llvm::find_if(IntrinsicMap, [&](auto P) {
13392                           return P.first == Intrinsic;
13393                         })->second;
13394   assert(VT.getVectorElementCount().getKnownMinValue() % N == 0 &&
13395          "invalid tuple vector type!");
13396 
13397   EVT SplitVT =
13398       EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
13399                        VT.getVectorElementCount().divideCoefficientBy(N));
13400   assert(isTypeLegal(SplitVT));
13401 
13402   SmallVector<EVT, 5> VTs(N, SplitVT);
13403   VTs.push_back(MVT::Other); // Chain
13404   SDVTList NodeTys = DAG.getVTList(VTs);
13405 
13406   SDValue PseudoLoad = DAG.getNode(Opcode, DL, NodeTys, LoadOps);
13407   SmallVector<SDValue, 4> PseudoLoadOps;
13408   for (unsigned I = 0; I < N; ++I)
13409     PseudoLoadOps.push_back(SDValue(PseudoLoad.getNode(), I));
13410   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, PseudoLoadOps);
13411 }
13412 
13413 EVT AArch64TargetLowering::getOptimalMemOpType(
13414     const MemOp &Op, const AttributeList &FuncAttributes) const {
13415   bool CanImplicitFloat = !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
13416   bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat;
13417   bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat;
  // Only use AdvSIMD to implement memsets of 32 bytes and above. For smaller
  // memsets it would take one instruction to materialize the v2i64 zero and
  // one store (with a restrictive addressing mode); just do i64 stores.
13421   bool IsSmallMemset = Op.isMemset() && Op.size() < 32;
13422   auto AlignmentIsAcceptable = [&](EVT VT, Align AlignCheck) {
13423     if (Op.isAligned(AlignCheck))
13424       return true;
13425     bool Fast;
13426     return allowsMisalignedMemoryAccesses(VT, 0, Align(1),
13427                                           MachineMemOperand::MONone, &Fast) &&
13428            Fast;
13429   };
13430 
13431   if (CanUseNEON && Op.isMemset() && !IsSmallMemset &&
13432       AlignmentIsAcceptable(MVT::v16i8, Align(16)))
13433     return MVT::v16i8;
13434   if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16)))
13435     return MVT::f128;
13436   if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8)))
13437     return MVT::i64;
13438   if (Op.size() >= 4 && AlignmentIsAcceptable(MVT::i32, Align(4)))
13439     return MVT::i32;
13440   return MVT::Other;
13441 }
13442 
13443 LLT AArch64TargetLowering::getOptimalMemOpLLT(
13444     const MemOp &Op, const AttributeList &FuncAttributes) const {
13445   bool CanImplicitFloat = !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
13446   bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat;
13447   bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat;
  // Only use AdvSIMD to implement memsets of 32 bytes and above. For smaller
  // memsets it would take one instruction to materialize the v2i64 zero and
  // one store (with a restrictive addressing mode); just do i64 stores.
13451   bool IsSmallMemset = Op.isMemset() && Op.size() < 32;
13452   auto AlignmentIsAcceptable = [&](EVT VT, Align AlignCheck) {
13453     if (Op.isAligned(AlignCheck))
13454       return true;
13455     bool Fast;
13456     return allowsMisalignedMemoryAccesses(VT, 0, Align(1),
13457                                           MachineMemOperand::MONone, &Fast) &&
13458            Fast;
13459   };
13460 
13461   if (CanUseNEON && Op.isMemset() && !IsSmallMemset &&
13462       AlignmentIsAcceptable(MVT::v2i64, Align(16)))
13463     return LLT::fixed_vector(2, 64);
13464   if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16)))
13465     return LLT::scalar(128);
13466   if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8)))
13467     return LLT::scalar(64);
13468   if (Op.size() >= 4 && AlignmentIsAcceptable(MVT::i32, Align(4)))
13469     return LLT::scalar(32);
13470   return LLT();
13471 }
13472 
13473 // 12-bit optionally shifted immediates are legal for adds.
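// For example, 0xfff (4095) and 0xfff000 (4095 << 12) are legal add
// immediates, while 0x1001 is not and would need more than one instruction.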
13474 bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const {
13475   if (Immed == std::numeric_limits<int64_t>::min()) {
13476     LLVM_DEBUG(dbgs() << "Illegal add imm " << Immed
13477                       << ": avoid UB for INT64_MIN\n");
13478     return false;
13479   }
13480   // Same encoding for add/sub, just flip the sign.
13481   Immed = std::abs(Immed);
13482   bool IsLegal = ((Immed >> 12) == 0 ||
13483                   ((Immed & 0xfff) == 0 && Immed >> 24 == 0));
13484   LLVM_DEBUG(dbgs() << "Is " << Immed
13485                     << " legal add imm: " << (IsLegal ? "yes" : "no") << "\n");
13486   return IsLegal;
13487 }
13488 
13489 // Return false to prevent folding
13490 // (mul (add x, c1), c2) -> (add (mul x, c2), c2*c1) in DAGCombine,
13491 // if the folding leads to worse code.
13492 bool AArch64TargetLowering::isMulAddWithConstProfitable(
13493     SDValue AddNode, SDValue ConstNode) const {
13494   // Let the DAGCombiner decide for vector types and large types.
13495   const EVT VT = AddNode.getValueType();
13496   if (VT.isVector() || VT.getScalarSizeInBits() > 64)
13497     return true;
13498 
  // It is worse if c1 is a legal add immediate while c1*c2 is not, and c1*c2
  // has to be materialized with at least two instructions.
13501   const ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
13502   const ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
13503   const int64_t C1 = C1Node->getSExtValue();
13504   const APInt C1C2 = C1Node->getAPIntValue() * C2Node->getAPIntValue();
13505   if (!isLegalAddImmediate(C1) || isLegalAddImmediate(C1C2.getSExtValue()))
13506     return true;
13507   SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
13508   AArch64_IMM::expandMOVImm(C1C2.getZExtValue(), VT.getSizeInBits(), Insn);
13509   if (Insn.size() > 1)
13510     return false;
13511 
13512   // Default to true and let the DAGCombiner decide.
13513   return true;
13514 }
13515 
13516 // Integer comparisons are implemented with ADDS/SUBS, so the range of valid
13517 // immediates is the same as for an add or a sub.
13518 bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Immed) const {
13519   return isLegalAddImmediate(Immed);
13520 }
13521 
13522 /// isLegalAddressingMode - Return true if the addressing mode represented
13523 /// by AM is legal for this target, for a load/store of the specified type.
13524 bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
13525                                                   const AddrMode &AM, Type *Ty,
                                                  unsigned AS,
                                                  Instruction *I) const {
13527   // AArch64 has five basic addressing modes:
13528   //  reg
13529   //  reg + 9-bit signed offset
13530   //  reg + SIZE_IN_BYTES * 12-bit unsigned offset
13531   //  reg1 + reg2
13532   //  reg + SIZE_IN_BYTES * reg
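  //
  // For example: "ldr x0, [x1, #16]" (reg + scaled 12-bit unsigned offset),
  // "ldur x0, [x1, #-8]" (reg + 9-bit signed offset), and
  // "ldr x0, [x1, x2, lsl #3]" (reg + SIZE_IN_BYTES * reg).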
13533 
13534   // No global is ever allowed as a base.
13535   if (AM.BaseGV)
13536     return false;
13537 
13538   // No reg+reg+imm addressing.
13539   if (AM.HasBaseReg && AM.BaseOffs && AM.Scale)
13540     return false;
13541 
13542   // FIXME: Update this method to support scalable addressing modes.
13543   if (isa<ScalableVectorType>(Ty)) {
13544     uint64_t VecElemNumBytes =
13545         DL.getTypeSizeInBits(cast<VectorType>(Ty)->getElementType()) / 8;
13546     return AM.HasBaseReg && !AM.BaseOffs &&
13547            (AM.Scale == 0 || (uint64_t)AM.Scale == VecElemNumBytes);
13548   }
13549 
  // Check the reg + imm case:
13551   // i.e., reg + 0, reg + imm9, reg + SIZE_IN_BYTES * uimm12
13552   uint64_t NumBytes = 0;
13553   if (Ty->isSized()) {
13554     uint64_t NumBits = DL.getTypeSizeInBits(Ty);
13555     NumBytes = NumBits / 8;
13556     if (!isPowerOf2_64(NumBits))
13557       NumBytes = 0;
13558   }
13559 
13560   if (!AM.Scale) {
13561     int64_t Offset = AM.BaseOffs;
13562 
13563     // 9-bit signed offset
13564     if (isInt<9>(Offset))
13565       return true;
13566 
13567     // 12-bit unsigned offset
13568     unsigned shift = Log2_64(NumBytes);
13569     if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 &&
13570         // Must be a multiple of NumBytes (NumBytes is a power of 2)
13571         (Offset >> shift) << shift == Offset)
13572       return true;
13573     return false;
13574   }
13575 
13576   // Check reg1 + SIZE_IN_BYTES * reg2 and reg1 + reg2
13577 
13578   return AM.Scale == 1 || (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes);
13579 }
13580 
13581 bool AArch64TargetLowering::shouldConsiderGEPOffsetSplit() const {
  // Consider splitting large offsets of structs or arrays.
13583   return true;
13584 }
13585 
13586 InstructionCost AArch64TargetLowering::getScalingFactorCost(
13587     const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const {
13588   // Scaling factors are not free at all.
13589   // Operands                     | Rt Latency
13590   // -------------------------------------------
13591   // Rt, [Xn, Xm]                 | 4
13592   // -------------------------------------------
13593   // Rt, [Xn, Xm, lsl #imm]       | Rn: 4 Rm: 5
13594   // Rt, [Xn, Wm, <extend> #imm]  |
13595   if (isLegalAddressingMode(DL, AM, Ty, AS))
13596     // Scale represents reg2 * scale, thus account for 1 if
13597     // it is not equal to 0 or 1.
13598     return AM.Scale != 0 && AM.Scale != 1;
13599   return -1;
13600 }
13601 
13602 bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(
13603     const MachineFunction &MF, EVT VT) const {
13604   VT = VT.getScalarType();
13605 
13606   if (!VT.isSimple())
13607     return false;
13608 
13609   switch (VT.getSimpleVT().SimpleTy) {
13610   case MVT::f16:
13611     return Subtarget->hasFullFP16();
13612   case MVT::f32:
13613   case MVT::f64:
13614     return true;
13615   default:
13616     break;
13617   }
13618 
13619   return false;
13620 }
13621 
13622 bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
13623                                                        Type *Ty) const {
13624   switch (Ty->getScalarType()->getTypeID()) {
13625   case Type::FloatTyID:
13626   case Type::DoubleTyID:
13627     return true;
13628   default:
13629     return false;
13630   }
13631 }
13632 
13633 bool AArch64TargetLowering::generateFMAsInMachineCombiner(
13634     EVT VT, CodeGenOpt::Level OptLevel) const {
13635   return (OptLevel >= CodeGenOpt::Aggressive) && !VT.isScalableVector() &&
13636          !useSVEForFixedLengthVectorVT(VT);
13637 }
13638 
13639 const MCPhysReg *
13640 AArch64TargetLowering::getScratchRegisters(CallingConv::ID) const {
13641   // LR is a callee-save register, but we must treat it as clobbered by any call
13642   // site. Hence we include LR in the scratch registers, which are in turn added
13643   // as implicit-defs for stackmaps and patchpoints.
13644   static const MCPhysReg ScratchRegs[] = {
13645     AArch64::X16, AArch64::X17, AArch64::LR, 0
13646   };
13647   return ScratchRegs;
13648 }
13649 
13650 bool
13651 AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
13652                                                      CombineLevel Level) const {
13653   assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
13654           N->getOpcode() == ISD::SRL) &&
13655          "Expected shift op");
13656 
13657   SDValue ShiftLHS = N->getOperand(0);
13658   EVT VT = N->getValueType(0);
13659 
  // If ShiftLHS is an unsigned bit extraction ((x >> C) & mask), do not
  // combine it with shift 'N' so that it can be lowered to UBFX.
13662   if (ShiftLHS.getOpcode() == ISD::AND && (VT == MVT::i32 || VT == MVT::i64) &&
13663       isa<ConstantSDNode>(ShiftLHS.getOperand(1))) {
13664     uint64_t TruncMask = ShiftLHS.getConstantOperandVal(1);
13665     if (isMask_64(TruncMask) &&
13666         ShiftLHS.getOperand(0).getOpcode() == ISD::SRL &&
13667         isa<ConstantSDNode>(ShiftLHS.getOperand(0).getOperand(1)))
13668       return false;
13669   }
13670   return true;
13671 }
13672 
13673 bool AArch64TargetLowering::isDesirableToCommuteXorWithShift(
13674     const SDNode *N) const {
13675   assert(N->getOpcode() == ISD::XOR &&
13676          (N->getOperand(0).getOpcode() == ISD::SHL ||
13677           N->getOperand(0).getOpcode() == ISD::SRL) &&
13678          "Expected XOR(SHIFT) pattern");
13679 
13680   // Only commute if the entire NOT mask is a hidden shifted mask.
13681   auto *XorC = dyn_cast<ConstantSDNode>(N->getOperand(1));
13682   auto *ShiftC = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1));
13683   if (XorC && ShiftC) {
13684     unsigned MaskIdx, MaskLen;
13685     if (XorC->getAPIntValue().isShiftedMask(MaskIdx, MaskLen)) {
13686       unsigned ShiftAmt = ShiftC->getZExtValue();
13687       unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
13688       if (N->getOperand(0).getOpcode() == ISD::SHL)
13689         return MaskIdx == ShiftAmt && MaskLen == (BitWidth - ShiftAmt);
13690       return MaskIdx == 0 && MaskLen == (BitWidth - ShiftAmt);
13691     }
13692   }
13693 
13694   return false;
13695 }
13696 
13697 bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask(
13698     const SDNode *N, CombineLevel Level) const {
13699   assert(((N->getOpcode() == ISD::SHL &&
13700            N->getOperand(0).getOpcode() == ISD::SRL) ||
13701           (N->getOpcode() == ISD::SRL &&
13702            N->getOperand(0).getOpcode() == ISD::SHL)) &&
13703          "Expected shift-shift mask");
13704   // Don't allow multiuse shift folding with the same shift amount.
13705   if (!N->getOperand(0)->hasOneUse())
13706     return false;
13707 
13708   // Only fold srl(shl(x,c1),c2) iff C1 >= C2 to prevent loss of UBFX patterns.
13709   EVT VT = N->getValueType(0);
13710   if (N->getOpcode() == ISD::SRL && (VT == MVT::i32 || VT == MVT::i64)) {
13711     auto *C1 = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1));
13712     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
13713     return (!C1 || !C2 || C1->getZExtValue() >= C2->getZExtValue());
13714   }
13715 
13716   return true;
13717 }
13718 
13719 bool AArch64TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
13720                                                               Type *Ty) const {
13721   assert(Ty->isIntegerTy());
13722 
13723   unsigned BitSize = Ty->getPrimitiveSizeInBits();
13724   if (BitSize == 0)
13725     return false;
13726 
13727   int64_t Val = Imm.getSExtValue();
13728   if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, BitSize))
13729     return true;
13730 
13731   if ((int64_t)Val < 0)
13732     Val = ~Val;
13733   if (BitSize == 32)
13734     Val &= (1LL << 32) - 1;
13735 
13736   unsigned LZ = countLeadingZeros((uint64_t)Val);
13737   unsigned Shift = (63 - LZ) / 16;
13738   // MOVZ is free so return true for one or fewer MOVK.
13739   return Shift < 3;
13740 }
13741 
13742 bool AArch64TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
13743                                                     unsigned Index) const {
13744   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
13745     return false;
13746 
13747   return (Index == 0 || Index == ResVT.getVectorMinNumElements());
13748 }
13749 
13750 /// Turn vector tests of the signbit in the form of:
13751 ///   xor (sra X, elt_size(X)-1), -1
13752 /// into:
13753 ///   cmge X, X, #0
13754 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
13755                                          const AArch64Subtarget *Subtarget) {
13756   EVT VT = N->getValueType(0);
13757   if (!Subtarget->hasNEON() || !VT.isVector())
13758     return SDValue();
13759 
13760   // There must be a shift right algebraic before the xor, and the xor must be a
13761   // 'not' operation.
13762   SDValue Shift = N->getOperand(0);
13763   SDValue Ones = N->getOperand(1);
13764   if (Shift.getOpcode() != AArch64ISD::VASHR || !Shift.hasOneUse() ||
13765       !ISD::isBuildVectorAllOnes(Ones.getNode()))
13766     return SDValue();
13767 
13768   // The shift should be smearing the sign bit across each vector element.
13769   auto *ShiftAmt = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
13770   EVT ShiftEltTy = Shift.getValueType().getVectorElementType();
13771   if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1)
13772     return SDValue();
13773 
13774   return DAG.getNode(AArch64ISD::CMGEz, SDLoc(N), VT, Shift.getOperand(0));
13775 }
13776 
// Given a vecreduce_add node, detect the pattern below and convert it to a
// node sequence using UABDL, [S|U]ABD and UADDLP.
13779 //
13780 // i32 vecreduce_add(
13781 //  v16i32 abs(
13782 //    v16i32 sub(
13783 //     v16i32 [sign|zero]_extend(v16i8 a), v16i32 [sign|zero]_extend(v16i8 b))))
13784 // =================>
// i32 vecreduce_add(
//   v4i32 UADDLP(
//     v8i16 add(
//       v8i16 zext(
//         v8i8 [S|U]ABD low8:v16i8 a, low8:v16i8 b),
//       v8i16 zext(
//         v8i8 [S|U]ABD high8:v16i8 a, high8:v16i8 b))))
13792 static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N,
13793                                                     SelectionDAG &DAG) {
13794   // Assumed i32 vecreduce_add
13795   if (N->getValueType(0) != MVT::i32)
13796     return SDValue();
13797 
13798   SDValue VecReduceOp0 = N->getOperand(0);
13799   unsigned Opcode = VecReduceOp0.getOpcode();
13800   // Assumed v16i32 abs
13801   if (Opcode != ISD::ABS || VecReduceOp0->getValueType(0) != MVT::v16i32)
13802     return SDValue();
13803 
13804   SDValue ABS = VecReduceOp0;
13805   // Assumed v16i32 sub
13806   if (ABS->getOperand(0)->getOpcode() != ISD::SUB ||
13807       ABS->getOperand(0)->getValueType(0) != MVT::v16i32)
13808     return SDValue();
13809 
13810   SDValue SUB = ABS->getOperand(0);
13811   unsigned Opcode0 = SUB->getOperand(0).getOpcode();
13812   unsigned Opcode1 = SUB->getOperand(1).getOpcode();
13813   // Assumed v16i32 type
13814   if (SUB->getOperand(0)->getValueType(0) != MVT::v16i32 ||
13815       SUB->getOperand(1)->getValueType(0) != MVT::v16i32)
13816     return SDValue();
13817 
13818   // Assumed zext or sext
13819   bool IsZExt = false;
13820   if (Opcode0 == ISD::ZERO_EXTEND && Opcode1 == ISD::ZERO_EXTEND) {
13821     IsZExt = true;
13822   } else if (Opcode0 == ISD::SIGN_EXTEND && Opcode1 == ISD::SIGN_EXTEND) {
13823     IsZExt = false;
13824   } else
13825     return SDValue();
13826 
13827   SDValue EXT0 = SUB->getOperand(0);
13828   SDValue EXT1 = SUB->getOperand(1);
13829   // Assumed zext's operand has v16i8 type
13830   if (EXT0->getOperand(0)->getValueType(0) != MVT::v16i8 ||
13831       EXT1->getOperand(0)->getValueType(0) != MVT::v16i8)
13832     return SDValue();
13833 
  // The pattern is detected. Convert it to a sequence of nodes.
13835   SDLoc DL(N);
13836 
13837   // First, create the node pattern of UABD/SABD.
13838   SDValue UABDHigh8Op0 =
13839       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT0->getOperand(0),
13840                   DAG.getConstant(8, DL, MVT::i64));
13841   SDValue UABDHigh8Op1 =
13842       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT1->getOperand(0),
13843                   DAG.getConstant(8, DL, MVT::i64));
13844   SDValue UABDHigh8 = DAG.getNode(IsZExt ? ISD::ABDU : ISD::ABDS, DL, MVT::v8i8,
13845                                   UABDHigh8Op0, UABDHigh8Op1);
13846   SDValue UABDL = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, UABDHigh8);
13847 
13848   // Second, create the node pattern of UABAL.
13849   SDValue UABDLo8Op0 =
13850       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT0->getOperand(0),
13851                   DAG.getConstant(0, DL, MVT::i64));
13852   SDValue UABDLo8Op1 =
13853       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT1->getOperand(0),
13854                   DAG.getConstant(0, DL, MVT::i64));
13855   SDValue UABDLo8 = DAG.getNode(IsZExt ? ISD::ABDU : ISD::ABDS, DL, MVT::v8i8,
13856                                 UABDLo8Op0, UABDLo8Op1);
13857   SDValue ZExtUABD = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, UABDLo8);
13858   SDValue UABAL = DAG.getNode(ISD::ADD, DL, MVT::v8i16, UABDL, ZExtUABD);
13859 
13860   // Third, create the node of UADDLP.
13861   SDValue UADDLP = DAG.getNode(AArch64ISD::UADDLP, DL, MVT::v4i32, UABAL);
13862 
13863   // Fourth, create the node of VECREDUCE_ADD.
13864   return DAG.getNode(ISD::VECREDUCE_ADD, DL, MVT::i32, UADDLP);
13865 }
13866 
13867 // Turn a v8i8/v16i8 extended vecreduce into a udot/sdot and vecreduce
13868 //   vecreduce.add(ext(A)) to vecreduce.add(DOT(zero, A, one))
13869 //   vecreduce.add(mul(ext(A), ext(B))) to vecreduce.add(DOT(zero, A, B))
13870 static SDValue performVecReduceAddCombine(SDNode *N, SelectionDAG &DAG,
13871                                           const AArch64Subtarget *ST) {
13872   if (!ST->hasDotProd())
13873     return performVecReduceAddCombineWithUADDLP(N, DAG);
13874 
13875   SDValue Op0 = N->getOperand(0);
13876   if (N->getValueType(0) != MVT::i32 ||
13877       Op0.getValueType().getVectorElementType() != MVT::i32)
13878     return SDValue();
13879 
13880   unsigned ExtOpcode = Op0.getOpcode();
13881   SDValue A = Op0;
13882   SDValue B;
13883   if (ExtOpcode == ISD::MUL) {
13884     A = Op0.getOperand(0);
13885     B = Op0.getOperand(1);
13886     if (A.getOpcode() != B.getOpcode() ||
13887         A.getOperand(0).getValueType() != B.getOperand(0).getValueType())
13888       return SDValue();
13889     ExtOpcode = A.getOpcode();
13890   }
13891   if (ExtOpcode != ISD::ZERO_EXTEND && ExtOpcode != ISD::SIGN_EXTEND)
13892     return SDValue();
13893 
13894   EVT Op0VT = A.getOperand(0).getValueType();
13895   if (Op0VT != MVT::v8i8 && Op0VT != MVT::v16i8)
13896     return SDValue();
13897 
13898   SDLoc DL(Op0);
  // For non-MLA reductions, B can be set to 1. For MLA we take the operand of
  // the extend B.
13901   if (!B)
13902     B = DAG.getConstant(1, DL, Op0VT);
13903   else
13904     B = B.getOperand(0);
13905 
13906   SDValue Zeros =
13907       DAG.getConstant(0, DL, Op0VT == MVT::v8i8 ? MVT::v2i32 : MVT::v4i32);
13908   auto DotOpcode =
13909       (ExtOpcode == ISD::ZERO_EXTEND) ? AArch64ISD::UDOT : AArch64ISD::SDOT;
13910   SDValue Dot = DAG.getNode(DotOpcode, DL, Zeros.getValueType(), Zeros,
13911                             A.getOperand(0), B);
13912   return DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0), Dot);
13913 }
13914 
13915 // Given an (integer) vecreduce, we know the order of the inputs does not
13916 // matter. We can convert UADDV(add(zext(extract_lo(x)), zext(extract_hi(x))))
13917 // into UADDV(UADDLP(x)). This can also happen through an extra add, where we
13918 // transform UADDV(add(y, add(zext(extract_lo(x)), zext(extract_hi(x)))))
// into UADDV(add(y, UADDLP(x))).
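// For example (a sketch of the simplest case):
//   UADDV(add(zext(v8i8 extract_lo(v16i8 x)), zext(v8i8 extract_hi(v16i8 x))))
//     --> UADDV(v8i16 UADDLP(v16i8 x))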
13919 static SDValue performUADDVCombine(SDNode *N, SelectionDAG &DAG) {
13920   auto DetectAddExtract = [&](SDValue A) {
13921     // Look for add(zext(extract_lo(x)), zext(extract_hi(x))), returning
13922     // UADDLP(x) if found.
13923     if (A.getOpcode() != ISD::ADD)
13924       return SDValue();
13925     EVT VT = A.getValueType();
13926     SDValue Op0 = A.getOperand(0);
13927     SDValue Op1 = A.getOperand(1);
13928     if (Op0.getOpcode() != Op1.getOpcode() ||
13929         (Op0.getOpcode() != ISD::ZERO_EXTEND &&
13930          Op0.getOpcode() != ISD::SIGN_EXTEND))
13931       return SDValue();
13932     SDValue Ext0 = Op0.getOperand(0);
13933     SDValue Ext1 = Op1.getOperand(0);
13934     if (Ext0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
13935         Ext1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
13936         Ext0.getOperand(0) != Ext1.getOperand(0))
13937       return SDValue();
13938     // Check that the source type has twice as many elements as the add type,
13939     // and that the extracts come from upper/lower halves of the same source.
13940     if (Ext0.getOperand(0).getValueType().getVectorNumElements() !=
13941         VT.getVectorNumElements() * 2)
13942       return SDValue();
13943     if ((Ext0.getConstantOperandVal(1) != 0 ||
13944          Ext1.getConstantOperandVal(1) != VT.getVectorNumElements()) &&
13945         (Ext1.getConstantOperandVal(1) != 0 ||
13946          Ext0.getConstantOperandVal(1) != VT.getVectorNumElements()))
13947       return SDValue();
13948     unsigned Opcode = Op0.getOpcode() == ISD::ZERO_EXTEND ? AArch64ISD::UADDLP
13949                                                           : AArch64ISD::SADDLP;
13950     return DAG.getNode(Opcode, SDLoc(A), VT, Ext0.getOperand(0));
13951   };
13952 
13953   SDValue A = N->getOperand(0);
13954   if (SDValue R = DetectAddExtract(A))
13955     return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), R);
13956   if (A.getOpcode() == ISD::ADD) {
13957     if (SDValue R = DetectAddExtract(A.getOperand(0)))
13958       return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
13959                          DAG.getNode(ISD::ADD, SDLoc(A), A.getValueType(), R,
13960                                      A.getOperand(1)));
13961     if (SDValue R = DetectAddExtract(A.getOperand(1)))
13962       return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
13963                          DAG.getNode(ISD::ADD, SDLoc(A), A.getValueType(), R,
13964                                      A.getOperand(0)));
13965   }
13966   return SDValue();
13967 }
13968 
13970 static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG,
13971                                  TargetLowering::DAGCombinerInfo &DCI,
13972                                  const AArch64Subtarget *Subtarget) {
13973   if (DCI.isBeforeLegalizeOps())
13974     return SDValue();
13975 
13976   return foldVectorXorShiftIntoCmp(N, DAG, Subtarget);
13977 }
13978 
13979 SDValue
13980 AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
13981                                      SelectionDAG &DAG,
13982                                      SmallVectorImpl<SDNode *> &Created) const {
13983   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
13984   if (isIntDivCheap(N->getValueType(0), Attr))
13985     return SDValue(N, 0); // Lower SDIV as SDIV
13986 
13987   EVT VT = N->getValueType(0);
13988 
13989   // For scalable and fixed types, mark them as cheap so we can handle them
13990   // much later. This allows us to handle larger-than-legal types.
13991   if (VT.isScalableVector() || Subtarget->useSVEForFixedLengthVectors())
13992     return SDValue(N, 0);
13993 
13994   // fold (sdiv X, pow2)
13995   if ((VT != MVT::i32 && VT != MVT::i64) ||
13996       !(Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()))
13997     return SDValue();
13998 
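  // For example (a sketch of the DAG built below), X sdiv 8 (Lg2 == 3):
  //   Add  = X + 7
  //   CSel = (X < 0) ? Add : X
  //   Res  = CSel >> 3   (arithmetic shift)
  // with a final negation appended when the divisor is negative.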
13999   SDLoc DL(N);
14000   SDValue N0 = N->getOperand(0);
14001   unsigned Lg2 = Divisor.countTrailingZeros();
14002   SDValue Zero = DAG.getConstant(0, DL, VT);
14003   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
14004 
14005   // Add (N0 < 0) ? Pow2 - 1 : 0;
14006   SDValue CCVal;
14007   SDValue Cmp = getAArch64Cmp(N0, Zero, ISD::SETLT, CCVal, DAG, DL);
14008   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
14009   SDValue CSel = DAG.getNode(AArch64ISD::CSEL, DL, VT, Add, N0, CCVal, Cmp);
14010 
14011   Created.push_back(Cmp.getNode());
14012   Created.push_back(Add.getNode());
14013   Created.push_back(CSel.getNode());
14014 
14015   // Divide by pow2.
14016   SDValue SRA =
14017       DAG.getNode(ISD::SRA, DL, VT, CSel, DAG.getConstant(Lg2, DL, MVT::i64));
14018 
14019   // If we're dividing by a positive value, we're done.  Otherwise, we must
14020   // negate the result.
14021   if (Divisor.isNonNegative())
14022     return SRA;
14023 
14024   Created.push_back(SRA.getNode());
14025   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
14026 }
14027 
14028 SDValue
14029 AArch64TargetLowering::BuildSREMPow2(SDNode *N, const APInt &Divisor,
14030                                      SelectionDAG &DAG,
14031                                      SmallVectorImpl<SDNode *> &Created) const {
14032   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
14033   if (isIntDivCheap(N->getValueType(0), Attr))
14034     return SDValue(N, 0); // Lower SREM as SREM
14035 
14036   EVT VT = N->getValueType(0);
14037 
14038   // For scalable and fixed types, mark them as cheap so we can handle them
14039   // much later. This allows us to handle larger-than-legal types.
14040   if (VT.isScalableVector() || Subtarget->useSVEForFixedLengthVectors())
14041     return SDValue(N, 0);
14042 
14043   // fold (srem X, pow2)
14044   if ((VT != MVT::i32 && VT != MVT::i64) ||
14045       !(Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()))
14046     return SDValue();
14047 
14048   unsigned Lg2 = Divisor.countTrailingZeros();
14049   if (Lg2 == 0)
14050     return SDValue();
14051 
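  // For example (a sketch of the Lg2 > 1 path below), X srem 8 (Lg2 == 3):
  //   Negs   = SUBS(0, X)                  // also produces NZCV
  //   AndPos = X & 7
  //   AndNeg = Negs & 7
  //   Res    = CSNEG(AndPos, AndNeg, MI)   // X > 0 ? AndPos : -AndNeg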
14052   SDLoc DL(N);
14053   SDValue N0 = N->getOperand(0);
14054   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
14055   SDValue Zero = DAG.getConstant(0, DL, VT);
14056   SDValue CCVal, CSNeg;
14057   if (Lg2 == 1) {
14058     SDValue Cmp = getAArch64Cmp(N0, Zero, ISD::SETGE, CCVal, DAG, DL);
14059     SDValue And = DAG.getNode(ISD::AND, DL, VT, N0, Pow2MinusOne);
14060     CSNeg = DAG.getNode(AArch64ISD::CSNEG, DL, VT, And, And, CCVal, Cmp);
14061 
14062     Created.push_back(Cmp.getNode());
14063     Created.push_back(And.getNode());
14064   } else {
14065     SDValue CCVal = DAG.getConstant(AArch64CC::MI, DL, MVT_CC);
14066     SDVTList VTs = DAG.getVTList(VT, MVT::i32);
14067 
14068     SDValue Negs = DAG.getNode(AArch64ISD::SUBS, DL, VTs, Zero, N0);
14069     SDValue AndPos = DAG.getNode(ISD::AND, DL, VT, N0, Pow2MinusOne);
14070     SDValue AndNeg = DAG.getNode(ISD::AND, DL, VT, Negs, Pow2MinusOne);
14071     CSNeg = DAG.getNode(AArch64ISD::CSNEG, DL, VT, AndPos, AndNeg, CCVal,
14072                         Negs.getValue(1));
14073 
14074     Created.push_back(Negs.getNode());
14075     Created.push_back(AndPos.getNode());
14076     Created.push_back(AndNeg.getNode());
14077   }
14078 
14079   return CSNeg;
14080 }
14081 
14082 static bool IsSVECntIntrinsic(SDValue S) {
14083   switch(getIntrinsicID(S.getNode())) {
14084   default:
14085     break;
14086   case Intrinsic::aarch64_sve_cntb:
14087   case Intrinsic::aarch64_sve_cnth:
14088   case Intrinsic::aarch64_sve_cntw:
14089   case Intrinsic::aarch64_sve_cntd:
14090     return true;
14091   }
14092   return false;
14093 }
14094 
14095 /// Calculates what the pre-extend type is, based on the extension
14096 /// operation node provided by \p Extend.
14097 ///
14098 /// In the case that \p Extend is a SIGN_EXTEND or a ZERO_EXTEND, the
14099 /// pre-extend type is pulled directly from the operand, while other extend
14100 /// operations need a bit more inspection to get this information.
14101 ///
14102 /// \param Extend The SDNode from the DAG that represents the extend operation
14103 ///
14104 /// \returns The type representing the \p Extend source type, or \p MVT::Other
14105 /// if no valid type can be determined
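///
/// For example, both (and x, 0xff) and (AssertZext x, ValueType:i8) report a
/// pre-extend type of i8, while unrecognised nodes report MVT::Other.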
14106 static EVT calculatePreExtendType(SDValue Extend) {
14107   switch (Extend.getOpcode()) {
14108   case ISD::SIGN_EXTEND:
14109   case ISD::ZERO_EXTEND:
14110     return Extend.getOperand(0).getValueType();
14111   case ISD::AssertSext:
14112   case ISD::AssertZext:
14113   case ISD::SIGN_EXTEND_INREG: {
14114     VTSDNode *TypeNode = dyn_cast<VTSDNode>(Extend.getOperand(1));
14115     if (!TypeNode)
14116       return MVT::Other;
14117     return TypeNode->getVT();
14118   }
14119   case ISD::AND: {
14120     ConstantSDNode *Constant =
14121         dyn_cast<ConstantSDNode>(Extend.getOperand(1).getNode());
14122     if (!Constant)
14123       return MVT::Other;
14124 
14125     uint32_t Mask = Constant->getZExtValue();
14126 
14127     if (Mask == UCHAR_MAX)
14128       return MVT::i8;
14129     else if (Mask == USHRT_MAX)
14130       return MVT::i16;
14131     else if (Mask == UINT_MAX)
14132       return MVT::i32;
14133 
14134     return MVT::Other;
14135   }
14136   default:
14137     return MVT::Other;
14138   }
14139 }
14140 
14141 /// Combines a buildvector(sext/zext) or shuffle(sext/zext, undef) node pattern
14142 /// into sext/zext(buildvector) or sext/zext(shuffle) making use of the vector
14143 /// SExt/ZExt rather than the scalar SExt/ZExt
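///
/// For example (roughly): a v8i16 build_vector whose scalar operands are all
/// zero-extended from i8 values is rebuilt as zext(v8i8 build_vector of the
/// original i8 values), so the widening is done by a single vector extend.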
14144 static SDValue performBuildShuffleExtendCombine(SDValue BV, SelectionDAG &DAG) {
14145   EVT VT = BV.getValueType();
14146   if (BV.getOpcode() != ISD::BUILD_VECTOR &&
14147       BV.getOpcode() != ISD::VECTOR_SHUFFLE)
14148     return SDValue();
14149 
14150   // Use the first item in the buildvector/shuffle to get the size of the
14151   // extend, and make sure it looks valid.
14152   SDValue Extend = BV->getOperand(0);
14153   unsigned ExtendOpcode = Extend.getOpcode();
14154   bool IsSExt = ExtendOpcode == ISD::SIGN_EXTEND ||
14155                 ExtendOpcode == ISD::SIGN_EXTEND_INREG ||
14156                 ExtendOpcode == ISD::AssertSext;
14157   if (!IsSExt && ExtendOpcode != ISD::ZERO_EXTEND &&
14158       ExtendOpcode != ISD::AssertZext && ExtendOpcode != ISD::AND)
14159     return SDValue();
14160   // Shuffle inputs are vectors, so limit to SIGN_EXTEND and ZERO_EXTEND to
14161   // ensure calculatePreExtendType will work without issue.
14162   if (BV.getOpcode() == ISD::VECTOR_SHUFFLE &&
14163       ExtendOpcode != ISD::SIGN_EXTEND && ExtendOpcode != ISD::ZERO_EXTEND)
14164     return SDValue();
14165 
14166   // Restrict valid pre-extend data type
14167   EVT PreExtendType = calculatePreExtendType(Extend);
14168   if (PreExtendType == MVT::Other ||
14169       PreExtendType.getScalarSizeInBits() != VT.getScalarSizeInBits() / 2)
14170     return SDValue();
14171 
14172   // Make sure all other operands are equally extended
14173   for (SDValue Op : drop_begin(BV->ops())) {
14174     if (Op.isUndef())
14175       continue;
14176     unsigned Opc = Op.getOpcode();
14177     bool OpcIsSExt = Opc == ISD::SIGN_EXTEND || Opc == ISD::SIGN_EXTEND_INREG ||
14178                      Opc == ISD::AssertSext;
14179     if (OpcIsSExt != IsSExt || calculatePreExtendType(Op) != PreExtendType)
14180       return SDValue();
14181   }
14182 
14183   SDValue NBV;
14184   SDLoc DL(BV);
14185   if (BV.getOpcode() == ISD::BUILD_VECTOR) {
14186     EVT PreExtendVT = VT.changeVectorElementType(PreExtendType);
14187     EVT PreExtendLegalType =
14188         PreExtendType.getScalarSizeInBits() < 32 ? MVT::i32 : PreExtendType;
14189     SmallVector<SDValue, 8> NewOps;
14190     for (SDValue Op : BV->ops())
14191       NewOps.push_back(Op.isUndef() ? DAG.getUNDEF(PreExtendLegalType)
14192                                     : DAG.getAnyExtOrTrunc(Op.getOperand(0), DL,
14193                                                            PreExtendLegalType));
14194     NBV = DAG.getNode(ISD::BUILD_VECTOR, DL, PreExtendVT, NewOps);
14195   } else { // BV.getOpcode() == ISD::VECTOR_SHUFFLE
14196     EVT PreExtendVT = VT.changeVectorElementType(PreExtendType.getScalarType());
14197     NBV = DAG.getVectorShuffle(PreExtendVT, DL, BV.getOperand(0).getOperand(0),
14198                                BV.getOperand(1).isUndef()
14199                                    ? DAG.getUNDEF(PreExtendVT)
14200                                    : BV.getOperand(1).getOperand(0),
14201                                cast<ShuffleVectorSDNode>(BV)->getMask());
14202   }
14203   return DAG.getNode(IsSExt ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, VT, NBV);
14204 }
14205 
14206 /// Combines a mul(dup(sext/zext)) node pattern into mul(sext/zext(dup))
14207 /// making use of the vector SExt/ZExt rather than the scalar SExt/ZExt
14208 static SDValue performMulVectorExtendCombine(SDNode *Mul, SelectionDAG &DAG) {
14209   // If the value type isn't a vector, none of the operands are going to be dups
14210   EVT VT = Mul->getValueType(0);
14211   if (VT != MVT::v8i16 && VT != MVT::v4i32 && VT != MVT::v2i64)
14212     return SDValue();
14213 
14214   SDValue Op0 = performBuildShuffleExtendCombine(Mul->getOperand(0), DAG);
14215   SDValue Op1 = performBuildShuffleExtendCombine(Mul->getOperand(1), DAG);
14216 
14217   // If neither operand was changed, don't make any further changes.
14218   if (!Op0 && !Op1)
14219     return SDValue();
14220 
14221   SDLoc DL(Mul);
14222   return DAG.getNode(Mul->getOpcode(), DL, VT, Op0 ? Op0 : Mul->getOperand(0),
14223                      Op1 ? Op1 : Mul->getOperand(1));
14224 }
14225 
14226 static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
14227                                  TargetLowering::DAGCombinerInfo &DCI,
14228                                  const AArch64Subtarget *Subtarget) {
14229 
14230   if (SDValue Ext = performMulVectorExtendCombine(N, DAG))
14231     return Ext;
14232 
14233   if (DCI.isBeforeLegalizeOps())
14234     return SDValue();
14235 
14236   // Canonicalize X*(Y+1) -> X*Y+X and (X+1)*Y -> X*Y+Y,
14237   // and in the MachineCombiner pass, add+mul will be combined into madd.
14238   // Similarly, X*(1-Y) -> X - X*Y and (1-Y)*X -> X - Y*X.
14239   SDLoc DL(N);
14240   EVT VT = N->getValueType(0);
14241   SDValue N0 = N->getOperand(0);
14242   SDValue N1 = N->getOperand(1);
14243   SDValue MulOper;
14244   unsigned AddSubOpc;
14245 
14246   auto IsAddSubWith1 = [&](SDValue V) -> bool {
14247     AddSubOpc = V->getOpcode();
14248     if ((AddSubOpc == ISD::ADD || AddSubOpc == ISD::SUB) && V->hasOneUse()) {
14249       SDValue Opnd = V->getOperand(1);
14250       MulOper = V->getOperand(0);
14251       if (AddSubOpc == ISD::SUB)
14252         std::swap(Opnd, MulOper);
14253       if (auto C = dyn_cast<ConstantSDNode>(Opnd))
14254         return C->isOne();
14255     }
14256     return false;
14257   };
14258 
14259   if (IsAddSubWith1(N0)) {
14260     SDValue MulVal = DAG.getNode(ISD::MUL, DL, VT, N1, MulOper);
14261     return DAG.getNode(AddSubOpc, DL, VT, N1, MulVal);
14262   }
14263 
14264   if (IsAddSubWith1(N1)) {
14265     SDValue MulVal = DAG.getNode(ISD::MUL, DL, VT, N0, MulOper);
14266     return DAG.getNode(AddSubOpc, DL, VT, N0, MulVal);
14267   }
14268 
14269   // The below optimizations require a constant RHS.
14270   if (!isa<ConstantSDNode>(N1))
14271     return SDValue();
14272 
14273   ConstantSDNode *C = cast<ConstantSDNode>(N1);
14274   const APInt &ConstValue = C->getAPIntValue();
14275 
14276   // Allow the scaling to be folded into the `cnt` instruction by preventing
14277   // the scaling being obscured here. This makes it easier to pattern match.
14278   if (IsSVECntIntrinsic(N0) ||
14279       (N0->getOpcode() == ISD::TRUNCATE &&
14280        IsSVECntIntrinsic(N0->getOperand(0))))
14281     if (ConstValue.sge(1) && ConstValue.sle(16))
14282       return SDValue();
14283 
14284   // Multiplication of a power of two plus/minus one can be done more
14285   // cheaply as a shift+add/sub. For now, this is true unilaterally. If
14286   // future CPUs have a cheaper MADD instruction, this may need to be
14287   // gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and
14288   // 64-bit is 5 cycles, so this is always a win.
14289   // More aggressively, some multiplications N0 * C can be lowered to
14290   // shift+add+shift if the constant C = A * B where A = 2^N + 1 and B = 2^M,
14291   // e.g. 6=3*2=(2+1)*2.
14292   // TODO: consider lowering more cases, e.g. C = 14, -6, -14 or even 45
14293   // which equals (1+2)*16-(1+2).
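  // As a concrete sketch of the shift+add+shift case handled below:
  //   x * 24 has TrailingZeroes == 3 and ShiftedConstValue == 3, so it is
  //   lowered to shl(add(shl(x, 1), x), 3).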
14294 
14295   // TrailingZeroes is used to test if the mul can be lowered to
14296   // shift+add+shift.
14297   unsigned TrailingZeroes = ConstValue.countTrailingZeros();
14298   if (TrailingZeroes) {
14299     // Conservatively do not lower to shift+add+shift if the mul might be
14300     // folded into smul or umul.
14301     if (N0->hasOneUse() && (isSignExtended(N0.getNode(), DAG) ||
14302                             isZeroExtended(N0.getNode(), DAG)))
14303       return SDValue();
14304     // Conservatively do not lower to shift+add+shift if the mul might be
14305     // folded into madd or msub.
14306     if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ADD ||
14307                            N->use_begin()->getOpcode() == ISD::SUB))
14308       return SDValue();
14309   }
14310   // Use ShiftedConstValue instead of ConstValue to support both shift+add/sub
14311   // and shift+add+shift.
14312   APInt ShiftedConstValue = ConstValue.ashr(TrailingZeroes);
14313 
14314   unsigned ShiftAmt;
14315   // Is the shifted value the LHS operand of the add/sub?
14316   bool ShiftValUseIsN0 = true;
14317   // Do we need to negate the result?
14318   bool NegateResult = false;
14319 
14320   if (ConstValue.isNonNegative()) {
14321     // (mul x, 2^N + 1) => (add (shl x, N), x)
14322     // (mul x, 2^N - 1) => (sub (shl x, N), x)
14323     // (mul x, (2^N + 1) * 2^M) => (shl (add (shl x, N), x), M)
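    // e.g. x * 9 => add(shl(x, 3), x) and x * 7 => sub(shl(x, 3), x).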
14324     APInt SCVMinus1 = ShiftedConstValue - 1;
14325     APInt CVPlus1 = ConstValue + 1;
14326     if (SCVMinus1.isPowerOf2()) {
14327       ShiftAmt = SCVMinus1.logBase2();
14328       AddSubOpc = ISD::ADD;
14329     } else if (CVPlus1.isPowerOf2()) {
14330       ShiftAmt = CVPlus1.logBase2();
14331       AddSubOpc = ISD::SUB;
14332     } else
14333       return SDValue();
14334   } else {
14335     // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
14336     // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
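    // e.g. x * -7 has CVNegPlus1 == 8, giving sub(x, shl(x, 3)); x * -9 has
    // CVNegMinus1 == 8, giving sub(0, add(shl(x, 3), x)).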
14337     APInt CVNegPlus1 = -ConstValue + 1;
14338     APInt CVNegMinus1 = -ConstValue - 1;
14339     if (CVNegPlus1.isPowerOf2()) {
14340       ShiftAmt = CVNegPlus1.logBase2();
14341       AddSubOpc = ISD::SUB;
14342       ShiftValUseIsN0 = false;
14343     } else if (CVNegMinus1.isPowerOf2()) {
14344       ShiftAmt = CVNegMinus1.logBase2();
14345       AddSubOpc = ISD::ADD;
14346       NegateResult = true;
14347     } else
14348       return SDValue();
14349   }
14350 
14351   SDValue ShiftedVal = DAG.getNode(ISD::SHL, DL, VT, N0,
14352                                    DAG.getConstant(ShiftAmt, DL, MVT::i64));
14353 
14354   SDValue AddSubN0 = ShiftValUseIsN0 ? ShiftedVal : N0;
14355   SDValue AddSubN1 = ShiftValUseIsN0 ? N0 : ShiftedVal;
14356   SDValue Res = DAG.getNode(AddSubOpc, DL, VT, AddSubN0, AddSubN1);
14357   assert(!(NegateResult && TrailingZeroes) &&
14358          "NegateResult and TrailingZeroes cannot both be true for now.");
14359   // Negate the result.
14360   if (NegateResult)
14361     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
14362   // Shift the result.
14363   if (TrailingZeroes)
14364     return DAG.getNode(ISD::SHL, DL, VT, Res,
14365                        DAG.getConstant(TrailingZeroes, DL, MVT::i64));
14366   return Res;
14367 }
14368 
14369 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
14370                                                          SelectionDAG &DAG) {
14371   // Take advantage of vector comparisons producing 0 or -1 in each lane to
14372   // optimize away operation when it's from a constant.
14373   //
14374   // The general transformation is:
14375   //    UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
14376   //       AND(VECTOR_CMP(x,y), constant2)
14377   //    constant2 = UNARYOP(constant)
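  //
  // For instance (a sketch): sint_to_fp(and(setcc(x, y), splat(i32 1))) can
  // keep the AND on the compare mask and apply it to bitcast(splat(1.0f))
  // instead, since each lane of the result is either 0.0 or the converted
  // constant.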
14378 
14379   // Early exit if this isn't a vector operation, the operand of the
14380   // unary operation isn't a bitwise AND, or if the sizes of the operations
14381   // aren't the same.
14382   EVT VT = N->getValueType(0);
14383   if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
14384       N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
14385       VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
14386     return SDValue();
14387 
14388   // Now check that the other operand of the AND is a constant. We could
14389   // make the transformation for non-constant splats as well, but it's unclear
14390   // that would be a benefit as it would not eliminate any operations, just
14391   // perform one more step in scalar code before moving to the vector unit.
14392   if (BuildVectorSDNode *BV =
14393           dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
14394     // Bail out if the vector isn't a constant.
14395     if (!BV->isConstant())
14396       return SDValue();
14397 
14398     // Everything checks out. Build up the new and improved node.
14399     SDLoc DL(N);
14400     EVT IntVT = BV->getValueType(0);
14401     // Create a new constant of the appropriate type for the transformed
14402     // DAG.
14403     SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
14404     // The AND node needs bitcasts to/from an integer vector type around it.
14405     SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
14406     SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
14407                                  N->getOperand(0)->getOperand(0), MaskConst);
14408     SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
14409     return Res;
14410   }
14411 
14412   return SDValue();
14413 }
14414 
14415 static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG,
14416                                      const AArch64Subtarget *Subtarget) {
14417   // First try to optimize away the conversion when it's conditionally from
14418   // a constant. Vectors only.
14419   if (SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG))
14420     return Res;
14421 
14422   EVT VT = N->getValueType(0);
14423   if (VT != MVT::f32 && VT != MVT::f64)
14424     return SDValue();
14425 
14426   // Only optimize when the source and destination types have the same width.
14427   if (VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits())
14428     return SDValue();
14429 
14430   // If the result of an integer load is only used by an integer-to-float
14431   // conversion, use an FP load and an AdvSIMD scalar {S|U}CVTF instead.
14432   // This eliminates an "integer-to-vector-move" UOP and improves throughput.
14433   SDValue N0 = N->getOperand(0);
14434   if (Subtarget->hasNEON() && ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
14435       // Do not change the width of a volatile load.
14436       !cast<LoadSDNode>(N0)->isVolatile()) {
14437     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
14438     SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
14439                                LN0->getPointerInfo(), LN0->getAlign(),
14440                                LN0->getMemOperand()->getFlags());
14441 
14442     // Make sure successors of the original load stay after it by updating them
14443     // to use the new Chain.
14444     DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), Load.getValue(1));
14445 
14446     unsigned Opcode =
14447         (N->getOpcode() == ISD::SINT_TO_FP) ? AArch64ISD::SITOF : AArch64ISD::UITOF;
14448     return DAG.getNode(Opcode, SDLoc(N), VT, Load);
14449   }
14450 
14451   return SDValue();
14452 }
14453 
14454 /// Fold a floating-point multiply by power of two into floating-point to
14455 /// fixed-point conversion.
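/// For example (a sketch): fp_to_sint(fmul(x, splat(8.0))) becomes the
/// fixed-point conversion intrinsic with 3 fractional bits, i.e. an
/// FCVTZS ..., #3 style operation.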
14456 static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG,
14457                                      TargetLowering::DAGCombinerInfo &DCI,
14458                                      const AArch64Subtarget *Subtarget) {
14459   if (!Subtarget->hasNEON())
14460     return SDValue();
14461 
14462   if (!N->getValueType(0).isSimple())
14463     return SDValue();
14464 
14465   SDValue Op = N->getOperand(0);
14466   if (!Op.getValueType().isSimple() || Op.getOpcode() != ISD::FMUL)
14467     return SDValue();
14468 
14469   if (!Op.getValueType().is64BitVector() && !Op.getValueType().is128BitVector())
14470     return SDValue();
14471 
14472   SDValue ConstVec = Op->getOperand(1);
14473   if (!isa<BuildVectorSDNode>(ConstVec))
14474     return SDValue();
14475 
14476   MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
14477   uint32_t FloatBits = FloatTy.getSizeInBits();
14478   if (FloatBits != 32 && FloatBits != 64 &&
14479       (FloatBits != 16 || !Subtarget->hasFullFP16()))
14480     return SDValue();
14481 
14482   MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
14483   uint32_t IntBits = IntTy.getSizeInBits();
14484   if (IntBits != 16 && IntBits != 32 && IntBits != 64)
14485     return SDValue();
14486 
14487   // Avoid conversions where iN is larger than the float (e.g., float -> i64).
14488   if (IntBits > FloatBits)
14489     return SDValue();
14490 
14491   BitVector UndefElements;
14492   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
14493   int32_t Bits = IntBits == 64 ? 64 : 32;
14494   int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, Bits + 1);
14495   if (C == -1 || C == 0 || C > Bits)
14496     return SDValue();
14497 
14498   EVT ResTy = Op.getValueType().changeVectorElementTypeToInteger();
14499   if (!DAG.getTargetLoweringInfo().isTypeLegal(ResTy))
14500     return SDValue();
14501 
14502   if (N->getOpcode() == ISD::FP_TO_SINT_SAT ||
14503       N->getOpcode() == ISD::FP_TO_UINT_SAT) {
14504     EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
14505     if (SatVT.getScalarSizeInBits() != IntBits || IntBits != FloatBits)
14506       return SDValue();
14507   }
14508 
14509   SDLoc DL(N);
14510   bool IsSigned = (N->getOpcode() == ISD::FP_TO_SINT ||
14511                    N->getOpcode() == ISD::FP_TO_SINT_SAT);
14512   unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfp2fxs
14513                                       : Intrinsic::aarch64_neon_vcvtfp2fxu;
14514   SDValue FixConv =
14515       DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, ResTy,
14516                   DAG.getConstant(IntrinsicOpcode, DL, MVT::i32),
14517                   Op->getOperand(0), DAG.getConstant(C, DL, MVT::i32));
14518   // We can handle smaller integers by generating an extra trunc.
14519   if (IntBits < FloatBits)
14520     FixConv = DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), FixConv);
14521 
14522   return FixConv;
14523 }
14524 
14525 /// Fold a floating-point divide by power of two into fixed-point to
14526 /// floating-point conversion.
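/// For example (a sketch): fdiv(sint_to_fp(x), splat(16.0)) becomes the
/// fixed-point conversion intrinsic with 4 fractional bits, i.e. an
/// SCVTF ..., #4 style operation.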
14527 static SDValue performFDivCombine(SDNode *N, SelectionDAG &DAG,
14528                                   TargetLowering::DAGCombinerInfo &DCI,
14529                                   const AArch64Subtarget *Subtarget) {
14530   if (!Subtarget->hasNEON())
14531     return SDValue();
14532 
14533   SDValue Op = N->getOperand(0);
14534   unsigned Opc = Op->getOpcode();
14535   if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
14536       !Op.getOperand(0).getValueType().isSimple() ||
14537       (Opc != ISD::SINT_TO_FP && Opc != ISD::UINT_TO_FP))
14538     return SDValue();
14539 
14540   SDValue ConstVec = N->getOperand(1);
14541   if (!isa<BuildVectorSDNode>(ConstVec))
14542     return SDValue();
14543 
14544   MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
14545   int32_t IntBits = IntTy.getSizeInBits();
14546   if (IntBits != 16 && IntBits != 32 && IntBits != 64)
14547     return SDValue();
14548 
14549   MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
14550   int32_t FloatBits = FloatTy.getSizeInBits();
14551   if (FloatBits != 32 && FloatBits != 64)
14552     return SDValue();
14553 
14554   // Avoid conversions where iN is larger than the float (e.g., i64 -> float).
14555   if (IntBits > FloatBits)
14556     return SDValue();
14557 
14558   BitVector UndefElements;
14559   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
14560   int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, FloatBits + 1);
14561   if (C == -1 || C == 0 || C > FloatBits)
14562     return SDValue();
14563 
14564   MVT ResTy;
14565   unsigned NumLanes = Op.getValueType().getVectorNumElements();
14566   switch (NumLanes) {
14567   default:
14568     return SDValue();
14569   case 2:
14570     ResTy = FloatBits == 32 ? MVT::v2i32 : MVT::v2i64;
14571     break;
14572   case 4:
14573     ResTy = FloatBits == 32 ? MVT::v4i32 : MVT::v4i64;
14574     break;
14575   }
14576 
14577   if (ResTy == MVT::v4i64 && DCI.isBeforeLegalizeOps())
14578     return SDValue();
14579 
14580   SDLoc DL(N);
14581   SDValue ConvInput = Op.getOperand(0);
14582   bool IsSigned = Opc == ISD::SINT_TO_FP;
14583   if (IntBits < FloatBits)
14584     ConvInput = DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
14585                             ResTy, ConvInput);
14586 
14587   unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfxs2fp
14588                                       : Intrinsic::aarch64_neon_vcvtfxu2fp;
14589   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
14590                      DAG.getConstant(IntrinsicOpcode, DL, MVT::i32), ConvInput,
14591                      DAG.getConstant(C, DL, MVT::i32));
14592 }
14593 
14594 /// An EXTR instruction is made up of two shifts, ORed together. This helper
14595 /// searches for and classifies those shifts.
14596 static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
14597                          bool &FromHi) {
14598   if (N.getOpcode() == ISD::SHL)
14599     FromHi = false;
14600   else if (N.getOpcode() == ISD::SRL)
14601     FromHi = true;
14602   else
14603     return false;
14604 
14605   if (!isa<ConstantSDNode>(N.getOperand(1)))
14606     return false;
14607 
14608   ShiftAmount = N->getConstantOperandVal(1);
14609   Src = N->getOperand(0);
14610   return true;
14611 }
14612 
14613 /// EXTR instruction extracts a contiguous chunk of bits from two existing
14614 /// registers viewed as a high/low pair. This function looks for the pattern:
14615 /// <tt>(or (shl VAL1, \#N), (srl VAL2, \#RegWidth-N))</tt> and replaces it
14616 /// with an EXTR. Can't quite be done in TableGen because the two immediates
14617 /// aren't independent.
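/// For example (a sketch), on i32: (or (shl x, #24), (srl y, #8)) becomes
/// EXTR x, y, #8, whose result takes its high 8 bits from x and its low 24
/// bits from the high bits of y.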
14618 static SDValue tryCombineToEXTR(SDNode *N,
14619                                 TargetLowering::DAGCombinerInfo &DCI) {
14620   SelectionDAG &DAG = DCI.DAG;
14621   SDLoc DL(N);
14622   EVT VT = N->getValueType(0);
14623 
14624   assert(N->getOpcode() == ISD::OR && "Unexpected root");
14625 
14626   if (VT != MVT::i32 && VT != MVT::i64)
14627     return SDValue();
14628 
14629   SDValue LHS;
14630   uint32_t ShiftLHS = 0;
14631   bool LHSFromHi = false;
14632   if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi))
14633     return SDValue();
14634 
14635   SDValue RHS;
14636   uint32_t ShiftRHS = 0;
14637   bool RHSFromHi = false;
14638   if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi))
14639     return SDValue();
14640 
14641   // If they're both trying to come from the high part of the register, they're
14642   // not really an EXTR.
14643   if (LHSFromHi == RHSFromHi)
14644     return SDValue();
14645 
14646   if (ShiftLHS + ShiftRHS != VT.getSizeInBits())
14647     return SDValue();
14648 
14649   if (LHSFromHi) {
14650     std::swap(LHS, RHS);
14651     std::swap(ShiftLHS, ShiftRHS);
14652   }
14653 
14654   return DAG.getNode(AArch64ISD::EXTR, DL, VT, LHS, RHS,
14655                      DAG.getConstant(ShiftRHS, DL, MVT::i64));
14656 }
14657 
14658 static SDValue tryCombineToBSL(SDNode *N,
14659                                 TargetLowering::DAGCombinerInfo &DCI) {
14660   EVT VT = N->getValueType(0);
14661   SelectionDAG &DAG = DCI.DAG;
14662   SDLoc DL(N);
14663 
14664   if (!VT.isVector())
14665     return SDValue();
14666 
14667   // The combining code currently only works for NEON vectors. In particular,
14668   // it does not work for SVE when dealing with vectors wider than 128 bits.
14669   if (!VT.is64BitVector() && !VT.is128BitVector())
14670     return SDValue();
14671 
14672   SDValue N0 = N->getOperand(0);
14673   if (N0.getOpcode() != ISD::AND)
14674     return SDValue();
14675 
14676   SDValue N1 = N->getOperand(1);
14677   if (N1.getOpcode() != ISD::AND)
14678     return SDValue();
14679 
14680   // InstCombine does (not (neg a)) => (add a -1).
14681   // Try: (or (and (neg a) b) (and (add a -1) c)) => (bsl (neg a) b c)
14682   // Loop over all combinations of AND operands.
14683   for (int i = 1; i >= 0; --i) {
14684     for (int j = 1; j >= 0; --j) {
14685       SDValue O0 = N0->getOperand(i);
14686       SDValue O1 = N1->getOperand(j);
14687       SDValue Sub, Add, SubSibling, AddSibling;
14688 
14689       // Find a SUB and an ADD operand, one from each AND.
14690       if (O0.getOpcode() == ISD::SUB && O1.getOpcode() == ISD::ADD) {
14691         Sub = O0;
14692         Add = O1;
14693         SubSibling = N0->getOperand(1 - i);
14694         AddSibling = N1->getOperand(1 - j);
14695       } else if (O0.getOpcode() == ISD::ADD && O1.getOpcode() == ISD::SUB) {
14696         Add = O0;
14697         Sub = O1;
14698         AddSibling = N0->getOperand(1 - i);
14699         SubSibling = N1->getOperand(1 - j);
14700       } else
14701         continue;
14702 
14703       if (!ISD::isBuildVectorAllZeros(Sub.getOperand(0).getNode()))
14704         continue;
14705 
14706       // The all-ones constant is always the right-hand operand of the Add.
14707       if (!ISD::isBuildVectorAllOnes(Add.getOperand(1).getNode()))
14708         continue;
14709 
14710       if (Sub.getOperand(1) != Add.getOperand(0))
14711         continue;
14712 
14713       return DAG.getNode(AArch64ISD::BSP, DL, VT, Sub, SubSibling, AddSibling);
14714     }
14715   }
14716 
14717   // (or (and a b) (and (not a) c)) => (bsl a b c)
14718   // We only have to look for constant vectors here since the general, variable
14719   // case can be handled in TableGen.
14720   unsigned Bits = VT.getScalarSizeInBits();
14721   uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1);
14722   for (int i = 1; i >= 0; --i)
14723     for (int j = 1; j >= 0; --j) {
14724       BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(i));
14725       BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(j));
14726       if (!BVN0 || !BVN1)
14727         continue;
14728 
14729       bool FoundMatch = true;
14730       for (unsigned k = 0; k < VT.getVectorNumElements(); ++k) {
14731         ConstantSDNode *CN0 = dyn_cast<ConstantSDNode>(BVN0->getOperand(k));
14732         ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(BVN1->getOperand(k));
14733         if (!CN0 || !CN1 ||
14734             CN0->getZExtValue() != (BitMask & ~CN1->getZExtValue())) {
14735           FoundMatch = false;
14736           break;
14737         }
14738       }
14739 
14740       if (FoundMatch)
14741         return DAG.getNode(AArch64ISD::BSP, DL, VT, SDValue(BVN0, 0),
14742                            N0->getOperand(1 - i), N1->getOperand(1 - j));
14743     }
14744 
14745   return SDValue();
14746 }
14747 
14748 // Given a tree of and/or(csel(0, 1, cc0), csel(0, 1, cc1)), we may be able to
14749 // convert to csel(ccmp(.., cc0)), depending on cc1:
14750 
14751 // (AND (CSET cc0 cmp0) (CSET cc1 (CMP x1 y1)))
14752 // =>
14753 // (CSET cc1 (CCMP x1 y1 !cc1 cc0 cmp0))
14754 //
14755 // (OR (CSET cc0 cmp0) (CSET cc1 (CMP x1 y1)))
14756 // =>
14757 // (CSET cc1 (CCMP x1 y1 cc1 !cc0 cmp0))
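//
// In both cases the second compare is executed conditionally on the first via
// CCMP, so the combined condition ends up in NZCV and a single CSEL/CSET can
// materialise the result without AND/ORR-ing two separate booleans.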
14758 static SDValue performANDORCSELCombine(SDNode *N, SelectionDAG &DAG) {
14759   EVT VT = N->getValueType(0);
14760   SDValue CSel0 = N->getOperand(0);
14761   SDValue CSel1 = N->getOperand(1);
14762 
14763   if (CSel0.getOpcode() != AArch64ISD::CSEL ||
14764       CSel1.getOpcode() != AArch64ISD::CSEL)
14765     return SDValue();
14766 
14767   if (!CSel0->hasOneUse() || !CSel1->hasOneUse())
14768     return SDValue();
14769 
14770   if (!isNullConstant(CSel0.getOperand(0)) ||
14771       !isOneConstant(CSel0.getOperand(1)) ||
14772       !isNullConstant(CSel1.getOperand(0)) ||
14773       !isOneConstant(CSel1.getOperand(1)))
14774     return SDValue();
14775 
14776   SDValue Cmp0 = CSel0.getOperand(3);
14777   SDValue Cmp1 = CSel1.getOperand(3);
14778   AArch64CC::CondCode CC0 = (AArch64CC::CondCode)CSel0.getConstantOperandVal(2);
14779   AArch64CC::CondCode CC1 = (AArch64CC::CondCode)CSel1.getConstantOperandVal(2);
14780   if (!Cmp0->hasOneUse() || !Cmp1->hasOneUse())
14781     return SDValue();
14782   if (Cmp1.getOpcode() != AArch64ISD::SUBS &&
14783       Cmp0.getOpcode() == AArch64ISD::SUBS) {
14784     std::swap(Cmp0, Cmp1);
14785     std::swap(CC0, CC1);
14786   }
14787 
14788   if (Cmp1.getOpcode() != AArch64ISD::SUBS)
14789     return SDValue();
14790 
14791   SDLoc DL(N);
14792   SDValue CCmp;
14793 
14794   if (N->getOpcode() == ISD::AND) {
14795     AArch64CC::CondCode InvCC0 = AArch64CC::getInvertedCondCode(CC0);
14796     SDValue Condition = DAG.getConstant(InvCC0, DL, MVT_CC);
14797     unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(CC1);
14798     SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
14799     CCmp = DAG.getNode(AArch64ISD::CCMP, DL, MVT_CC, Cmp1.getOperand(0),
14800                        Cmp1.getOperand(1), NZCVOp, Condition, Cmp0);
14801   } else {
14803     AArch64CC::CondCode InvCC1 = AArch64CC::getInvertedCondCode(CC1);
14804     SDValue Condition = DAG.getConstant(CC0, DL, MVT_CC);
14805     unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvCC1);
14806     SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
14807     CCmp = DAG.getNode(AArch64ISD::CCMP, DL, MVT_CC, Cmp1.getOperand(0),
14808                        Cmp1.getOperand(1), NZCVOp, Condition, Cmp0);
14809   }
14810   return DAG.getNode(AArch64ISD::CSEL, DL, VT, CSel0.getOperand(0),
14811                      CSel0.getOperand(1), DAG.getConstant(CC1, DL, MVT::i32),
14812                      CCmp);
14813 }
14814 
14815 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
14816                                 const AArch64Subtarget *Subtarget) {
14817   SelectionDAG &DAG = DCI.DAG;
14818   EVT VT = N->getValueType(0);
14819 
14820   if (SDValue R = performANDORCSELCombine(N, DAG))
14821     return R;
14822 
14823   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
14824     return SDValue();
14825 
14826   // Attempt to form an EXTR from (or (shl VAL1, #N), (srl VAL2, #RegWidth-N))
14827   if (SDValue Res = tryCombineToEXTR(N, DCI))
14828     return Res;
14829 
14830   if (SDValue Res = tryCombineToBSL(N, DCI))
14831     return Res;
14832 
14833   return SDValue();
14834 }
14835 
14836 static bool isConstantSplatVectorMaskForType(SDNode *N, EVT MemVT) {
14837   if (!MemVT.getVectorElementType().isSimple())
14838     return false;
14839 
14840   uint64_t MaskForTy = 0ull;
14841   switch (MemVT.getVectorElementType().getSimpleVT().SimpleTy) {
14842   case MVT::i8:
14843     MaskForTy = 0xffull;
14844     break;
14845   case MVT::i16:
14846     MaskForTy = 0xffffull;
14847     break;
14848   case MVT::i32:
14849     MaskForTy = 0xffffffffull;
14850     break;
14851   default:
14852     return false;
14854   }
14855 
14856   if (N->getOpcode() == AArch64ISD::DUP || N->getOpcode() == ISD::SPLAT_VECTOR)
14857     if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0)))
14858       return Op0->getAPIntValue().getLimitedValue() == MaskForTy;
14859 
14860   return false;
14861 }
14862 
14863 static SDValue performSVEAndCombine(SDNode *N,
14864                                     TargetLowering::DAGCombinerInfo &DCI) {
14865   if (DCI.isBeforeLegalizeOps())
14866     return SDValue();
14867 
14868   SelectionDAG &DAG = DCI.DAG;
14869   SDValue Src = N->getOperand(0);
14870   unsigned Opc = Src->getOpcode();
14871 
14872   // Zero/any extend of an unsigned unpack
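  // For example (a sketch): and(nxv8i16 uunpklo(nxv16i8 x), splat(0xff)) is
  // just uunpklo(x), since the unpack already zero-extends each lane; a
  // narrower mask such as 0xf is instead applied to x before the unpack.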
14873   if (Opc == AArch64ISD::UUNPKHI || Opc == AArch64ISD::UUNPKLO) {
14874     SDValue UnpkOp = Src->getOperand(0);
14875     SDValue Dup = N->getOperand(1);
14876 
14877     if (Dup.getOpcode() != ISD::SPLAT_VECTOR)
14878       return SDValue();
14879 
14880     SDLoc DL(N);
14881     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Dup->getOperand(0));
14882     if (!C)
14883       return SDValue();
14884 
14885     uint64_t ExtVal = C->getZExtValue();
14886 
14887     // If the mask is fully covered by the unpack, we don't need to push
14888     // a new AND onto the operand
14889     EVT EltTy = UnpkOp->getValueType(0).getVectorElementType();
14890     if ((ExtVal == 0xFF && EltTy == MVT::i8) ||
14891         (ExtVal == 0xFFFF && EltTy == MVT::i16) ||
14892         (ExtVal == 0xFFFFFFFF && EltTy == MVT::i32))
14893       return Src;
14894 
14895     // Truncate to prevent a DUP with an over wide constant
14896     APInt Mask = C->getAPIntValue().trunc(EltTy.getSizeInBits());
14897 
14898     // Otherwise, make sure we propagate the AND to the operand
14899     // of the unpack
14900     Dup = DAG.getNode(ISD::SPLAT_VECTOR, DL, UnpkOp->getValueType(0),
14901                       DAG.getConstant(Mask.zextOrTrunc(32), DL, MVT::i32));
14902 
14903     SDValue And = DAG.getNode(ISD::AND, DL,
14904                               UnpkOp->getValueType(0), UnpkOp, Dup);
14905 
14906     return DAG.getNode(Opc, DL, N->getValueType(0), And);
14907   }
14908 
14909   if (!EnableCombineMGatherIntrinsics)
14910     return SDValue();
14911 
14912   SDValue Mask = N->getOperand(1);
14913 
14914   if (!Src.hasOneUse())
14915     return SDValue();
14916 
14917   EVT MemVT;
14918 
14919   // SVE load instructions perform an implicit zero-extend, which makes them
14920   // perfect candidates for combining.
14921   switch (Opc) {
14922   case AArch64ISD::LD1_MERGE_ZERO:
14923   case AArch64ISD::LDNF1_MERGE_ZERO:
14924   case AArch64ISD::LDFF1_MERGE_ZERO:
14925     MemVT = cast<VTSDNode>(Src->getOperand(3))->getVT();
14926     break;
14927   case AArch64ISD::GLD1_MERGE_ZERO:
14928   case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
14929   case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
14930   case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
14931   case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
14932   case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
14933   case AArch64ISD::GLD1_IMM_MERGE_ZERO:
14934   case AArch64ISD::GLDFF1_MERGE_ZERO:
14935   case AArch64ISD::GLDFF1_SCALED_MERGE_ZERO:
14936   case AArch64ISD::GLDFF1_SXTW_MERGE_ZERO:
14937   case AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO:
14938   case AArch64ISD::GLDFF1_UXTW_MERGE_ZERO:
14939   case AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO:
14940   case AArch64ISD::GLDFF1_IMM_MERGE_ZERO:
14941   case AArch64ISD::GLDNT1_MERGE_ZERO:
14942     MemVT = cast<VTSDNode>(Src->getOperand(4))->getVT();
14943     break;
14944   default:
14945     return SDValue();
14946   }
14947 
14948   if (isConstantSplatVectorMaskForType(Mask.getNode(), MemVT))
14949     return Src;
14950 
14951   return SDValue();
14952 }
14953 
14954 static SDValue performANDCombine(SDNode *N,
14955                                  TargetLowering::DAGCombinerInfo &DCI) {
14956   SelectionDAG &DAG = DCI.DAG;
14957   SDValue LHS = N->getOperand(0);
14958   SDValue RHS = N->getOperand(1);
14959   EVT VT = N->getValueType(0);
14960 
14961   if (SDValue R = performANDORCSELCombine(N, DAG))
14962     return R;
14963 
14964   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
14965     return SDValue();
14966 
14967   if (VT.isScalableVector())
14968     return performSVEAndCombine(N, DCI);
14969 
14970   // The combining code below works only for NEON vectors. In particular, it
14971   // does not work for SVE when dealing with vectors wider than 128 bits.
14972   if (!VT.is64BitVector() && !VT.is128BitVector())
14973     return SDValue();
14974 
14975   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
14976   if (!BVN)
14977     return SDValue();
14978 
14979   // AND does not accept an immediate, so check if we can use a BIC immediate
14980   // instruction instead. We do this here instead of using a (and x, (mvni imm))
14981   // pattern in isel, because some immediates may be lowered to the preferred
14982   // (and x, (movi imm)) form, even though an mvni representation also exists.
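  // For example (a sketch): a v4i32 AND whose mask is splat(0xffffff00) has no
  // MOVI encoding for the mask itself, but the complement 0xff is encodable,
  // so the node can be selected as BIC .4s, #0xff.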
14983   APInt DefBits(VT.getSizeInBits(), 0);
14984   APInt UndefBits(VT.getSizeInBits(), 0);
14985   if (resolveBuildVector(BVN, DefBits, UndefBits)) {
14986     SDValue NewOp;
14987 
14988     DefBits = ~DefBits;
14989     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG,
14990                                     DefBits, &LHS)) ||
14991         (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG,
14992                                     DefBits, &LHS)))
14993       return NewOp;
14994 
14995     UndefBits = ~UndefBits;
14996     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG,
14997                                     UndefBits, &LHS)) ||
14998         (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG,
14999                                     UndefBits, &LHS)))
15000       return NewOp;
15001   }
15002 
15003   return SDValue();
15004 }
15005 
15006 static bool hasPairwiseAdd(unsigned Opcode, EVT VT, bool FullFP16) {
15007   switch (Opcode) {
15008   case ISD::STRICT_FADD:
15009   case ISD::FADD:
15010     return (FullFP16 && VT == MVT::f16) || VT == MVT::f32 || VT == MVT::f64;
15011   case ISD::ADD:
15012     return VT == MVT::i64;
15013   default:
15014     return false;
15015   }
15016 }
15017 
15018 static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
15019                         AArch64CC::CondCode Cond);
15020 
15021 static bool isPredicateCCSettingOp(SDValue N) {
15022   if ((N.getOpcode() == ISD::SETCC) ||
15023       (N.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15024        (N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilege ||
15025         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilegt ||
15026         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilehi ||
15027         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilehs ||
15028         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilele ||
15029         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelo ||
15030         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilels ||
15031         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelt ||
15032         // get_active_lane_mask is lowered to a whilelo instruction.
15033         N.getConstantOperandVal(0) == Intrinsic::get_active_lane_mask)))
15034     return true;
15035 
15036   return false;
15037 }
15038 
15039 // Materialize : i1 = extract_vector_elt t37, Constant:i64<0>
15040 // ... into: "ptrue p, all" + PTEST
15041 static SDValue
15042 performFirstTrueTestVectorCombine(SDNode *N,
15043                                   TargetLowering::DAGCombinerInfo &DCI,
15044                                   const AArch64Subtarget *Subtarget) {
15045   assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
15046   // Make sure PTEST can be legalised with illegal types.
15047   if (!Subtarget->hasSVE() || DCI.isBeforeLegalize())
15048     return SDValue();
15049 
15050   SDValue N0 = N->getOperand(0);
15051   EVT VT = N0.getValueType();
15052 
15053   if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1 ||
15054       !isNullConstant(N->getOperand(1)))
15055     return SDValue();
15056 
15057   // Restrict the DAG combine to only cases where we're extracting from a
15058   // flag-setting operation.
15059   if (!isPredicateCCSettingOp(N0))
15060     return SDValue();
15061 
15062   // Extracts of lane 0 for SVE can be expressed as PTEST(Op, FIRST) ? 1 : 0
15063   SelectionDAG &DAG = DCI.DAG;
15064   SDValue Pg = getPTrue(DAG, SDLoc(N), VT, AArch64SVEPredPattern::all);
15065   return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::FIRST_ACTIVE);
15066 }
15067 
15068 // Materialize : Idx = (add (mul vscale, NumEls), -1)
15069 //               i1 = extract_vector_elt t37, Constant:i64<Idx>
15070 //     ... into: "ptrue p, all" + PTEST
15071 static SDValue
15072 performLastTrueTestVectorCombine(SDNode *N,
15073                                  TargetLowering::DAGCombinerInfo &DCI,
15074                                  const AArch64Subtarget *Subtarget) {
15075   assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
15076   // Make sure PTEST can be legalised with illegal types.
15077   if (!Subtarget->hasSVE() || DCI.isBeforeLegalize())
15078     return SDValue();
15079 
15080   SDValue N0 = N->getOperand(0);
15081   EVT OpVT = N0.getValueType();
15082 
15083   if (!OpVT.isScalableVector() || OpVT.getVectorElementType() != MVT::i1)
15084     return SDValue();
15085 
15086   // Idx == (add (mul vscale, NumEls), -1)
15087   SDValue Idx = N->getOperand(1);
15088   if (Idx.getOpcode() != ISD::ADD || !isAllOnesConstant(Idx.getOperand(1)))
15089     return SDValue();
15090 
15091   SDValue VS = Idx.getOperand(0);
15092   if (VS.getOpcode() != ISD::VSCALE)
15093     return SDValue();
15094 
15095   unsigned NumEls = OpVT.getVectorElementCount().getKnownMinValue();
15096   if (VS.getConstantOperandVal(0) != NumEls)
15097     return SDValue();
15098 
15099   // Extracts of lane EC-1 for SVE can be expressed as PTEST(Op, LAST) ? 1 : 0
15100   SelectionDAG &DAG = DCI.DAG;
15101   SDValue Pg = getPTrue(DAG, SDLoc(N), OpVT, AArch64SVEPredPattern::all);
15102   return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::LAST_ACTIVE);
15103 }
15104 
15105 static SDValue
15106 performExtractVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
15107                                const AArch64Subtarget *Subtarget) {
15108   assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
15109   if (SDValue Res = performFirstTrueTestVectorCombine(N, DCI, Subtarget))
15110     return Res;
15111   if (SDValue Res = performLastTrueTestVectorCombine(N, DCI, Subtarget))
15112     return Res;
15113 
15114   SelectionDAG &DAG = DCI.DAG;
15115   SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
15116   ConstantSDNode *ConstantN1 = dyn_cast<ConstantSDNode>(N1);
15117 
15118   EVT VT = N->getValueType(0);
15119   const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
15120   bool IsStrict = N0->isStrictFPOpcode();
15121 
15122   // extract(dup x) -> x
15123   if (N0.getOpcode() == AArch64ISD::DUP)
15124     return DAG.getZExtOrTrunc(N0.getOperand(0), SDLoc(N), VT);
15125 
15126   // Rewrite for pairwise fadd pattern
15127   //   (f32 (extract_vector_elt
15128   //           (fadd (vXf32 Other)
15129   //                 (vector_shuffle (vXf32 Other) undef <1,X,...> )) 0))
15130   // ->
15131   //   (f32 (fadd (extract_vector_elt (vXf32 Other) 0)
15132   //              (extract_vector_elt (vXf32 Other) 1))
15133   // For strict_fadd we need to make sure the old strict_fadd can be deleted, so
15134   // we can only do this when it's used only by the extract_vector_elt.
15135   if (ConstantN1 && ConstantN1->getZExtValue() == 0 &&
15136       hasPairwiseAdd(N0->getOpcode(), VT, FullFP16) &&
15137       (!IsStrict || N0.hasOneUse())) {
15138     SDLoc DL(N0);
15139     SDValue N00 = N0->getOperand(IsStrict ? 1 : 0);
15140     SDValue N01 = N0->getOperand(IsStrict ? 2 : 1);
15141 
15142     ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(N01);
15143     SDValue Other = N00;
15144 
15145     // And handle the commutative case.
15146     if (!Shuffle) {
15147       Shuffle = dyn_cast<ShuffleVectorSDNode>(N00);
15148       Other = N01;
15149     }
15150 
15151     if (Shuffle && Shuffle->getMaskElt(0) == 1 &&
15152         Other == Shuffle->getOperand(0)) {
15153       SDValue Extract1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Other,
15154                                      DAG.getConstant(0, DL, MVT::i64));
15155       SDValue Extract2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Other,
15156                                      DAG.getConstant(1, DL, MVT::i64));
15157       if (!IsStrict)
15158         return DAG.getNode(N0->getOpcode(), DL, VT, Extract1, Extract2);
15159 
15160       // For strict_fadd we need uses of the final extract_vector to be replaced
15161       // with the strict_fadd, but we also need uses of the chain output of the
15162       // original strict_fadd to use the chain output of the new strict_fadd as
15163       // otherwise it may not be deleted.
15164       SDValue Ret = DAG.getNode(N0->getOpcode(), DL,
15165                                 {VT, MVT::Other},
15166                                 {N0->getOperand(0), Extract1, Extract2});
15167       DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Ret);
15168       DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Ret.getValue(1));
15169       return SDValue(N, 0);
15170     }
15171   }
15172 
15173   return SDValue();
15174 }
15175 
15176 static SDValue performConcatVectorsCombine(SDNode *N,
15177                                            TargetLowering::DAGCombinerInfo &DCI,
15178                                            SelectionDAG &DAG) {
15179   SDLoc dl(N);
15180   EVT VT = N->getValueType(0);
15181   SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
15182   unsigned N0Opc = N0->getOpcode(), N1Opc = N1->getOpcode();
15183 
15184   if (VT.isScalableVector())
15185     return SDValue();
15186 
15187   // Optimize concat_vectors of truncated vectors, where the intermediate
15188   // type is illegal, to avoid said illegality, e.g.,
15189   //   (v4i16 (concat_vectors (v2i16 (truncate (v2i64))),
15190   //                          (v2i16 (truncate (v2i64)))))
15191   // ->
15192   //   (v4i16 (truncate (vector_shuffle (v4i32 (bitcast (v2i64))),
15193   //                                    (v4i32 (bitcast (v2i64))),
15194   //                                    <0, 2, 4, 6>)))
15195   // This isn't really target-specific, but ISD::TRUNCATE legality isn't keyed
15196   // on both input and result type, so we might generate worse code.
15197   // On AArch64 we know it's fine for v2i64->v4i16 and v4i32->v8i8.
15198   if (N->getNumOperands() == 2 && N0Opc == ISD::TRUNCATE &&
15199       N1Opc == ISD::TRUNCATE) {
15200     SDValue N00 = N0->getOperand(0);
15201     SDValue N10 = N1->getOperand(0);
15202     EVT N00VT = N00.getValueType();
15203 
15204     if (N00VT == N10.getValueType() &&
15205         (N00VT == MVT::v2i64 || N00VT == MVT::v4i32) &&
15206         N00VT.getScalarSizeInBits() == 4 * VT.getScalarSizeInBits()) {
15207       MVT MidVT = (N00VT == MVT::v2i64 ? MVT::v4i32 : MVT::v8i16);
15208       SmallVector<int, 8> Mask(MidVT.getVectorNumElements());
15209       for (size_t i = 0; i < Mask.size(); ++i)
15210         Mask[i] = i * 2;
15211       return DAG.getNode(ISD::TRUNCATE, dl, VT,
15212                          DAG.getVectorShuffle(
15213                              MidVT, dl,
15214                              DAG.getNode(ISD::BITCAST, dl, MidVT, N00),
15215                              DAG.getNode(ISD::BITCAST, dl, MidVT, N10), Mask));
15216     }
15217   }
15218 
15219   if (N->getOperand(0).getValueType() == MVT::v4i8) {
15220     // If we have a concat of v4i8 loads, convert them to a buildvector of f32
15221     // loads to prevent having to go through the v4i8 load legalization that
15222     // needs to extend each element into a larger type.
15223     if (N->getNumOperands() % 2 == 0 && all_of(N->op_values(), [](SDValue V) {
15224           if (V.getValueType() != MVT::v4i8)
15225             return false;
15226           if (V.isUndef())
15227             return true;
15228           LoadSDNode *LD = dyn_cast<LoadSDNode>(V);
15229           return LD && V.hasOneUse() && LD->isSimple() && !LD->isIndexed() &&
15230                  LD->getExtensionType() == ISD::NON_EXTLOAD;
15231         })) {
15232       EVT NVT =
15233           EVT::getVectorVT(*DAG.getContext(), MVT::f32, N->getNumOperands());
15234       SmallVector<SDValue> Ops;
15235 
15236       for (unsigned i = 0; i < N->getNumOperands(); i++) {
15237         SDValue V = N->getOperand(i);
15238         if (V.isUndef())
15239           Ops.push_back(DAG.getUNDEF(MVT::f32));
15240         else {
15241           LoadSDNode *LD = cast<LoadSDNode>(V);
15242           SDValue NewLoad =
15243               DAG.getLoad(MVT::f32, dl, LD->getChain(), LD->getBasePtr(),
15244                           LD->getMemOperand());
15245           DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLoad.getValue(1));
15246           Ops.push_back(NewLoad);
15247         }
15248       }
15249       return DAG.getBitcast(N->getValueType(0),
15250                             DAG.getBuildVector(NVT, dl, Ops));
15251     }
15252   }
15253 
15254 
15255   // Wait 'til after everything is legalized to try this. That way we have
15256   // legal vector types and such.
15257   if (DCI.isBeforeLegalizeOps())
15258     return SDValue();
15259 
15260   // Optimise concat_vectors of two [us]avgceils or [us]avgfloors that use
15261   // extracted subvectors from the same original vectors. Combine these into a
15262   // single avg that operates on the two original vectors.
  // avgceil is the target-independent name for rhadd; avgfloor is a hadd.
15264   // Example:
15265   //  (concat_vectors (v8i8 (avgceils (extract_subvector (v16i8 OpA, <0>),
15266   //                                   extract_subvector (v16i8 OpB, <0>))),
15267   //                  (v8i8 (avgceils (extract_subvector (v16i8 OpA, <8>),
15268   //                                   extract_subvector (v16i8 OpB, <8>)))))
15269   // ->
15270   //  (v16i8(avgceils(v16i8 OpA, v16i8 OpB)))
15271   if (N->getNumOperands() == 2 && N0Opc == N1Opc &&
15272       (N0Opc == ISD::AVGCEILU || N0Opc == ISD::AVGCEILS ||
15273        N0Opc == ISD::AVGFLOORU || N0Opc == ISD::AVGFLOORS)) {
15274     SDValue N00 = N0->getOperand(0);
15275     SDValue N01 = N0->getOperand(1);
15276     SDValue N10 = N1->getOperand(0);
15277     SDValue N11 = N1->getOperand(1);
15278 
15279     EVT N00VT = N00.getValueType();
15280     EVT N10VT = N10.getValueType();
15281 
15282     if (N00->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15283         N01->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15284         N10->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15285         N11->getOpcode() == ISD::EXTRACT_SUBVECTOR && N00VT == N10VT) {
15286       SDValue N00Source = N00->getOperand(0);
15287       SDValue N01Source = N01->getOperand(0);
15288       SDValue N10Source = N10->getOperand(0);
15289       SDValue N11Source = N11->getOperand(0);
15290 
15291       if (N00Source == N10Source && N01Source == N11Source &&
15292           N00Source.getValueType() == VT && N01Source.getValueType() == VT) {
15293         assert(N0.getValueType() == N1.getValueType());
15294 
15295         uint64_t N00Index = N00.getConstantOperandVal(1);
15296         uint64_t N01Index = N01.getConstantOperandVal(1);
15297         uint64_t N10Index = N10.getConstantOperandVal(1);
15298         uint64_t N11Index = N11.getConstantOperandVal(1);
15299 
15300         if (N00Index == N01Index && N10Index == N11Index && N00Index == 0 &&
15301             N10Index == N00VT.getVectorNumElements())
15302           return DAG.getNode(N0Opc, dl, VT, N00Source, N01Source);
15303       }
15304     }
15305   }
15306 
15307   // If we see a (concat_vectors (v1x64 A), (v1x64 A)) it's really a vector
15308   // splat. The indexed instructions are going to be expecting a DUPLANE64, so
15309   // canonicalise to that.
15310   if (N->getNumOperands() == 2 && N0 == N1 && VT.getVectorNumElements() == 2) {
15311     assert(VT.getScalarSizeInBits() == 64);
15312     return DAG.getNode(AArch64ISD::DUPLANE64, dl, VT, WidenVector(N0, DAG),
15313                        DAG.getConstant(0, dl, MVT::i64));
15314   }
15315 
15316   // Canonicalise concat_vectors so that the right-hand vector has as few
15317   // bit-casts as possible before its real operation. The primary matching
15318   // destination for these operations will be the narrowing "2" instructions,
15319   // which depend on the operation being performed on this right-hand vector.
15320   // For example,
15321   //    (concat_vectors LHS,  (v1i64 (bitconvert (v4i16 RHS))))
15322   // becomes
15323   //    (bitconvert (concat_vectors (v4i16 (bitconvert LHS)), RHS))
15324 
15325   if (N->getNumOperands() != 2 || N1Opc != ISD::BITCAST)
15326     return SDValue();
15327   SDValue RHS = N1->getOperand(0);
15328   MVT RHSTy = RHS.getValueType().getSimpleVT();
15329   // If the RHS is not a vector, this is not the pattern we're looking for.
15330   if (!RHSTy.isVector())
15331     return SDValue();
15332 
15333   LLVM_DEBUG(
15334       dbgs() << "aarch64-lower: concat_vectors bitcast simplification\n");
15335 
15336   MVT ConcatTy = MVT::getVectorVT(RHSTy.getVectorElementType(),
15337                                   RHSTy.getVectorNumElements() * 2);
15338   return DAG.getNode(ISD::BITCAST, dl, VT,
15339                      DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatTy,
15340                                  DAG.getNode(ISD::BITCAST, dl, RHSTy, N0),
15341                                  RHS));
15342 }
15343 
15344 static SDValue
15345 performExtractSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
15346                                SelectionDAG &DAG) {
15347   if (DCI.isBeforeLegalizeOps())
15348     return SDValue();
15349 
15350   EVT VT = N->getValueType(0);
15351   if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1)
15352     return SDValue();
15353 
15354   SDValue V = N->getOperand(0);
15355 
15356   // NOTE: This combine exists in DAGCombiner, but that version's legality check
15357   // blocks this combine because the non-const case requires custom lowering.
15358   //
  // ty1 extract_subvector(ty2 splat(const)) -> ty1 splat(const)
15360   if (V.getOpcode() == ISD::SPLAT_VECTOR)
15361     if (isa<ConstantSDNode>(V.getOperand(0)))
15362       return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), VT, V.getOperand(0));
15363 
15364   return SDValue();
15365 }
15366 
15367 static SDValue
15368 performInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
15369                               SelectionDAG &DAG) {
15370   SDLoc DL(N);
15371   SDValue Vec = N->getOperand(0);
15372   SDValue SubVec = N->getOperand(1);
15373   uint64_t IdxVal = N->getConstantOperandVal(2);
15374   EVT VecVT = Vec.getValueType();
15375   EVT SubVT = SubVec.getValueType();
15376 
15377   // Only do this for legal fixed vector types.
15378   if (!VecVT.isFixedLengthVector() ||
15379       !DAG.getTargetLoweringInfo().isTypeLegal(VecVT) ||
15380       !DAG.getTargetLoweringInfo().isTypeLegal(SubVT))
15381     return SDValue();
15382 
15383   // Ignore widening patterns.
15384   if (IdxVal == 0 && Vec.isUndef())
15385     return SDValue();
15386 
15387   // Subvector must be half the width and an "aligned" insertion.
15388   unsigned NumSubElts = SubVT.getVectorNumElements();
15389   if ((SubVT.getSizeInBits() * 2) != VecVT.getSizeInBits() ||
15390       (IdxVal != 0 && IdxVal != NumSubElts))
15391     return SDValue();
15392 
15393   // Fold insert_subvector -> concat_vectors
15394   // insert_subvector(Vec,Sub,lo) -> concat_vectors(Sub,extract(Vec,hi))
15395   // insert_subvector(Vec,Sub,hi) -> concat_vectors(extract(Vec,lo),Sub)
15396   SDValue Lo, Hi;
15397   if (IdxVal == 0) {
15398     Lo = SubVec;
15399     Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec,
15400                      DAG.getVectorIdxConstant(NumSubElts, DL));
15401   } else {
15402     Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec,
15403                      DAG.getVectorIdxConstant(0, DL));
15404     Hi = SubVec;
15405   }
15406   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT, Lo, Hi);
15407 }
15408 
15409 static SDValue tryCombineFixedPointConvert(SDNode *N,
15410                                            TargetLowering::DAGCombinerInfo &DCI,
15411                                            SelectionDAG &DAG) {
15412   // Wait until after everything is legalized to try this. That way we have
15413   // legal vector types and such.
15414   if (DCI.isBeforeLegalizeOps())
15415     return SDValue();
15416   // Transform a scalar conversion of a value from a lane extract into a
15417   // lane extract of a vector conversion. E.g., from foo1 to foo2:
15418   // double foo1(int64x2_t a) { return vcvtd_n_f64_s64(a[1], 9); }
15419   // double foo2(int64x2_t a) { return vcvtq_n_f64_s64(a, 9)[1]; }
15420   //
15421   // The second form interacts better with instruction selection and the
15422   // register allocator to avoid cross-class register copies that aren't
15423   // coalescable due to a lane reference.
15424 
15425   // Check the operand and see if it originates from a lane extract.
15426   SDValue Op1 = N->getOperand(1);
15427   if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
15428     return SDValue();
15429 
15430   // Yep, no additional predication needed. Perform the transform.
15431   SDValue IID = N->getOperand(0);
15432   SDValue Shift = N->getOperand(2);
15433   SDValue Vec = Op1.getOperand(0);
15434   SDValue Lane = Op1.getOperand(1);
15435   EVT ResTy = N->getValueType(0);
15436   EVT VecResTy;
15437   SDLoc DL(N);
15438 
15439   // The vector width should be 128 bits by the time we get here, even
15440   // if it started as 64 bits (the extract_vector handling will have
15441   // done so). Bail if it is not.
15442   if (Vec.getValueSizeInBits() != 128)
15443     return SDValue();
15444 
15445   if (Vec.getValueType() == MVT::v4i32)
15446     VecResTy = MVT::v4f32;
15447   else if (Vec.getValueType() == MVT::v2i64)
15448     VecResTy = MVT::v2f64;
15449   else
15450     return SDValue();
15451 
15452   SDValue Convert =
15453       DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VecResTy, IID, Vec, Shift);
15454   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResTy, Convert, Lane);
15455 }
15456 
15457 // AArch64 high-vector "long" operations are formed by performing the non-high
15458 // version on an extract_subvector of each operand which gets the high half:
15459 //
15460 //  (longop2 LHS, RHS) == (longop (extract_high LHS), (extract_high RHS))
15461 //
15462 // However, there are cases which don't have an extract_high explicitly, but
15463 // have another operation that can be made compatible with one for free. For
15464 // example:
15465 //
15466 //  (dupv64 scalar) --> (extract_high (dup128 scalar))
15467 //
15468 // This routine does the actual conversion of such DUPs, once outer routines
15469 // have determined that everything else is in order.
15470 // It also supports immediate DUP-like nodes (MOVI/MVNi), which we can fold
15471 // similarly here.
15472 static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) {
15473   MVT VT = N.getSimpleValueType();
15474   if (N.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15475       N.getConstantOperandVal(1) == 0)
15476     N = N.getOperand(0);
15477 
15478   switch (N.getOpcode()) {
15479   case AArch64ISD::DUP:
15480   case AArch64ISD::DUPLANE8:
15481   case AArch64ISD::DUPLANE16:
15482   case AArch64ISD::DUPLANE32:
15483   case AArch64ISD::DUPLANE64:
15484   case AArch64ISD::MOVI:
15485   case AArch64ISD::MOVIshift:
15486   case AArch64ISD::MOVIedit:
15487   case AArch64ISD::MOVImsl:
15488   case AArch64ISD::MVNIshift:
15489   case AArch64ISD::MVNImsl:
15490     break;
15491   default:
    // FMOV could be supported, but isn't very useful, as it would only occur
    // if you passed a bitcasted floating point immediate to an eligible long
    // integer op (addl, smull, ...).
15495     return SDValue();
15496   }
15497 
15498   if (!VT.is64BitVector())
15499     return SDValue();
15500 
15501   SDLoc DL(N);
15502   unsigned NumElems = VT.getVectorNumElements();
15503   if (N.getValueType().is64BitVector()) {
15504     MVT ElementTy = VT.getVectorElementType();
15505     MVT NewVT = MVT::getVectorVT(ElementTy, NumElems * 2);
15506     N = DAG.getNode(N->getOpcode(), DL, NewVT, N->ops());
15507   }
15508 
15509   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, N,
15510                      DAG.getConstant(NumElems, DL, MVT::i64));
15511 }
15512 
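// Returns true if N (looking through a possible bitcast) is an
// extract_subvector that takes the upper half of a fixed-length vector.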
15513 static bool isEssentiallyExtractHighSubvector(SDValue N) {
15514   if (N.getOpcode() == ISD::BITCAST)
15515     N = N.getOperand(0);
15516   if (N.getOpcode() != ISD::EXTRACT_SUBVECTOR)
15517     return false;
15518   if (N.getOperand(0).getValueType().isScalableVector())
15519     return false;
15520   return cast<ConstantSDNode>(N.getOperand(1))->getAPIntValue() ==
15521          N.getOperand(0).getValueType().getVectorNumElements() / 2;
15522 }
15523 
15524 /// Helper structure to keep track of ISD::SET_CC operands.
15525 struct GenericSetCCInfo {
15526   const SDValue *Opnd0;
15527   const SDValue *Opnd1;
15528   ISD::CondCode CC;
15529 };
15530 
15531 /// Helper structure to keep track of a SET_CC lowered into AArch64 code.
15532 struct AArch64SetCCInfo {
15533   const SDValue *Cmp;
15534   AArch64CC::CondCode CC;
15535 };
15536 
15537 /// Helper structure to keep track of SetCC information.
15538 union SetCCInfo {
15539   GenericSetCCInfo Generic;
15540   AArch64SetCCInfo AArch64;
15541 };
15542 
/// Helper structure to be able to read SetCC information.  If the IsAArch64
/// field is set to true, Info is an AArch64SetCCInfo; otherwise Info is a
/// GenericSetCCInfo.
15546 struct SetCCInfoAndKind {
15547   SetCCInfo Info;
15548   bool IsAArch64;
15549 };
15550 
/// Check whether or not \p Op is a SET_CC operation, either a generic or an
/// AArch64 lowered one.
/// \p SetCCInfo is filled accordingly.
/// \post SetCCInfo is meaningful only when this function returns true.
15556 /// \return True when Op is a kind of SET_CC operation.
15557 static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo) {
  // If this is a setcc, this is straightforward.
15559   if (Op.getOpcode() == ISD::SETCC) {
15560     SetCCInfo.Info.Generic.Opnd0 = &Op.getOperand(0);
15561     SetCCInfo.Info.Generic.Opnd1 = &Op.getOperand(1);
15562     SetCCInfo.Info.Generic.CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
15563     SetCCInfo.IsAArch64 = false;
15564     return true;
15565   }
15566   // Otherwise, check if this is a matching csel instruction.
15567   // In other words:
15568   // - csel 1, 0, cc
15569   // - csel 0, 1, !cc
15570   if (Op.getOpcode() != AArch64ISD::CSEL)
15571     return false;
15572   // Set the information about the operands.
15573   // TODO: we want the operands of the Cmp not the csel
15574   SetCCInfo.Info.AArch64.Cmp = &Op.getOperand(3);
15575   SetCCInfo.IsAArch64 = true;
15576   SetCCInfo.Info.AArch64.CC = static_cast<AArch64CC::CondCode>(
15577       cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
15578 
  // Check that the operands match the constraints:
15580   // (1) Both operands must be constants.
15581   // (2) One must be 1 and the other must be 0.
15582   ConstantSDNode *TValue = dyn_cast<ConstantSDNode>(Op.getOperand(0));
15583   ConstantSDNode *FValue = dyn_cast<ConstantSDNode>(Op.getOperand(1));
15584 
15585   // Check (1).
15586   if (!TValue || !FValue)
15587     return false;
15588 
15589   // Check (2).
15590   if (!TValue->isOne()) {
15591     // Update the comparison when we are interested in !cc.
15592     std::swap(TValue, FValue);
15593     SetCCInfo.Info.AArch64.CC =
15594         AArch64CC::getInvertedCondCode(SetCCInfo.Info.AArch64.CC);
15595   }
15596   return TValue->isOne() && FValue->isZero();
15597 }
15598 
15599 // Returns true if Op is setcc or zext of setcc.
15600 static bool isSetCCOrZExtSetCC(const SDValue& Op, SetCCInfoAndKind &Info) {
15601   if (isSetCC(Op, Info))
15602     return true;
15603   return ((Op.getOpcode() == ISD::ZERO_EXTEND) &&
15604     isSetCC(Op->getOperand(0), Info));
15605 }
15606 
15607 // The folding we want to perform is:
15608 // (add x, [zext] (setcc cc ...) )
15609 //   -->
15610 // (csel x, (add x, 1), !cc ...)
15611 //
15612 // The latter will get matched to a CSINC instruction.
15613 static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG) {
15614   assert(Op && Op->getOpcode() == ISD::ADD && "Unexpected operation!");
15615   SDValue LHS = Op->getOperand(0);
15616   SDValue RHS = Op->getOperand(1);
15617   SetCCInfoAndKind InfoAndKind;
15618 
15619   // If both operands are a SET_CC, then we don't want to perform this
15620   // folding and create another csel as this results in more instructions
15621   // (and higher register usage).
15622   if (isSetCCOrZExtSetCC(LHS, InfoAndKind) &&
15623       isSetCCOrZExtSetCC(RHS, InfoAndKind))
15624     return SDValue();
15625 
15626   // If neither operand is a SET_CC, give up.
15627   if (!isSetCCOrZExtSetCC(LHS, InfoAndKind)) {
15628     std::swap(LHS, RHS);
15629     if (!isSetCCOrZExtSetCC(LHS, InfoAndKind))
15630       return SDValue();
15631   }
15632 
  // FIXME: This could be generalized to work for FP comparisons.
15634   EVT CmpVT = InfoAndKind.IsAArch64
15635                   ? InfoAndKind.Info.AArch64.Cmp->getOperand(0).getValueType()
15636                   : InfoAndKind.Info.Generic.Opnd0->getValueType();
15637   if (CmpVT != MVT::i32 && CmpVT != MVT::i64)
15638     return SDValue();
15639 
15640   SDValue CCVal;
15641   SDValue Cmp;
15642   SDLoc dl(Op);
15643   if (InfoAndKind.IsAArch64) {
15644     CCVal = DAG.getConstant(
15645         AArch64CC::getInvertedCondCode(InfoAndKind.Info.AArch64.CC), dl,
15646         MVT::i32);
15647     Cmp = *InfoAndKind.Info.AArch64.Cmp;
15648   } else
15649     Cmp = getAArch64Cmp(
15650         *InfoAndKind.Info.Generic.Opnd0, *InfoAndKind.Info.Generic.Opnd1,
15651         ISD::getSetCCInverse(InfoAndKind.Info.Generic.CC, CmpVT), CCVal, DAG,
15652         dl);
15653 
15654   EVT VT = Op->getValueType(0);
15655   LHS = DAG.getNode(ISD::ADD, dl, VT, RHS, DAG.getConstant(1, dl, VT));
15656   return DAG.getNode(AArch64ISD::CSEL, dl, VT, RHS, LHS, CCVal, Cmp);
15657 }
15658 
15659 // ADD(UADDV a, UADDV b) -->  UADDV(ADD a, b)
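// Because UADDV leaves its scalar result in lane 0 of a vector register, the
// DAG shape actually matched is:
//   add(extract_vector_elt(UADDV(a), 0), extract_vector_elt(UADDV(b), 0))
//     --> extract_vector_elt(UADDV(add(a, b)), 0)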
15660 static SDValue performAddUADDVCombine(SDNode *N, SelectionDAG &DAG) {
15661   EVT VT = N->getValueType(0);
  // Only handle scalar integer result types.
15663   if (N->getOpcode() != ISD::ADD || !VT.isScalarInteger())
15664     return SDValue();
15665 
15666   SDValue LHS = N->getOperand(0);
15667   SDValue RHS = N->getOperand(1);
15668   if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
15669       RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT || LHS.getValueType() != VT)
15670     return SDValue();
15671 
15672   auto *LHSN1 = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
15673   auto *RHSN1 = dyn_cast<ConstantSDNode>(RHS->getOperand(1));
15674   if (!LHSN1 || LHSN1 != RHSN1 || !RHSN1->isZero())
15675     return SDValue();
15676 
15677   SDValue Op1 = LHS->getOperand(0);
15678   SDValue Op2 = RHS->getOperand(0);
15679   EVT OpVT1 = Op1.getValueType();
15680   EVT OpVT2 = Op2.getValueType();
15681   if (Op1.getOpcode() != AArch64ISD::UADDV || OpVT1 != OpVT2 ||
15682       Op2.getOpcode() != AArch64ISD::UADDV ||
15683       OpVT1.getVectorElementType() != VT)
15684     return SDValue();
15685 
15686   SDValue Val1 = Op1.getOperand(0);
15687   SDValue Val2 = Op2.getOperand(0);
15688   EVT ValVT = Val1->getValueType(0);
15689   SDLoc DL(N);
15690   SDValue AddVal = DAG.getNode(ISD::ADD, DL, ValVT, Val1, Val2);
15691   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
15692                      DAG.getNode(AArch64ISD::UADDV, DL, ValVT, AddVal),
15693                      DAG.getConstant(0, DL, MVT::i64));
15694 }
15695 
15696 /// Perform the scalar expression combine in the form of:
15697 ///   CSEL(c, 1, cc) + b => CSINC(b+c, b, cc)
15698 ///   CSNEG(c, -1, cc) + b => CSINC(b+c, b, cc)
15699 static SDValue performAddCSelIntoCSinc(SDNode *N, SelectionDAG &DAG) {
15700   EVT VT = N->getValueType(0);
15701   if (!VT.isScalarInteger() || N->getOpcode() != ISD::ADD)
15702     return SDValue();
15703 
15704   SDValue LHS = N->getOperand(0);
15705   SDValue RHS = N->getOperand(1);
15706 
  // Handle commutativity.
15708   if (LHS.getOpcode() != AArch64ISD::CSEL &&
15709       LHS.getOpcode() != AArch64ISD::CSNEG) {
15710     std::swap(LHS, RHS);
15711     if (LHS.getOpcode() != AArch64ISD::CSEL &&
15712         LHS.getOpcode() != AArch64ISD::CSNEG) {
15713       return SDValue();
15714     }
15715   }
15716 
15717   if (!LHS.hasOneUse())
15718     return SDValue();
15719 
15720   AArch64CC::CondCode AArch64CC =
15721       static_cast<AArch64CC::CondCode>(LHS.getConstantOperandVal(2));
15722 
  // The CSEL should include a constant one operand, and the CSNEG should
  // include a one or negative-one operand.
15725   ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(LHS.getOperand(0));
15726   ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
15727   if (!CTVal || !CFVal)
15728     return SDValue();
15729 
15730   if (!(LHS.getOpcode() == AArch64ISD::CSEL &&
15731         (CTVal->isOne() || CFVal->isOne())) &&
15732       !(LHS.getOpcode() == AArch64ISD::CSNEG &&
15733         (CTVal->isOne() || CFVal->isAllOnes())))
15734     return SDValue();
15735 
15736   // Switch CSEL(1, c, cc) to CSEL(c, 1, !cc)
15737   if (LHS.getOpcode() == AArch64ISD::CSEL && CTVal->isOne() &&
15738       !CFVal->isOne()) {
15739     std::swap(CTVal, CFVal);
15740     AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
15741   }
15742 
15743   SDLoc DL(N);
15744   // Switch CSNEG(1, c, cc) to CSNEG(-c, -1, !cc)
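  // (Both forms compute cc ? 1 : -c; the latter has the all-ones false
  // operand required by the CSINC fold below.)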
15745   if (LHS.getOpcode() == AArch64ISD::CSNEG && CTVal->isOne() &&
15746       !CFVal->isAllOnes()) {
15747     APInt C = -1 * CFVal->getAPIntValue();
15748     CTVal = cast<ConstantSDNode>(DAG.getConstant(C, DL, VT));
15749     CFVal = cast<ConstantSDNode>(DAG.getAllOnesConstant(DL, VT));
15750     AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
15751   }
15752 
  // This fold might be neutral for larger constants, as the immediate needs
  // to be materialized in a register.
15755   APInt ADDC = CTVal->getAPIntValue();
15756   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15757   if (!TLI.isLegalAddImmediate(ADDC.getSExtValue()))
15758     return SDValue();
15759 
15760   assert(((LHS.getOpcode() == AArch64ISD::CSEL && CFVal->isOne()) ||
15761           (LHS.getOpcode() == AArch64ISD::CSNEG && CFVal->isAllOnes())) &&
15762          "Unexpected constant value");
15763 
15764   SDValue NewNode = DAG.getNode(ISD::ADD, DL, VT, RHS, SDValue(CTVal, 0));
15765   SDValue CCVal = DAG.getConstant(AArch64CC, DL, MVT::i32);
15766   SDValue Cmp = LHS.getOperand(3);
15767 
15768   return DAG.getNode(AArch64ISD::CSINC, DL, VT, NewNode, RHS, CCVal, Cmp);
15769 }
15770 
15771 // ADD(UDOT(zero, x, y), A) -->  UDOT(A, x, y)
15772 static SDValue performAddDotCombine(SDNode *N, SelectionDAG &DAG) {
15773   EVT VT = N->getValueType(0);
15774   if (N->getOpcode() != ISD::ADD)
15775     return SDValue();
15776 
15777   SDValue Dot = N->getOperand(0);
15778   SDValue A = N->getOperand(1);
  // Handle commutativity
15780   auto isZeroDot = [](SDValue Dot) {
15781     return (Dot.getOpcode() == AArch64ISD::UDOT ||
15782             Dot.getOpcode() == AArch64ISD::SDOT) &&
15783            isZerosVector(Dot.getOperand(0).getNode());
15784   };
15785   if (!isZeroDot(Dot))
15786     std::swap(Dot, A);
15787   if (!isZeroDot(Dot))
15788     return SDValue();
15789 
15790   return DAG.getNode(Dot.getOpcode(), SDLoc(N), VT, A, Dot.getOperand(1),
15791                      Dot.getOperand(2));
15792 }
15793 
15794 static bool isNegatedInteger(SDValue Op) {
15795   return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0));
15796 }
15797 
15798 static SDValue getNegatedInteger(SDValue Op, SelectionDAG &DAG) {
15799   SDLoc DL(Op);
15800   EVT VT = Op.getValueType();
15801   SDValue Zero = DAG.getConstant(0, DL, VT);
15802   return DAG.getNode(ISD::SUB, DL, VT, Zero, Op);
15803 }
15804 
15805 // Try to fold
15806 //
15807 // (neg (csel X, Y)) -> (csel (neg X), (neg Y))
15808 //
// The folding helps csel to be matched with csneg without generating a
// redundant neg instruction, including the negation in the csel expansion
// of an abs node lowered by lowerABS.
15812 static SDValue performNegCSelCombine(SDNode *N, SelectionDAG &DAG) {
15813   if (!isNegatedInteger(SDValue(N, 0)))
15814     return SDValue();
15815 
15816   SDValue CSel = N->getOperand(1);
15817   if (CSel.getOpcode() != AArch64ISD::CSEL || !CSel->hasOneUse())
15818     return SDValue();
15819 
15820   SDValue N0 = CSel.getOperand(0);
15821   SDValue N1 = CSel.getOperand(1);
15822 
  // If neither of them is a negation, the fold is not worthwhile as it would
  // introduce two additional negations while removing only one.
15825   if (!isNegatedInteger(N0) && !isNegatedInteger(N1))
15826     return SDValue();
15827 
15828   SDValue N0N = getNegatedInteger(N0, DAG);
15829   SDValue N1N = getNegatedInteger(N1, DAG);
15830 
15831   SDLoc DL(N);
15832   EVT VT = CSel.getValueType();
15833   return DAG.getNode(AArch64ISD::CSEL, DL, VT, N0N, N1N, CSel.getOperand(2),
15834                      CSel.getOperand(3));
15835 }
15836 
15837 // The basic add/sub long vector instructions have variants with "2" on the end
15838 // which act on the high-half of their inputs. They are normally matched by
15839 // patterns like:
15840 //
15841 // (add (zeroext (extract_high LHS)),
15842 //      (zeroext (extract_high RHS)))
15843 // -> uaddl2 vD, vN, vM
15844 //
15845 // However, if one of the extracts is something like a duplicate, this
15846 // instruction can still be used profitably. This function puts the DAG into a
15847 // more appropriate form for those patterns to trigger.
15848 static SDValue performAddSubLongCombine(SDNode *N,
15849                                         TargetLowering::DAGCombinerInfo &DCI,
15850                                         SelectionDAG &DAG) {
15851   if (DCI.isBeforeLegalizeOps())
15852     return SDValue();
15853 
15854   MVT VT = N->getSimpleValueType(0);
15855   if (!VT.is128BitVector()) {
15856     if (N->getOpcode() == ISD::ADD)
15857       return performSetccAddFolding(N, DAG);
15858     return SDValue();
15859   }
15860 
15861   // Make sure both branches are extended in the same way.
15862   SDValue LHS = N->getOperand(0);
15863   SDValue RHS = N->getOperand(1);
15864   if ((LHS.getOpcode() != ISD::ZERO_EXTEND &&
15865        LHS.getOpcode() != ISD::SIGN_EXTEND) ||
15866       LHS.getOpcode() != RHS.getOpcode())
15867     return SDValue();
15868 
15869   unsigned ExtType = LHS.getOpcode();
15870 
  // It's not worth doing unless at least one of the inputs is already an
  // extract, but we don't know which it'll be so we have to try both.
15873   if (isEssentiallyExtractHighSubvector(LHS.getOperand(0))) {
15874     RHS = tryExtendDUPToExtractHigh(RHS.getOperand(0), DAG);
15875     if (!RHS.getNode())
15876       return SDValue();
15877 
15878     RHS = DAG.getNode(ExtType, SDLoc(N), VT, RHS);
15879   } else if (isEssentiallyExtractHighSubvector(RHS.getOperand(0))) {
15880     LHS = tryExtendDUPToExtractHigh(LHS.getOperand(0), DAG);
15881     if (!LHS.getNode())
15882       return SDValue();
15883 
15884     LHS = DAG.getNode(ExtType, SDLoc(N), VT, LHS);
15885   }
15886 
15887   return DAG.getNode(N->getOpcode(), SDLoc(N), VT, LHS, RHS);
15888 }
15889 
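// Returns true if Op is a SUBS node whose value result is unused, i.e. a
// compare that exists only to set the flags.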
15890 static bool isCMP(SDValue Op) {
15891   return Op.getOpcode() == AArch64ISD::SUBS &&
15892          !Op.getNode()->hasAnyUseOfValue(0);
15893 }
15894 
15895 // (CSEL 1 0 CC Cond) => CC
15896 // (CSEL 0 1 CC Cond) => !CC
15897 static Optional<AArch64CC::CondCode> getCSETCondCode(SDValue Op) {
15898   if (Op.getOpcode() != AArch64ISD::CSEL)
15899     return None;
15900   auto CC = static_cast<AArch64CC::CondCode>(Op.getConstantOperandVal(2));
15901   if (CC == AArch64CC::AL || CC == AArch64CC::NV)
15902     return None;
15903   SDValue OpLHS = Op.getOperand(0);
15904   SDValue OpRHS = Op.getOperand(1);
15905   if (isOneConstant(OpLHS) && isNullConstant(OpRHS))
15906     return CC;
15907   if (isNullConstant(OpLHS) && isOneConstant(OpRHS))
15908     return getInvertedCondCode(CC);
15909 
15910   return None;
15911 }
15912 
15913 // (ADC{S} l r (CMP (CSET HS carry) 1)) => (ADC{S} l r carry)
15914 // (SBC{S} l r (CMP 0 (CSET LO carry))) => (SBC{S} l r carry)
15915 static SDValue foldOverflowCheck(SDNode *Op, SelectionDAG &DAG, bool IsAdd) {
15916   SDValue CmpOp = Op->getOperand(2);
15917   if (!isCMP(CmpOp))
15918     return SDValue();
15919 
15920   if (IsAdd) {
15921     if (!isOneConstant(CmpOp.getOperand(1)))
15922       return SDValue();
15923   } else {
15924     if (!isNullConstant(CmpOp.getOperand(0)))
15925       return SDValue();
15926   }
15927 
15928   SDValue CsetOp = CmpOp->getOperand(IsAdd ? 0 : 1);
15929   auto CC = getCSETCondCode(CsetOp);
15930   if (CC != (IsAdd ? AArch64CC::HS : AArch64CC::LO))
15931     return SDValue();
15932 
15933   return DAG.getNode(Op->getOpcode(), SDLoc(Op), Op->getVTList(),
15934                      Op->getOperand(0), Op->getOperand(1),
15935                      CsetOp.getOperand(3));
15936 }
15937 
15938 // (ADC x 0 cond) => (CINC x HS cond)
15939 static SDValue foldADCToCINC(SDNode *N, SelectionDAG &DAG) {
15940   SDValue LHS = N->getOperand(0);
15941   SDValue RHS = N->getOperand(1);
15942   SDValue Cond = N->getOperand(2);
15943 
15944   if (!isNullConstant(RHS))
15945     return SDValue();
15946 
15947   EVT VT = N->getValueType(0);
15948   SDLoc DL(N);
15949 
15950   // (CINC x cc cond) <=> (CSINC x x !cc cond)
15951   SDValue CC = DAG.getConstant(AArch64CC::LO, DL, MVT::i32);
15952   return DAG.getNode(AArch64ISD::CSINC, DL, VT, LHS, LHS, CC, Cond);
15953 }
15954 
15955 // Transform vector add(zext i8 to i32, zext i8 to i32)
15956 //  into sext(add(zext(i8 to i16), zext(i8 to i16)) to i32)
// This allows extra uses of saddl/uaddl at the lower vector widths, and fewer
// extends.
15959 static SDValue performVectorAddSubExtCombine(SDNode *N, SelectionDAG &DAG) {
15960   EVT VT = N->getValueType(0);
15961   if (!VT.isFixedLengthVector() || VT.getSizeInBits() <= 128 ||
15962       (N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
15963        N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND) ||
15964       (N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
15965        N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND) ||
15966       N->getOperand(0).getOperand(0).getValueType() !=
15967           N->getOperand(1).getOperand(0).getValueType())
15968     return SDValue();
15969 
15970   SDValue N0 = N->getOperand(0).getOperand(0);
15971   SDValue N1 = N->getOperand(1).getOperand(0);
15972   EVT InVT = N0.getValueType();
15973 
15974   EVT S1 = InVT.getScalarType();
15975   EVT S2 = VT.getScalarType();
15976   if ((S2 == MVT::i32 && S1 == MVT::i8) ||
15977       (S2 == MVT::i64 && (S1 == MVT::i8 || S1 == MVT::i16))) {
15978     SDLoc DL(N);
15979     EVT HalfVT = EVT::getVectorVT(*DAG.getContext(),
15980                                   S2.getHalfSizedIntegerVT(*DAG.getContext()),
15981                                   VT.getVectorElementCount());
15982     SDValue NewN0 = DAG.getNode(N->getOperand(0).getOpcode(), DL, HalfVT, N0);
15983     SDValue NewN1 = DAG.getNode(N->getOperand(1).getOpcode(), DL, HalfVT, N1);
15984     SDValue NewOp = DAG.getNode(N->getOpcode(), DL, HalfVT, NewN0, NewN1);
15985     return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, NewOp);
15986   }
15987   return SDValue();
15988 }
15989 
15990 static SDValue performBuildVectorCombine(SDNode *N,
15991                                          TargetLowering::DAGCombinerInfo &DCI,
15992                                          SelectionDAG &DAG) {
15993   SDLoc DL(N);
15994 
15995   // A build vector of two extracted elements is equivalent to an
15996   // extract subvector where the inner vector is any-extended to the
15997   // extract_vector_elt VT.
15998   //    (build_vector (extract_elt_iXX_to_i32 vec Idx+0)
15999   //                  (extract_elt_iXX_to_i32 vec Idx+1))
16000   // => (extract_subvector (anyext_iXX_to_i32 vec) Idx)
16001 
16002   // For now, only consider the v2i32 case, which arises as a result of
16003   // legalization.
16004   if (N->getValueType(0) != MVT::v2i32)
16005     return SDValue();
16006 
16007   SDValue Elt0 = N->getOperand(0), Elt1 = N->getOperand(1);
16008   // Reminder, EXTRACT_VECTOR_ELT has the effect of any-extending to its VT.
16009   if (Elt0->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
16010       Elt1->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
16011       // Constant index.
16012       isa<ConstantSDNode>(Elt0->getOperand(1)) &&
16013       isa<ConstantSDNode>(Elt1->getOperand(1)) &&
16014       // Both EXTRACT_VECTOR_ELT from same vector...
16015       Elt0->getOperand(0) == Elt1->getOperand(0) &&
16016       // ... and contiguous. First element's index +1 == second element's index.
16017       Elt0->getConstantOperandVal(1) + 1 == Elt1->getConstantOperandVal(1)) {
16018     SDValue VecToExtend = Elt0->getOperand(0);
16019     EVT ExtVT = VecToExtend.getValueType().changeVectorElementType(MVT::i32);
16020     if (!DAG.getTargetLoweringInfo().isTypeLegal(ExtVT))
16021       return SDValue();
16022 
16023     SDValue SubvectorIdx = DAG.getVectorIdxConstant(Elt0->getConstantOperandVal(1), DL);
16024 
16025     SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, DL, ExtVT, VecToExtend);
16026     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Ext,
16027                        SubvectorIdx);
16028   }
16029 
16030   return SDValue();
16031 }
16032 
16033 static SDValue performAddSubCombine(SDNode *N,
16034                                     TargetLowering::DAGCombinerInfo &DCI,
16035                                     SelectionDAG &DAG) {
16036   // Try to change sum of two reductions.
16037   if (SDValue Val = performAddUADDVCombine(N, DAG))
16038     return Val;
16039   if (SDValue Val = performAddDotCombine(N, DAG))
16040     return Val;
16041   if (SDValue Val = performAddCSelIntoCSinc(N, DAG))
16042     return Val;
16043   if (SDValue Val = performNegCSelCombine(N, DAG))
16044     return Val;
16045   if (SDValue Val = performVectorAddSubExtCombine(N, DAG))
16046     return Val;
16047 
16048   return performAddSubLongCombine(N, DCI, DAG);
16049 }
16050 
16051 // Massage DAGs which we can use the high-half "long" operations on into
16052 // something isel will recognize better. E.g.
16053 //
// (aarch64_neon_umull (extract_high vec) (dupv64 scalar)) -->
//   (aarch64_neon_umull (extract_high (v2i64 vec))
//                       (extract_high (v2i64 (dup128 scalar))))
16057 //
16058 static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N,
16059                                        TargetLowering::DAGCombinerInfo &DCI,
16060                                        SelectionDAG &DAG) {
16061   if (DCI.isBeforeLegalizeOps())
16062     return SDValue();
16063 
16064   SDValue LHS = N->getOperand((IID == Intrinsic::not_intrinsic) ? 0 : 1);
16065   SDValue RHS = N->getOperand((IID == Intrinsic::not_intrinsic) ? 1 : 2);
16066   assert(LHS.getValueType().is64BitVector() &&
16067          RHS.getValueType().is64BitVector() &&
16068          "unexpected shape for long operation");
16069 
16070   // Either node could be a DUP, but it's not worth doing both of them (you'd
16071   // just as well use the non-high version) so look for a corresponding extract
16072   // operation on the other "wing".
16073   if (isEssentiallyExtractHighSubvector(LHS)) {
16074     RHS = tryExtendDUPToExtractHigh(RHS, DAG);
16075     if (!RHS.getNode())
16076       return SDValue();
16077   } else if (isEssentiallyExtractHighSubvector(RHS)) {
16078     LHS = tryExtendDUPToExtractHigh(LHS, DAG);
16079     if (!LHS.getNode())
16080       return SDValue();
16081   }
16082 
16083   if (IID == Intrinsic::not_intrinsic)
16084     return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), LHS, RHS);
16085 
16086   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), N->getValueType(0),
16087                      N->getOperand(0), LHS, RHS);
16088 }
16089 
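// Try to lower a NEON shift-by-constant intrinsic to the equivalent
// AArch64ISD immediate-shift node, e.g.
//   (aarch64_neon_sqshl X, (splat C)) --> (AArch64ISD::SQSHL_I X, C)
// For the rounding right shifts (srshl/urshl) a negative constant amount is
// converted into a positive immediate on the right-shift node.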
16090 static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) {
16091   MVT ElemTy = N->getSimpleValueType(0).getScalarType();
16092   unsigned ElemBits = ElemTy.getSizeInBits();
16093 
16094   int64_t ShiftAmount;
16095   if (BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(2))) {
16096     APInt SplatValue, SplatUndef;
16097     unsigned SplatBitSize;
16098     bool HasAnyUndefs;
16099     if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
16100                               HasAnyUndefs, ElemBits) ||
16101         SplatBitSize != ElemBits)
16102       return SDValue();
16103 
16104     ShiftAmount = SplatValue.getSExtValue();
16105   } else if (ConstantSDNode *CVN = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
16106     ShiftAmount = CVN->getSExtValue();
16107   } else
16108     return SDValue();
16109 
16110   unsigned Opcode;
16111   bool IsRightShift;
16112   switch (IID) {
16113   default:
16114     llvm_unreachable("Unknown shift intrinsic");
16115   case Intrinsic::aarch64_neon_sqshl:
16116     Opcode = AArch64ISD::SQSHL_I;
16117     IsRightShift = false;
16118     break;
16119   case Intrinsic::aarch64_neon_uqshl:
16120     Opcode = AArch64ISD::UQSHL_I;
16121     IsRightShift = false;
16122     break;
16123   case Intrinsic::aarch64_neon_srshl:
16124     Opcode = AArch64ISD::SRSHR_I;
16125     IsRightShift = true;
16126     break;
16127   case Intrinsic::aarch64_neon_urshl:
16128     Opcode = AArch64ISD::URSHR_I;
16129     IsRightShift = true;
16130     break;
16131   case Intrinsic::aarch64_neon_sqshlu:
16132     Opcode = AArch64ISD::SQSHLU_I;
16133     IsRightShift = false;
16134     break;
16135   case Intrinsic::aarch64_neon_sshl:
16136   case Intrinsic::aarch64_neon_ushl:
16137     // For positive shift amounts we can use SHL, as ushl/sshl perform a regular
16138     // left shift for positive shift amounts. Below, we only replace the current
16139     // node with VSHL, if this condition is met.
16140     Opcode = AArch64ISD::VSHL;
16141     IsRightShift = false;
16142     break;
16143   }
16144 
16145   if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(int)ElemBits) {
16146     SDLoc dl(N);
16147     return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1),
16148                        DAG.getConstant(-ShiftAmount, dl, MVT::i32));
16149   } else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits) {
16150     SDLoc dl(N);
16151     return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1),
16152                        DAG.getConstant(ShiftAmount, dl, MVT::i32));
16153   }
16154 
16155   return SDValue();
16156 }
16157 
16158 // The CRC32[BH] instructions ignore the high bits of their data operand. Since
16159 // the intrinsics must be legal and take an i32, this means there's almost
16160 // certainly going to be a zext in the DAG which we can eliminate.
16161 static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG) {
16162   SDValue AndN = N->getOperand(2);
16163   if (AndN.getOpcode() != ISD::AND)
16164     return SDValue();
16165 
16166   ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(AndN.getOperand(1));
16167   if (!CMask || CMask->getZExtValue() != Mask)
16168     return SDValue();
16169 
16170   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), MVT::i32,
16171                      N->getOperand(0), N->getOperand(1), AndN.getOperand(0));
16172 }
16173 
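// Lower an across-lanes reduction intrinsic to the corresponding AArch64ISD
// node and extract lane 0 of its vector result, e.g.
//   (i32 (aarch64_neon_uaddv V)) --> (extract_vector_elt (UADDV V), 0)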
16174 static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N,
16175                                            SelectionDAG &DAG) {
16176   SDLoc dl(N);
16177   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0),
16178                      DAG.getNode(Opc, dl,
16179                                  N->getOperand(1).getSimpleValueType(),
16180                                  N->getOperand(1)),
16181                      DAG.getConstant(0, dl, MVT::i64));
16182 }
16183 
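// Lower aarch64_sve_index(base, step), which produces the arithmetic sequence
//   <base, base + step, base + 2 * step, ...>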
16184 static SDValue LowerSVEIntrinsicIndex(SDNode *N, SelectionDAG &DAG) {
16185   SDLoc DL(N);
16186   SDValue Op1 = N->getOperand(1);
16187   SDValue Op2 = N->getOperand(2);
16188   EVT ScalarTy = Op2.getValueType();
16189   if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
16190     ScalarTy = MVT::i32;
16191 
  // Lower index_vector(base, step) to
  // mul(step_vector(1), splat(step)) + splat(base).
16193   SDValue StepVector = DAG.getStepVector(DL, N->getValueType(0));
16194   SDValue Step = DAG.getNode(ISD::SPLAT_VECTOR, DL, N->getValueType(0), Op2);
16195   SDValue Mul = DAG.getNode(ISD::MUL, DL, N->getValueType(0), StepVector, Step);
16196   SDValue Base = DAG.getNode(ISD::SPLAT_VECTOR, DL, N->getValueType(0), Op1);
16197   return DAG.getNode(ISD::ADD, DL, N->getValueType(0), Mul, Base);
16198 }
16199 
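// Lower the predicated aarch64_sve_dup intrinsic to DUP_MERGE_PASSTHRU,
// any-extending i8/i16 scalar operands to i32 first since those scalar types
// are not legal.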
16200 static SDValue LowerSVEIntrinsicDUP(SDNode *N, SelectionDAG &DAG) {
16201   SDLoc dl(N);
16202   SDValue Scalar = N->getOperand(3);
16203   EVT ScalarTy = Scalar.getValueType();
16204 
16205   if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
16206     Scalar = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Scalar);
16207 
16208   SDValue Passthru = N->getOperand(1);
16209   SDValue Pred = N->getOperand(2);
16210   return DAG.getNode(AArch64ISD::DUP_MERGE_PASSTHRU, dl, N->getValueType(0),
16211                      Pred, Scalar, Passthru);
16212 }
16213 
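// Lower aarch64_sve_ext by bitcasting both vector operands to bytes and
// scaling the element index into a byte offset, matching the byte-granular
// EXT instruction.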
16214 static SDValue LowerSVEIntrinsicEXT(SDNode *N, SelectionDAG &DAG) {
16215   SDLoc dl(N);
16216   LLVMContext &Ctx = *DAG.getContext();
16217   EVT VT = N->getValueType(0);
16218 
16219   assert(VT.isScalableVector() && "Expected a scalable vector.");
16220 
16221   // Current lowering only supports the SVE-ACLE types.
16222   if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock)
16223     return SDValue();
16224 
16225   unsigned ElemSize = VT.getVectorElementType().getSizeInBits() / 8;
16226   unsigned ByteSize = VT.getSizeInBits().getKnownMinSize() / 8;
16227   EVT ByteVT =
16228       EVT::getVectorVT(Ctx, MVT::i8, ElementCount::getScalable(ByteSize));
16229 
  // Convert everything to the domain of EXT (i.e. bytes).
16231   SDValue Op0 = DAG.getNode(ISD::BITCAST, dl, ByteVT, N->getOperand(1));
16232   SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, ByteVT, N->getOperand(2));
16233   SDValue Op2 = DAG.getNode(ISD::MUL, dl, MVT::i32, N->getOperand(3),
16234                             DAG.getConstant(ElemSize, dl, MVT::i32));
16235 
16236   SDValue EXT = DAG.getNode(AArch64ISD::EXT, dl, ByteVT, Op0, Op1, Op2);
16237   return DAG.getNode(ISD::BITCAST, dl, VT, EXT);
16238 }
16239 
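// Convert an SVE wide-compare intrinsic whose comparator is a splat of an
// in-range immediate into a SETCC_MERGE_ZERO against a splat of that
// immediate:
//   (cmpXY_wide Pg, Zn, (dup C)) --> (SETCC_MERGE_ZERO Pg, Zn, (splat C), CC)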
16240 static SDValue tryConvertSVEWideCompare(SDNode *N, ISD::CondCode CC,
16241                                         TargetLowering::DAGCombinerInfo &DCI,
16242                                         SelectionDAG &DAG) {
16243   if (DCI.isBeforeLegalize())
16244     return SDValue();
16245 
16246   SDValue Comparator = N->getOperand(3);
16247   if (Comparator.getOpcode() == AArch64ISD::DUP ||
16248       Comparator.getOpcode() == ISD::SPLAT_VECTOR) {
16249     unsigned IID = getIntrinsicID(N);
16250     EVT VT = N->getValueType(0);
16251     EVT CmpVT = N->getOperand(2).getValueType();
16252     SDValue Pred = N->getOperand(1);
16253     SDValue Imm;
16254     SDLoc DL(N);
16255 
16256     switch (IID) {
16257     default:
16258       llvm_unreachable("Called with wrong intrinsic!");
16259       break;
16260 
16261     // Signed comparisons
16262     case Intrinsic::aarch64_sve_cmpeq_wide:
16263     case Intrinsic::aarch64_sve_cmpne_wide:
16264     case Intrinsic::aarch64_sve_cmpge_wide:
16265     case Intrinsic::aarch64_sve_cmpgt_wide:
16266     case Intrinsic::aarch64_sve_cmplt_wide:
16267     case Intrinsic::aarch64_sve_cmple_wide: {
16268       if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) {
16269         int64_t ImmVal = CN->getSExtValue();
16270         if (ImmVal >= -16 && ImmVal <= 15)
16271           Imm = DAG.getConstant(ImmVal, DL, MVT::i32);
16272         else
16273           return SDValue();
16274       }
16275       break;
16276     }
16277     // Unsigned comparisons
16278     case Intrinsic::aarch64_sve_cmphs_wide:
16279     case Intrinsic::aarch64_sve_cmphi_wide:
16280     case Intrinsic::aarch64_sve_cmplo_wide:
16281     case Intrinsic::aarch64_sve_cmpls_wide:  {
16282       if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) {
16283         uint64_t ImmVal = CN->getZExtValue();
16284         if (ImmVal <= 127)
16285           Imm = DAG.getConstant(ImmVal, DL, MVT::i32);
16286         else
16287           return SDValue();
16288       }
16289       break;
16290     }
16291     }
16292 
16293     if (!Imm)
16294       return SDValue();
16295 
16296     SDValue Splat = DAG.getNode(ISD::SPLAT_VECTOR, DL, CmpVT, Imm);
16297     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, VT, Pred,
16298                        N->getOperand(2), Splat, DAG.getCondCode(CC));
16299   }
16300 
16301   return SDValue();
16302 }
16303 
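// Emit a PTEST of Pg against Op and materialize the requested flag condition
// as a 0/1 value of type VT via a CSEL on the resulting flags.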
16304 static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
16305                         AArch64CC::CondCode Cond) {
16306   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16307 
16308   SDLoc DL(Op);
16309   assert(Op.getValueType().isScalableVector() &&
16310          TLI.isTypeLegal(Op.getValueType()) &&
16311          "Expected legal scalable vector type!");
16312   assert(Op.getValueType() == Pg.getValueType() &&
16313          "Expected same type for PTEST operands");
16314 
16315   // Ensure target specific opcodes are using legal type.
16316   EVT OutVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
16317   SDValue TVal = DAG.getConstant(1, DL, OutVT);
16318   SDValue FVal = DAG.getConstant(0, DL, OutVT);
16319 
16320   // Ensure operands have type nxv16i1.
16321   if (Op.getValueType() != MVT::nxv16i1) {
16322     if ((Cond == AArch64CC::ANY_ACTIVE || Cond == AArch64CC::NONE_ACTIVE) &&
16323         isZeroingInactiveLanes(Op))
16324       Pg = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv16i1, Pg);
16325     else
16326       Pg = getSVEPredicateBitCast(MVT::nxv16i1, Pg, DAG);
16327     Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv16i1, Op);
16328   }
16329 
16330   // Set condition code (CC) flags.
16331   SDValue Test = DAG.getNode(AArch64ISD::PTEST, DL, MVT::Other, Pg, Op);
16332 
16333   // Convert CC to integer based on requested condition.
16334   // NOTE: Cond is inverted to promote CSEL's removal when it feeds a compare.
16335   SDValue CC = DAG.getConstant(getInvertedCondCode(Cond), DL, MVT::i32);
16336   SDValue Res = DAG.getNode(AArch64ISD::CSEL, DL, OutVT, FVal, TVal, CC, Test);
16337   return DAG.getZExtOrTrunc(Res, DL, VT);
16338 }
16339 
16340 static SDValue combineSVEReductionInt(SDNode *N, unsigned Opc,
16341                                       SelectionDAG &DAG) {
16342   SDLoc DL(N);
16343 
16344   SDValue Pred = N->getOperand(1);
16345   SDValue VecToReduce = N->getOperand(2);
16346 
16347   // NOTE: The integer reduction's result type is not always linked to the
16348   // operand's element type so we construct it from the intrinsic's result type.
16349   EVT ReduceVT = getPackedSVEVectorVT(N->getValueType(0));
16350   SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, VecToReduce);
16351 
16352   // SVE reductions set the whole vector register with the first element
16353   // containing the reduction result, which we'll now extract.
16354   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
16355   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
16356                      Zero);
16357 }
16358 
16359 static SDValue combineSVEReductionFP(SDNode *N, unsigned Opc,
16360                                      SelectionDAG &DAG) {
16361   SDLoc DL(N);
16362 
16363   SDValue Pred = N->getOperand(1);
16364   SDValue VecToReduce = N->getOperand(2);
16365 
16366   EVT ReduceVT = VecToReduce.getValueType();
16367   SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, VecToReduce);
16368 
16369   // SVE reductions set the whole vector register with the first element
16370   // containing the reduction result, which we'll now extract.
16371   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
16372   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
16373                      Zero);
16374 }
16375 
16376 static SDValue combineSVEReductionOrderedFP(SDNode *N, unsigned Opc,
16377                                             SelectionDAG &DAG) {
16378   SDLoc DL(N);
16379 
16380   SDValue Pred = N->getOperand(1);
16381   SDValue InitVal = N->getOperand(2);
16382   SDValue VecToReduce = N->getOperand(3);
16383   EVT ReduceVT = VecToReduce.getValueType();
16384 
16385   // Ordered reductions use the first lane of the result vector as the
16386   // reduction's initial value.
16387   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
16388   InitVal = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ReduceVT,
16389                         DAG.getUNDEF(ReduceVT), InitVal, Zero);
16390 
16391   SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, InitVal, VecToReduce);
16392 
16393   // SVE reductions set the whole vector register with the first element
16394   // containing the reduction result, which we'll now extract.
16395   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
16396                      Zero);
16397 }
16398 
16399 static bool isAllInactivePredicate(SDValue N) {
16400   // Look through cast.
16401   while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST)
16402     N = N.getOperand(0);
16403 
16404   return ISD::isConstantSplatVectorAllZeros(N.getNode());
16405 }
16406 
16407 static bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) {
16408   unsigned NumElts = N.getValueType().getVectorMinNumElements();
16409 
16410   // Look through cast.
16411   while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST) {
16412     N = N.getOperand(0);
16413     // When reinterpreting from a type with fewer elements the "new" elements
16414     // are not active, so bail if they're likely to be used.
16415     if (N.getValueType().getVectorMinNumElements() < NumElts)
16416       return false;
16417   }
16418 
16419   if (ISD::isConstantSplatVectorAllOnes(N.getNode()))
16420     return true;
16421 
16422   // "ptrue p.<ty>, all" can be considered all active when <ty> is the same size
16423   // or smaller than the implicit element type represented by N.
16424   // NOTE: A larger element count implies a smaller element type.
16425   if (N.getOpcode() == AArch64ISD::PTRUE &&
16426       N.getConstantOperandVal(0) == AArch64SVEPredPattern::all)
16427     return N.getValueType().getVectorMinNumElements() >= NumElts;
16428 
16429   // If we're compiling for a specific vector-length, we can check if the
16430   // pattern's VL equals that of the scalable vector at runtime.
16431   if (N.getOpcode() == AArch64ISD::PTRUE) {
16432     const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
16433     unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
16434     unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
16435     if (MaxSVESize && MinSVESize == MaxSVESize) {
16436       unsigned VScale = MaxSVESize / AArch64::SVEBitsPerBlock;
16437       unsigned PatNumElts =
16438           getNumElementsFromSVEPredPattern(N.getConstantOperandVal(0));
16439       return PatNumElts == (NumElts * VScale);
16440     }
16441   }
16442 
16443   return false;
16444 }
16445 
16446 // If a merged operation has no inactive lanes we can relax it to a predicated
16447 // or unpredicated operation, which potentially allows better isel (perhaps
16448 // using immediate forms) or relaxing register reuse requirements.
16449 static SDValue convertMergedOpToPredOp(SDNode *N, unsigned Opc,
16450                                        SelectionDAG &DAG, bool UnpredOp = false,
16451                                        bool SwapOperands = false) {
16452   assert(N->getOpcode() == ISD::INTRINSIC_WO_CHAIN && "Expected intrinsic!");
16453   assert(N->getNumOperands() == 4 && "Expected 3 operand intrinsic!");
16454   SDValue Pg = N->getOperand(1);
16455   SDValue Op1 = N->getOperand(SwapOperands ? 3 : 2);
16456   SDValue Op2 = N->getOperand(SwapOperands ? 2 : 3);
16457 
16458   // ISD way to specify an all active predicate.
16459   if (isAllActivePredicate(DAG, Pg)) {
16460     if (UnpredOp)
16461       return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Op1, Op2);
16462 
16463     return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Pg, Op1, Op2);
16464   }
16465 
16466   // FUTURE: SplatVector(true)
16467   return SDValue();
16468 }
16469 
16470 static SDValue performIntrinsicCombine(SDNode *N,
16471                                        TargetLowering::DAGCombinerInfo &DCI,
16472                                        const AArch64Subtarget *Subtarget) {
16473   SelectionDAG &DAG = DCI.DAG;
16474   unsigned IID = getIntrinsicID(N);
16475   switch (IID) {
16476   default:
16477     break;
16478   case Intrinsic::get_active_lane_mask: {
16479     SDValue Res = SDValue();
16480     EVT VT = N->getValueType(0);
16481     if (VT.isFixedLengthVector()) {
16482       // We can use the SVE whilelo instruction to lower this intrinsic by
16483       // creating the appropriate sequence of scalable vector operations and
16484       // then extracting a fixed-width subvector from the scalable vector.
16485 
16486       SDLoc DL(N);
16487       SDValue ID =
16488           DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64);
16489 
16490       EVT WhileVT = EVT::getVectorVT(
16491           *DAG.getContext(), MVT::i1,
16492           ElementCount::getScalable(VT.getVectorNumElements()));
16493 
16494       // Get promoted scalable vector VT, i.e. promote nxv4i1 -> nxv4i32.
16495       EVT PromVT = getPromotedVTForPredicate(WhileVT);
16496 
16497       // Get the fixed-width equivalent of PromVT for extraction.
16498       EVT ExtVT =
16499           EVT::getVectorVT(*DAG.getContext(), PromVT.getVectorElementType(),
16500                            VT.getVectorElementCount());
16501 
16502       Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WhileVT, ID,
16503                         N->getOperand(1), N->getOperand(2));
16504       Res = DAG.getNode(ISD::SIGN_EXTEND, DL, PromVT, Res);
16505       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtVT, Res,
16506                         DAG.getConstant(0, DL, MVT::i64));
16507       Res = DAG.getNode(ISD::TRUNCATE, DL, VT, Res);
16508     }
16509     return Res;
16510   }
16511   case Intrinsic::aarch64_neon_vcvtfxs2fp:
16512   case Intrinsic::aarch64_neon_vcvtfxu2fp:
16513     return tryCombineFixedPointConvert(N, DCI, DAG);
16514   case Intrinsic::aarch64_neon_saddv:
16515     return combineAcrossLanesIntrinsic(AArch64ISD::SADDV, N, DAG);
16516   case Intrinsic::aarch64_neon_uaddv:
16517     return combineAcrossLanesIntrinsic(AArch64ISD::UADDV, N, DAG);
16518   case Intrinsic::aarch64_neon_sminv:
16519     return combineAcrossLanesIntrinsic(AArch64ISD::SMINV, N, DAG);
16520   case Intrinsic::aarch64_neon_uminv:
16521     return combineAcrossLanesIntrinsic(AArch64ISD::UMINV, N, DAG);
16522   case Intrinsic::aarch64_neon_smaxv:
16523     return combineAcrossLanesIntrinsic(AArch64ISD::SMAXV, N, DAG);
16524   case Intrinsic::aarch64_neon_umaxv:
16525     return combineAcrossLanesIntrinsic(AArch64ISD::UMAXV, N, DAG);
16526   case Intrinsic::aarch64_neon_fmax:
16527     return DAG.getNode(ISD::FMAXIMUM, SDLoc(N), N->getValueType(0),
16528                        N->getOperand(1), N->getOperand(2));
16529   case Intrinsic::aarch64_neon_fmin:
16530     return DAG.getNode(ISD::FMINIMUM, SDLoc(N), N->getValueType(0),
16531                        N->getOperand(1), N->getOperand(2));
16532   case Intrinsic::aarch64_neon_fmaxnm:
16533     return DAG.getNode(ISD::FMAXNUM, SDLoc(N), N->getValueType(0),
16534                        N->getOperand(1), N->getOperand(2));
16535   case Intrinsic::aarch64_neon_fminnm:
16536     return DAG.getNode(ISD::FMINNUM, SDLoc(N), N->getValueType(0),
16537                        N->getOperand(1), N->getOperand(2));
16538   case Intrinsic::aarch64_neon_smull:
16539     return DAG.getNode(AArch64ISD::SMULL, SDLoc(N), N->getValueType(0),
16540                        N->getOperand(1), N->getOperand(2));
16541   case Intrinsic::aarch64_neon_umull:
16542     return DAG.getNode(AArch64ISD::UMULL, SDLoc(N), N->getValueType(0),
16543                        N->getOperand(1), N->getOperand(2));
16544   case Intrinsic::aarch64_neon_pmull:
16545   case Intrinsic::aarch64_neon_sqdmull:
16546     return tryCombineLongOpWithDup(IID, N, DCI, DAG);
16547   case Intrinsic::aarch64_neon_sqshl:
16548   case Intrinsic::aarch64_neon_uqshl:
16549   case Intrinsic::aarch64_neon_sqshlu:
16550   case Intrinsic::aarch64_neon_srshl:
16551   case Intrinsic::aarch64_neon_urshl:
16552   case Intrinsic::aarch64_neon_sshl:
16553   case Intrinsic::aarch64_neon_ushl:
16554     return tryCombineShiftImm(IID, N, DAG);
16555   case Intrinsic::aarch64_crc32b:
16556   case Intrinsic::aarch64_crc32cb:
16557     return tryCombineCRC32(0xff, N, DAG);
16558   case Intrinsic::aarch64_crc32h:
16559   case Intrinsic::aarch64_crc32ch:
16560     return tryCombineCRC32(0xffff, N, DAG);
16561   case Intrinsic::aarch64_sve_saddv:
16562     // There is no i64 version of SADDV because the sign is irrelevant.
16563     if (N->getOperand(2)->getValueType(0).getVectorElementType() == MVT::i64)
16564       return combineSVEReductionInt(N, AArch64ISD::UADDV_PRED, DAG);
16565     else
16566       return combineSVEReductionInt(N, AArch64ISD::SADDV_PRED, DAG);
16567   case Intrinsic::aarch64_sve_uaddv:
16568     return combineSVEReductionInt(N, AArch64ISD::UADDV_PRED, DAG);
16569   case Intrinsic::aarch64_sve_smaxv:
16570     return combineSVEReductionInt(N, AArch64ISD::SMAXV_PRED, DAG);
16571   case Intrinsic::aarch64_sve_umaxv:
16572     return combineSVEReductionInt(N, AArch64ISD::UMAXV_PRED, DAG);
16573   case Intrinsic::aarch64_sve_sminv:
16574     return combineSVEReductionInt(N, AArch64ISD::SMINV_PRED, DAG);
16575   case Intrinsic::aarch64_sve_uminv:
16576     return combineSVEReductionInt(N, AArch64ISD::UMINV_PRED, DAG);
16577   case Intrinsic::aarch64_sve_orv:
16578     return combineSVEReductionInt(N, AArch64ISD::ORV_PRED, DAG);
16579   case Intrinsic::aarch64_sve_eorv:
16580     return combineSVEReductionInt(N, AArch64ISD::EORV_PRED, DAG);
16581   case Intrinsic::aarch64_sve_andv:
16582     return combineSVEReductionInt(N, AArch64ISD::ANDV_PRED, DAG);
16583   case Intrinsic::aarch64_sve_index:
16584     return LowerSVEIntrinsicIndex(N, DAG);
16585   case Intrinsic::aarch64_sve_dup:
16586     return LowerSVEIntrinsicDUP(N, DAG);
16587   case Intrinsic::aarch64_sve_dup_x:
16588     return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), N->getValueType(0),
16589                        N->getOperand(1));
16590   case Intrinsic::aarch64_sve_ext:
16591     return LowerSVEIntrinsicEXT(N, DAG);
16592   case Intrinsic::aarch64_sve_mul:
16593     return convertMergedOpToPredOp(N, AArch64ISD::MUL_PRED, DAG);
16594   case Intrinsic::aarch64_sve_smulh:
16595     return convertMergedOpToPredOp(N, AArch64ISD::MULHS_PRED, DAG);
16596   case Intrinsic::aarch64_sve_umulh:
16597     return convertMergedOpToPredOp(N, AArch64ISD::MULHU_PRED, DAG);
16598   case Intrinsic::aarch64_sve_smin:
16599     return convertMergedOpToPredOp(N, AArch64ISD::SMIN_PRED, DAG);
16600   case Intrinsic::aarch64_sve_umin:
16601     return convertMergedOpToPredOp(N, AArch64ISD::UMIN_PRED, DAG);
16602   case Intrinsic::aarch64_sve_smax:
16603     return convertMergedOpToPredOp(N, AArch64ISD::SMAX_PRED, DAG);
16604   case Intrinsic::aarch64_sve_umax:
16605     return convertMergedOpToPredOp(N, AArch64ISD::UMAX_PRED, DAG);
16606   case Intrinsic::aarch64_sve_lsl:
16607     return convertMergedOpToPredOp(N, AArch64ISD::SHL_PRED, DAG);
16608   case Intrinsic::aarch64_sve_lsr:
16609     return convertMergedOpToPredOp(N, AArch64ISD::SRL_PRED, DAG);
16610   case Intrinsic::aarch64_sve_asr:
16611     return convertMergedOpToPredOp(N, AArch64ISD::SRA_PRED, DAG);
16612   case Intrinsic::aarch64_sve_fadd:
16613     return convertMergedOpToPredOp(N, AArch64ISD::FADD_PRED, DAG);
16614   case Intrinsic::aarch64_sve_fsub:
16615     return convertMergedOpToPredOp(N, AArch64ISD::FSUB_PRED, DAG);
16616   case Intrinsic::aarch64_sve_fmul:
16617     return convertMergedOpToPredOp(N, AArch64ISD::FMUL_PRED, DAG);
16618   case Intrinsic::aarch64_sve_add:
16619     return convertMergedOpToPredOp(N, ISD::ADD, DAG, true);
16620   case Intrinsic::aarch64_sve_sub:
16621     return convertMergedOpToPredOp(N, ISD::SUB, DAG, true);
16622   case Intrinsic::aarch64_sve_subr:
16623     return convertMergedOpToPredOp(N, ISD::SUB, DAG, true, true);
16624   case Intrinsic::aarch64_sve_and:
16625     return convertMergedOpToPredOp(N, ISD::AND, DAG, true);
16626   case Intrinsic::aarch64_sve_bic:
16627     return convertMergedOpToPredOp(N, AArch64ISD::BIC, DAG, true);
16628   case Intrinsic::aarch64_sve_eor:
16629     return convertMergedOpToPredOp(N, ISD::XOR, DAG, true);
16630   case Intrinsic::aarch64_sve_orr:
16631     return convertMergedOpToPredOp(N, ISD::OR, DAG, true);
16632   case Intrinsic::aarch64_sve_sabd:
16633     return convertMergedOpToPredOp(N, ISD::ABDS, DAG, true);
16634   case Intrinsic::aarch64_sve_uabd:
16635     return convertMergedOpToPredOp(N, ISD::ABDU, DAG, true);
16636   case Intrinsic::aarch64_sve_sqadd:
16637     return convertMergedOpToPredOp(N, ISD::SADDSAT, DAG, true);
16638   case Intrinsic::aarch64_sve_sqsub:
16639     return convertMergedOpToPredOp(N, ISD::SSUBSAT, DAG, true);
16640   case Intrinsic::aarch64_sve_uqadd:
16641     return convertMergedOpToPredOp(N, ISD::UADDSAT, DAG, true);
16642   case Intrinsic::aarch64_sve_uqsub:
16643     return convertMergedOpToPredOp(N, ISD::USUBSAT, DAG, true);
16644   case Intrinsic::aarch64_sve_sqadd_x:
16645     return DAG.getNode(ISD::SADDSAT, SDLoc(N), N->getValueType(0),
16646                        N->getOperand(1), N->getOperand(2));
16647   case Intrinsic::aarch64_sve_sqsub_x:
16648     return DAG.getNode(ISD::SSUBSAT, SDLoc(N), N->getValueType(0),
16649                        N->getOperand(1), N->getOperand(2));
16650   case Intrinsic::aarch64_sve_uqadd_x:
16651     return DAG.getNode(ISD::UADDSAT, SDLoc(N), N->getValueType(0),
16652                        N->getOperand(1), N->getOperand(2));
16653   case Intrinsic::aarch64_sve_uqsub_x:
16654     return DAG.getNode(ISD::USUBSAT, SDLoc(N), N->getValueType(0),
16655                        N->getOperand(1), N->getOperand(2));
16656   case Intrinsic::aarch64_sve_asrd:
16657     return DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, SDLoc(N), N->getValueType(0),
16658                        N->getOperand(1), N->getOperand(2), N->getOperand(3));
16659   case Intrinsic::aarch64_sve_cmphs:
16660     if (!N->getOperand(2).getValueType().isFloatingPoint())
16661       return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16662                          N->getValueType(0), N->getOperand(1), N->getOperand(2),
16663                          N->getOperand(3), DAG.getCondCode(ISD::SETUGE));
16664     break;
16665   case Intrinsic::aarch64_sve_cmphi:
16666     if (!N->getOperand(2).getValueType().isFloatingPoint())
16667       return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16668                          N->getValueType(0), N->getOperand(1), N->getOperand(2),
16669                          N->getOperand(3), DAG.getCondCode(ISD::SETUGT));
16670     break;
16671   case Intrinsic::aarch64_sve_fcmpge:
16672   case Intrinsic::aarch64_sve_cmpge:
16673     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16674                        N->getValueType(0), N->getOperand(1), N->getOperand(2),
16675                        N->getOperand(3), DAG.getCondCode(ISD::SETGE));
16676     break;
16677   case Intrinsic::aarch64_sve_fcmpgt:
16678   case Intrinsic::aarch64_sve_cmpgt:
16679     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16680                        N->getValueType(0), N->getOperand(1), N->getOperand(2),
16681                        N->getOperand(3), DAG.getCondCode(ISD::SETGT));
16682     break;
16683   case Intrinsic::aarch64_sve_fcmpeq:
16684   case Intrinsic::aarch64_sve_cmpeq:
16685     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16686                        N->getValueType(0), N->getOperand(1), N->getOperand(2),
16687                        N->getOperand(3), DAG.getCondCode(ISD::SETEQ));
16688     break;
16689   case Intrinsic::aarch64_sve_fcmpne:
16690   case Intrinsic::aarch64_sve_cmpne:
16691     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16692                        N->getValueType(0), N->getOperand(1), N->getOperand(2),
16693                        N->getOperand(3), DAG.getCondCode(ISD::SETNE));
16694     break;
16695   case Intrinsic::aarch64_sve_fcmpuo:
16696     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16697                        N->getValueType(0), N->getOperand(1), N->getOperand(2),
16698                        N->getOperand(3), DAG.getCondCode(ISD::SETUO));
16699     break;
16700   case Intrinsic::aarch64_sve_fadda:
16701     return combineSVEReductionOrderedFP(N, AArch64ISD::FADDA_PRED, DAG);
16702   case Intrinsic::aarch64_sve_faddv:
16703     return combineSVEReductionFP(N, AArch64ISD::FADDV_PRED, DAG);
16704   case Intrinsic::aarch64_sve_fmaxnmv:
16705     return combineSVEReductionFP(N, AArch64ISD::FMAXNMV_PRED, DAG);
16706   case Intrinsic::aarch64_sve_fmaxv:
16707     return combineSVEReductionFP(N, AArch64ISD::FMAXV_PRED, DAG);
16708   case Intrinsic::aarch64_sve_fminnmv:
16709     return combineSVEReductionFP(N, AArch64ISD::FMINNMV_PRED, DAG);
16710   case Intrinsic::aarch64_sve_fminv:
16711     return combineSVEReductionFP(N, AArch64ISD::FMINV_PRED, DAG);
16712   case Intrinsic::aarch64_sve_sel:
16713     return DAG.getNode(ISD::VSELECT, SDLoc(N), N->getValueType(0),
16714                        N->getOperand(1), N->getOperand(2), N->getOperand(3));
16715   case Intrinsic::aarch64_sve_cmpeq_wide:
16716     return tryConvertSVEWideCompare(N, ISD::SETEQ, DCI, DAG);
16717   case Intrinsic::aarch64_sve_cmpne_wide:
16718     return tryConvertSVEWideCompare(N, ISD::SETNE, DCI, DAG);
16719   case Intrinsic::aarch64_sve_cmpge_wide:
16720     return tryConvertSVEWideCompare(N, ISD::SETGE, DCI, DAG);
16721   case Intrinsic::aarch64_sve_cmpgt_wide:
16722     return tryConvertSVEWideCompare(N, ISD::SETGT, DCI, DAG);
16723   case Intrinsic::aarch64_sve_cmplt_wide:
16724     return tryConvertSVEWideCompare(N, ISD::SETLT, DCI, DAG);
16725   case Intrinsic::aarch64_sve_cmple_wide:
16726     return tryConvertSVEWideCompare(N, ISD::SETLE, DCI, DAG);
16727   case Intrinsic::aarch64_sve_cmphs_wide:
16728     return tryConvertSVEWideCompare(N, ISD::SETUGE, DCI, DAG);
16729   case Intrinsic::aarch64_sve_cmphi_wide:
16730     return tryConvertSVEWideCompare(N, ISD::SETUGT, DCI, DAG);
16731   case Intrinsic::aarch64_sve_cmplo_wide:
16732     return tryConvertSVEWideCompare(N, ISD::SETULT, DCI, DAG);
16733   case Intrinsic::aarch64_sve_cmpls_wide:
16734     return tryConvertSVEWideCompare(N, ISD::SETULE, DCI, DAG);
16735   case Intrinsic::aarch64_sve_ptest_any:
16736     return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2),
16737                     AArch64CC::ANY_ACTIVE);
16738   case Intrinsic::aarch64_sve_ptest_first:
16739     return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2),
16740                     AArch64CC::FIRST_ACTIVE);
16741   case Intrinsic::aarch64_sve_ptest_last:
16742     return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2),
16743                     AArch64CC::LAST_ACTIVE);
16744   }
16745   return SDValue();
16746 }
16747 
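// Returns true if it is cheap to sign- or zero-extend the value \p N, i.e. N
// is a plain or masked load (which can become an extending load) or a splat
// of constant zero.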
16748 static bool isCheapToExtend(const SDValue &N) {
16749   unsigned OC = N->getOpcode();
16750   return OC == ISD::LOAD || OC == ISD::MLOAD ||
16751          ISD::isConstantSplatVectorAllZeros(N.getNode());
16752 }
16753 
16754 static SDValue
16755 performSignExtendSetCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
16756                               SelectionDAG &DAG) {
16757   // If we have (sext (setcc A B)) and A and B are cheap to extend,
16758   // we can move the sext into the arguments and have the same result. For
16759   // example, if A and B are both loads, we can make those extending loads and
  // avoid an extra instruction. This pattern appears often in VLS code
  // generation, where the inputs to the setcc have a different size from the
  // type used by the instruction that wants to use the result of the setcc.
16763   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
16764          N->getOperand(0)->getOpcode() == ISD::SETCC);
16765   const SDValue SetCC = N->getOperand(0);
16766 
16767   const SDValue CCOp0 = SetCC.getOperand(0);
16768   const SDValue CCOp1 = SetCC.getOperand(1);
16769   if (!CCOp0->getValueType(0).isInteger() ||
16770       !CCOp1->getValueType(0).isInteger())
16771     return SDValue();
16772 
16773   ISD::CondCode Code =
16774       cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get();
16775 
16776   ISD::NodeType ExtType =
16777       isSignedIntSetCC(Code) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
16778 
16779   if (isCheapToExtend(SetCC.getOperand(0)) &&
16780       isCheapToExtend(SetCC.getOperand(1))) {
16781     const SDValue Ext1 =
16782         DAG.getNode(ExtType, SDLoc(N), N->getValueType(0), CCOp0);
16783     const SDValue Ext2 =
16784         DAG.getNode(ExtType, SDLoc(N), N->getValueType(0), CCOp1);
16785 
16786     return DAG.getSetCC(
16787         SDLoc(SetCC), N->getValueType(0), Ext1, Ext2,
16788         cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get());
16789   }
16790 
16791   return SDValue();
16792 }
16793 
16794 static SDValue performExtendCombine(SDNode *N,
16795                                     TargetLowering::DAGCombinerInfo &DCI,
16796                                     SelectionDAG &DAG) {
16797   // If we see something like (zext (sabd (extract_high ...), (DUP ...))) then
16798   // we can convert that DUP into another extract_high (of a bigger DUP), which
16799   // helps the backend to decide that an sabdl2 would be useful, saving a real
16800   // extract_high operation.
16801   if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ZERO_EXTEND &&
16802       (N->getOperand(0).getOpcode() == ISD::ABDU ||
16803        N->getOperand(0).getOpcode() == ISD::ABDS)) {
16804     SDNode *ABDNode = N->getOperand(0).getNode();
16805     SDValue NewABD =
16806         tryCombineLongOpWithDup(Intrinsic::not_intrinsic, ABDNode, DCI, DAG);
16807     if (!NewABD.getNode())
16808       return SDValue();
16809 
16810     return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), NewABD);
16811   }
16812 
16813   if (N->getValueType(0).isFixedLengthVector() &&
16814       N->getOpcode() == ISD::SIGN_EXTEND &&
16815       N->getOperand(0)->getOpcode() == ISD::SETCC)
16816     return performSignExtendSetCCCombine(N, DCI, DAG);
16817 
16818   return SDValue();
16819 }
16820 
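// Replace the vector store \p St with \p NumVecElts scalar stores of
// \p SplatVal at consecutive element offsets from the original base pointer,
// so that the load/store optimizer can merge them into store pairs.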
16821 static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St,
16822                                SDValue SplatVal, unsigned NumVecElts) {
16823   assert(!St.isTruncatingStore() && "cannot split truncating vector store");
16824   Align OrigAlignment = St.getAlign();
16825   unsigned EltOffset = SplatVal.getValueType().getSizeInBits() / 8;
16826 
16827   // Create scalar stores. This is at least as good as the code sequence for a
16828   // split unaligned store which is a dup.s, ext.b, and two stores.
16829   // Most of the time the three stores should be replaced by store pair
16830   // instructions (stp).
16831   SDLoc DL(&St);
16832   SDValue BasePtr = St.getBasePtr();
16833   uint64_t BaseOffset = 0;
16834 
16835   const MachinePointerInfo &PtrInfo = St.getPointerInfo();
16836   SDValue NewST1 =
16837       DAG.getStore(St.getChain(), DL, SplatVal, BasePtr, PtrInfo,
16838                    OrigAlignment, St.getMemOperand()->getFlags());
16839 
  // As this is in ISel, we will not merge this add, which may degrade results.
16841   if (BasePtr->getOpcode() == ISD::ADD &&
16842       isa<ConstantSDNode>(BasePtr->getOperand(1))) {
16843     BaseOffset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
16844     BasePtr = BasePtr->getOperand(0);
16845   }
16846 
16847   unsigned Offset = EltOffset;
16848   while (--NumVecElts) {
16849     Align Alignment = commonAlignment(OrigAlignment, Offset);
16850     SDValue OffsetPtr =
16851         DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
16852                     DAG.getConstant(BaseOffset + Offset, DL, MVT::i64));
16853     NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr,
16854                           PtrInfo.getWithOffset(Offset), Alignment,
16855                           St.getMemOperand()->getFlags());
16856     Offset += EltOffset;
16857   }
16858   return NewST1;
16859 }
16860 
16861 // Returns an SVE type that ContentTy can be trivially sign or zero extended
16862 // into.
16863 static MVT getSVEContainerType(EVT ContentTy) {
16864   assert(ContentTy.isSimple() && "No SVE containers for extended types");
16865 
16866   switch (ContentTy.getSimpleVT().SimpleTy) {
16867   default:
16868     llvm_unreachable("No known SVE container for this MVT type");
16869   case MVT::nxv2i8:
16870   case MVT::nxv2i16:
16871   case MVT::nxv2i32:
16872   case MVT::nxv2i64:
16873   case MVT::nxv2f32:
16874   case MVT::nxv2f64:
16875     return MVT::nxv2i64;
16876   case MVT::nxv4i8:
16877   case MVT::nxv4i16:
16878   case MVT::nxv4i32:
16879   case MVT::nxv4f32:
16880     return MVT::nxv4i32;
16881   case MVT::nxv8i8:
16882   case MVT::nxv8i16:
16883   case MVT::nxv8f16:
16884   case MVT::nxv8bf16:
16885     return MVT::nxv8i16;
16886   case MVT::nxv16i8:
16887     return MVT::nxv16i8;
16888   }
16889 }
16890 
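// Lower an SVE predicated load intrinsic to the predicated load node \p Opc.
// Integer loads are performed in the wider SVE container type and the result
// is truncated back to the original type where necessary.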
16891 static SDValue performLD1Combine(SDNode *N, SelectionDAG &DAG, unsigned Opc) {
16892   SDLoc DL(N);
16893   EVT VT = N->getValueType(0);
16894 
16895   if (VT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
16896     return SDValue();
16897 
16898   EVT ContainerVT = VT;
16899   if (ContainerVT.isInteger())
16900     ContainerVT = getSVEContainerType(ContainerVT);
16901 
16902   SDVTList VTs = DAG.getVTList(ContainerVT, MVT::Other);
16903   SDValue Ops[] = { N->getOperand(0), // Chain
16904                     N->getOperand(2), // Pg
16905                     N->getOperand(3), // Base
16906                     DAG.getValueType(VT) };
16907 
16908   SDValue Load = DAG.getNode(Opc, DL, VTs, Ops);
16909   SDValue LoadChain = SDValue(Load.getNode(), 1);
16910 
16911   if (ContainerVT.isInteger() && (VT != ContainerVT))
16912     Load = DAG.getNode(ISD::TRUNCATE, DL, VT, Load.getValue(0));
16913 
16914   return DAG.getMergeValues({ Load, LoadChain }, DL);
16915 }
16916 
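// Lower an SVE LDNT1 intrinsic to a masked load with a zeroing passthru.
// Floating-point types are loaded as integers and bitcast back afterwards.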
16917 static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) {
16918   SDLoc DL(N);
16919   EVT VT = N->getValueType(0);
16920   EVT PtrTy = N->getOperand(3).getValueType();
16921 
16922   EVT LoadVT = VT;
16923   if (VT.isFloatingPoint())
16924     LoadVT = VT.changeTypeToInteger();
16925 
16926   auto *MINode = cast<MemIntrinsicSDNode>(N);
16927   SDValue PassThru = DAG.getConstant(0, DL, LoadVT);
16928   SDValue L = DAG.getMaskedLoad(LoadVT, DL, MINode->getChain(),
16929                                 MINode->getOperand(3), DAG.getUNDEF(PtrTy),
16930                                 MINode->getOperand(2), PassThru,
16931                                 MINode->getMemoryVT(), MINode->getMemOperand(),
16932                                 ISD::UNINDEXED, ISD::NON_EXTLOAD, false);
16933 
  if (VT.isFloatingPoint()) {
    SDValue Ops[] = { DAG.getNode(ISD::BITCAST, DL, VT, L), L.getValue(1) };
    return DAG.getMergeValues(Ops, DL);
  }
16938 
16939   return L;
16940 }
16941 
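// Lower an SVE LD1RQ/LD1RO replicating-load intrinsic to the corresponding
// merge-zero node, loading floating-point types as integers and bitcasting
// the result back.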
16942 template <unsigned Opcode>
16943 static SDValue performLD1ReplicateCombine(SDNode *N, SelectionDAG &DAG) {
16944   static_assert(Opcode == AArch64ISD::LD1RQ_MERGE_ZERO ||
16945                     Opcode == AArch64ISD::LD1RO_MERGE_ZERO,
16946                 "Unsupported opcode.");
16947   SDLoc DL(N);
16948   EVT VT = N->getValueType(0);
16949 
16950   EVT LoadVT = VT;
16951   if (VT.isFloatingPoint())
16952     LoadVT = VT.changeTypeToInteger();
16953 
16954   SDValue Ops[] = {N->getOperand(0), N->getOperand(2), N->getOperand(3)};
16955   SDValue Load = DAG.getNode(Opcode, DL, {LoadVT, MVT::Other}, Ops);
16956   SDValue LoadChain = SDValue(Load.getNode(), 1);
16957 
16958   if (VT.isFloatingPoint())
16959     Load = DAG.getNode(ISD::BITCAST, DL, VT, Load.getValue(0));
16960 
16961   return DAG.getMergeValues({Load, LoadChain}, DL);
16962 }
16963 
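// Lower an SVE ST1 intrinsic to an AArch64ISD::ST1_PRED node, any-extending
// (or bitcasting, for floating point) the data to its SVE container type.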
16964 static SDValue performST1Combine(SDNode *N, SelectionDAG &DAG) {
16965   SDLoc DL(N);
16966   SDValue Data = N->getOperand(2);
16967   EVT DataVT = Data.getValueType();
16968   EVT HwSrcVt = getSVEContainerType(DataVT);
16969   SDValue InputVT = DAG.getValueType(DataVT);
16970 
16971   if (DataVT.isFloatingPoint())
16972     InputVT = DAG.getValueType(HwSrcVt);
16973 
16974   SDValue SrcNew;
16975   if (Data.getValueType().isFloatingPoint())
16976     SrcNew = DAG.getNode(ISD::BITCAST, DL, HwSrcVt, Data);
16977   else
16978     SrcNew = DAG.getNode(ISD::ANY_EXTEND, DL, HwSrcVt, Data);
16979 
16980   SDValue Ops[] = { N->getOperand(0), // Chain
16981                     SrcNew,
16982                     N->getOperand(4), // Base
16983                     N->getOperand(3), // Pg
16984                     InputVT
16985                   };
16986 
16987   return DAG.getNode(AArch64ISD::ST1_PRED, DL, N->getValueType(0), Ops);
16988 }
16989 
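// Lower an SVE STNT1 intrinsic to a masked store, bitcasting floating-point
// data to the equivalent integer type first.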
16990 static SDValue performSTNT1Combine(SDNode *N, SelectionDAG &DAG) {
16991   SDLoc DL(N);
16992 
16993   SDValue Data = N->getOperand(2);
16994   EVT DataVT = Data.getValueType();
16995   EVT PtrTy = N->getOperand(4).getValueType();
16996 
16997   if (DataVT.isFloatingPoint())
16998     Data = DAG.getNode(ISD::BITCAST, DL, DataVT.changeTypeToInteger(), Data);
16999 
17000   auto *MINode = cast<MemIntrinsicSDNode>(N);
17001   return DAG.getMaskedStore(MINode->getChain(), DL, Data, MINode->getOperand(4),
17002                             DAG.getUNDEF(PtrTy), MINode->getOperand(3),
17003                             MINode->getMemoryVT(), MINode->getMemOperand(),
17004                             ISD::UNINDEXED, false, false);
17005 }
17006 
/// Replace a store of a splat of zeros to a vector with scalar stores of
/// WZR/XZR. The load/store optimizer pass will merge them into store pair
/// stores. This should be better than a movi to create the vector zero
/// followed by a vector store if the zero constant is not re-used, since one
/// instruction and one register live range will be removed.
17012 ///
17013 /// For example, the final generated code should be:
17014 ///
17015 ///   stp xzr, xzr, [x0]
17016 ///
17017 /// instead of:
17018 ///
17019 ///   movi v0.2d, #0
17020 ///   str q0, [x0]
17021 ///
17022 static SDValue replaceZeroVectorStore(SelectionDAG &DAG, StoreSDNode &St) {
17023   SDValue StVal = St.getValue();
17024   EVT VT = StVal.getValueType();
17025 
17026   // Avoid scalarizing zero splat stores for scalable vectors.
17027   if (VT.isScalableVector())
17028     return SDValue();
17029 
17030   // It is beneficial to scalarize a zero splat store for 2 or 3 i64 elements or
17031   // 2, 3 or 4 i32 elements.
17032   int NumVecElts = VT.getVectorNumElements();
17033   if (!(((NumVecElts == 2 || NumVecElts == 3) &&
17034          VT.getVectorElementType().getSizeInBits() == 64) ||
17035         ((NumVecElts == 2 || NumVecElts == 3 || NumVecElts == 4) &&
17036          VT.getVectorElementType().getSizeInBits() == 32)))
17037     return SDValue();
17038 
17039   if (StVal.getOpcode() != ISD::BUILD_VECTOR)
17040     return SDValue();
17041 
17042   // If the zero constant has more than one use then the vector store could be
17043   // better since the constant mov will be amortized and stp q instructions
17044   // should be able to be formed.
17045   if (!StVal.hasOneUse())
17046     return SDValue();
17047 
17048   // If the store is truncating then it's going down to i16 or smaller, which
17049   // means it can be implemented in a single store anyway.
17050   if (St.isTruncatingStore())
17051     return SDValue();
17052 
17053   // If the immediate offset of the address operand is too large for the stp
17054   // instruction, then bail out.
17055   if (DAG.isBaseWithConstantOffset(St.getBasePtr())) {
17056     int64_t Offset = St.getBasePtr()->getConstantOperandVal(1);
17057     if (Offset < -512 || Offset > 504)
17058       return SDValue();
17059   }
17060 
17061   for (int I = 0; I < NumVecElts; ++I) {
17062     SDValue EltVal = StVal.getOperand(I);
17063     if (!isNullConstant(EltVal) && !isNullFPConstant(EltVal))
17064       return SDValue();
17065   }
17066 
17067   // Use a CopyFromReg WZR/XZR here to prevent
17068   // DAGCombiner::MergeConsecutiveStores from undoing this transformation.
17069   SDLoc DL(&St);
17070   unsigned ZeroReg;
17071   EVT ZeroVT;
17072   if (VT.getVectorElementType().getSizeInBits() == 32) {
17073     ZeroReg = AArch64::WZR;
17074     ZeroVT = MVT::i32;
17075   } else {
17076     ZeroReg = AArch64::XZR;
17077     ZeroVT = MVT::i64;
17078   }
17079   SDValue SplatVal =
17080       DAG.getCopyFromReg(DAG.getEntryNode(), DL, ZeroReg, ZeroVT);
17081   return splitStoreSplat(DAG, St, SplatVal, NumVecElts);
17082 }
17083 
/// Replace a store of a splatted scalar to a vector with scalar stores of the
/// scalar value. The load/store optimizer pass will merge them into store pair
/// stores. This has better performance than a splat of the scalar followed by
/// a split vector store. Even if the stores are not merged, it is four stores
/// vs. a dup followed by an ext.b and two stores.
17089 static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode &St) {
17090   SDValue StVal = St.getValue();
17091   EVT VT = StVal.getValueType();
17092 
  // Don't replace floating point stores; they possibly won't be transformed to
  // an stp because of the store pair suppress pass.
17095   if (VT.isFloatingPoint())
17096     return SDValue();
17097 
17098   // We can express a splat as store pair(s) for 2 or 4 elements.
17099   unsigned NumVecElts = VT.getVectorNumElements();
17100   if (NumVecElts != 4 && NumVecElts != 2)
17101     return SDValue();
17102 
17103   // If the store is truncating then it's going down to i16 or smaller, which
17104   // means it can be implemented in a single store anyway.
17105   if (St.isTruncatingStore())
17106     return SDValue();
17107 
17108   // Check that this is a splat.
17109   // Make sure that each of the relevant vector element locations are inserted
17110   // to, i.e. 0 and 1 for v2i64 and 0, 1, 2, 3 for v4i32.
17111   std::bitset<4> IndexNotInserted((1 << NumVecElts) - 1);
17112   SDValue SplatVal;
17113   for (unsigned I = 0; I < NumVecElts; ++I) {
17114     // Check for insert vector elements.
17115     if (StVal.getOpcode() != ISD::INSERT_VECTOR_ELT)
17116       return SDValue();
17117 
17118     // Check that same value is inserted at each vector element.
17119     if (I == 0)
17120       SplatVal = StVal.getOperand(1);
17121     else if (StVal.getOperand(1) != SplatVal)
17122       return SDValue();
17123 
17124     // Check insert element index.
17125     ConstantSDNode *CIndex = dyn_cast<ConstantSDNode>(StVal.getOperand(2));
17126     if (!CIndex)
17127       return SDValue();
17128     uint64_t IndexVal = CIndex->getZExtValue();
17129     if (IndexVal >= NumVecElts)
17130       return SDValue();
17131     IndexNotInserted.reset(IndexVal);
17132 
17133     StVal = StVal.getOperand(0);
17134   }
17135   // Check that all vector element locations were inserted to.
17136   if (IndexNotInserted.any())
    return SDValue();
17138 
17139   return splitStoreSplat(DAG, St, SplatVal, NumVecElts);
17140 }
17141 
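// Try to optimize fixed-length vector stores: rewrite zero-splat and
// scalar-splat stores as scalar stores (which the load/store optimizer can
// merge into store pairs) and split slow misaligned 128-bit stores into two
// halves.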
17142 static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
17143                            SelectionDAG &DAG,
17144                            const AArch64Subtarget *Subtarget) {
17145 
17146   StoreSDNode *S = cast<StoreSDNode>(N);
17147   if (S->isVolatile() || S->isIndexed())
17148     return SDValue();
17149 
17150   SDValue StVal = S->getValue();
17151   EVT VT = StVal.getValueType();
17152 
17153   if (!VT.isFixedLengthVector())
17154     return SDValue();
17155 
  // If we get a splat of zeros, convert this vector store to a store of
  // scalars. They will be merged into store pairs of xzr, thereby removing one
  // instruction and one register.
17159   if (SDValue ReplacedZeroSplat = replaceZeroVectorStore(DAG, *S))
17160     return ReplacedZeroSplat;
17161 
17162   // FIXME: The logic for deciding if an unaligned store should be split should
17163   // be included in TLI.allowsMisalignedMemoryAccesses(), and there should be
17164   // a call to that function here.
17165 
17166   if (!Subtarget->isMisaligned128StoreSlow())
17167     return SDValue();
17168 
17169   // Don't split at -Oz.
17170   if (DAG.getMachineFunction().getFunction().hasMinSize())
17171     return SDValue();
17172 
17173   // Don't split v2i64 vectors. Memcpy lowering produces those and splitting
17174   // those up regresses performance on micro-benchmarks and olden/bh.
17175   if (VT.getVectorNumElements() < 2 || VT == MVT::v2i64)
17176     return SDValue();
17177 
17178   // Split unaligned 16B stores. They are terrible for performance.
17179   // Don't split stores with alignment of 1 or 2. Code that uses clang vector
17180   // extensions can use this to mark that it does not want splitting to happen
17181   // (by underspecifying alignment to be 1 or 2). Furthermore, the chance of
17182   // eliminating alignment hazards is only 1 in 8 for alignment of 2.
17183   if (VT.getSizeInBits() != 128 || S->getAlign() >= Align(16) ||
17184       S->getAlign() <= Align(2))
17185     return SDValue();
17186 
  // If we get a splat of a scalar, convert this vector store to a store of
  // scalars. They will be merged into store pairs, thereby removing two
  // instructions.
17190   if (SDValue ReplacedSplat = replaceSplatVectorStore(DAG, *S))
17191     return ReplacedSplat;
17192 
17193   SDLoc DL(S);
17194 
17195   // Split VT into two.
17196   EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
17197   unsigned NumElts = HalfVT.getVectorNumElements();
17198   SDValue SubVector0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
17199                                    DAG.getConstant(0, DL, MVT::i64));
17200   SDValue SubVector1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
17201                                    DAG.getConstant(NumElts, DL, MVT::i64));
17202   SDValue BasePtr = S->getBasePtr();
17203   SDValue NewST1 =
17204       DAG.getStore(S->getChain(), DL, SubVector0, BasePtr, S->getPointerInfo(),
17205                    S->getAlign(), S->getMemOperand()->getFlags());
17206   SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
17207                                   DAG.getConstant(8, DL, MVT::i64));
17208   return DAG.getStore(NewST1.getValue(0), DL, SubVector1, OffsetPtr,
17209                       S->getPointerInfo(), S->getAlign(),
17210                       S->getMemOperand()->getFlags());
17211 }
17212 
17213 static SDValue performSpliceCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexpected Opcode!");
17215 
17216   // splice(pg, op1, undef) -> op1
17217   if (N->getOperand(2).isUndef())
17218     return N->getOperand(1);
17219 
17220   return SDValue();
17221 }
17222 
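// Combine UUNPKLO/UUNPKHI nodes: unpacking undef yields undef, and a UUNPKLO
// of a suitable masked load can be folded into a zero-extending masked load.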
17223 static SDValue performUnpackCombine(SDNode *N, SelectionDAG &DAG,
17224                                     const AArch64Subtarget *Subtarget) {
17225   assert((N->getOpcode() == AArch64ISD::UUNPKHI ||
17226           N->getOpcode() == AArch64ISD::UUNPKLO) &&
17227          "Unexpected Opcode!");
17228 
17229   // uunpklo/hi undef -> undef
17230   if (N->getOperand(0).isUndef())
17231     return DAG.getUNDEF(N->getValueType(0));
17232 
17233   // If this is a masked load followed by an UUNPKLO, fold this into a masked
17234   // extending load.  We can do this even if this is already a masked
17235   // {z,}extload.
17236   if (N->getOperand(0).getOpcode() == ISD::MLOAD &&
17237       N->getOpcode() == AArch64ISD::UUNPKLO) {
17238     MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N->getOperand(0));
17239     SDValue Mask = MLD->getMask();
17240     SDLoc DL(N);
17241 
17242     if (MLD->isUnindexed() && MLD->getExtensionType() != ISD::SEXTLOAD &&
17243         SDValue(MLD, 0).hasOneUse() && Mask->getOpcode() == AArch64ISD::PTRUE &&
17244         (MLD->getPassThru()->isUndef() ||
17245          isZerosVector(MLD->getPassThru().getNode()))) {
17246       unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits();
17247       unsigned PgPattern = Mask->getConstantOperandVal(0);
17248       EVT VT = N->getValueType(0);
17249 
17250       // Ensure we can double the size of the predicate pattern
17251       unsigned NumElts = getNumElementsFromSVEPredPattern(PgPattern);
17252       if (NumElts &&
17253           NumElts * VT.getVectorElementType().getSizeInBits() <= MinSVESize) {
17254         Mask =
17255             getPTrue(DAG, DL, VT.changeVectorElementType(MVT::i1), PgPattern);
17256         SDValue PassThru = DAG.getConstant(0, DL, VT);
17257         SDValue NewLoad = DAG.getMaskedLoad(
17258             VT, DL, MLD->getChain(), MLD->getBasePtr(), MLD->getOffset(), Mask,
17259             PassThru, MLD->getMemoryVT(), MLD->getMemOperand(),
17260             MLD->getAddressingMode(), ISD::ZEXTLOAD);
17261 
17262         DAG.ReplaceAllUsesOfValueWith(SDValue(MLD, 1), NewLoad.getValue(1));
17263 
17264         return NewLoad;
17265       }
17266     }
17267   }
17268 
17269   return SDValue();
17270 }
17271 
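// Combine UZP1 nodes: a UZP1 with an undef second operand becomes a truncate
// concatenated with undef, and redundant UUNPKLO/UUNPKHI operands produced
// from an earlier UZP1 are folded away.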
17272 static SDValue performUzpCombine(SDNode *N, SelectionDAG &DAG) {
17273   SDLoc DL(N);
17274   SDValue Op0 = N->getOperand(0);
17275   SDValue Op1 = N->getOperand(1);
17276   EVT ResVT = N->getValueType(0);
17277 
17278   // uzp1(x, undef) -> concat(truncate(x), undef)
17279   if (Op1.getOpcode() == ISD::UNDEF) {
17280     EVT BCVT = MVT::Other, HalfVT = MVT::Other;
17281     switch (ResVT.getSimpleVT().SimpleTy) {
17282     default:
17283       break;
17284     case MVT::v16i8:
17285       BCVT = MVT::v8i16;
17286       HalfVT = MVT::v8i8;
17287       break;
17288     case MVT::v8i16:
17289       BCVT = MVT::v4i32;
17290       HalfVT = MVT::v4i16;
17291       break;
17292     case MVT::v4i32:
17293       BCVT = MVT::v2i64;
17294       HalfVT = MVT::v2i32;
17295       break;
17296     }
17297     if (BCVT != MVT::Other) {
17298       SDValue BC = DAG.getBitcast(BCVT, Op0);
17299       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, HalfVT, BC);
17300       return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResVT, Trunc,
17301                          DAG.getUNDEF(HalfVT));
17302     }
17303   }
17304 
17305   // uzp1(unpklo(uzp1(x, y)), z) => uzp1(x, z)
17306   if (Op0.getOpcode() == AArch64ISD::UUNPKLO) {
17307     if (Op0.getOperand(0).getOpcode() == AArch64ISD::UZP1) {
17308       SDValue X = Op0.getOperand(0).getOperand(0);
17309       return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, X, Op1);
17310     }
17311   }
17312 
17313   // uzp1(x, unpkhi(uzp1(y, z))) => uzp1(x, z)
17314   if (Op1.getOpcode() == AArch64ISD::UUNPKHI) {
17315     if (Op1.getOperand(0).getOpcode() == AArch64ISD::UZP1) {
17316       SDValue Z = Op1.getOperand(0).getOperand(1);
17317       return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, Op0, Z);
17318     }
17319   }
17320 
17321   return SDValue();
17322 }
17323 
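// Combine SVE gather load (GLD1*) nodes by folding sign/zero extensions of
// the vector offset into the gather when the extension uses the same
// predicate as the load and extends from 32 bits.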
17324 static SDValue performGLD1Combine(SDNode *N, SelectionDAG &DAG) {
17325   unsigned Opc = N->getOpcode();
17326 
17327   assert(((Opc >= AArch64ISD::GLD1_MERGE_ZERO && // unsigned gather loads
17328            Opc <= AArch64ISD::GLD1_IMM_MERGE_ZERO) ||
17329           (Opc >= AArch64ISD::GLD1S_MERGE_ZERO && // signed gather loads
17330            Opc <= AArch64ISD::GLD1S_IMM_MERGE_ZERO)) &&
17331          "Invalid opcode.");
17332 
17333   const bool Scaled = Opc == AArch64ISD::GLD1_SCALED_MERGE_ZERO ||
17334                       Opc == AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
17335   const bool Signed = Opc == AArch64ISD::GLD1S_MERGE_ZERO ||
17336                       Opc == AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
17337   const bool Extended = Opc == AArch64ISD::GLD1_SXTW_MERGE_ZERO ||
17338                         Opc == AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO ||
17339                         Opc == AArch64ISD::GLD1_UXTW_MERGE_ZERO ||
17340                         Opc == AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO;
17341 
17342   SDLoc DL(N);
17343   SDValue Chain = N->getOperand(0);
17344   SDValue Pg = N->getOperand(1);
17345   SDValue Base = N->getOperand(2);
17346   SDValue Offset = N->getOperand(3);
17347   SDValue Ty = N->getOperand(4);
17348 
17349   EVT ResVT = N->getValueType(0);
17350 
17351   const auto OffsetOpc = Offset.getOpcode();
17352   const bool OffsetIsZExt =
17353       OffsetOpc == AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU;
17354   const bool OffsetIsSExt =
17355       OffsetOpc == AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU;
17356 
17357   // Fold sign/zero extensions of vector offsets into GLD1 nodes where possible.
17358   if (!Extended && (OffsetIsSExt || OffsetIsZExt)) {
17359     SDValue ExtPg = Offset.getOperand(0);
17360     VTSDNode *ExtFrom = cast<VTSDNode>(Offset.getOperand(2).getNode());
17361     EVT ExtFromEVT = ExtFrom->getVT().getVectorElementType();
17362 
17363     // If the predicate for the sign- or zero-extended offset is the
17364     // same as the predicate used for this load and the sign-/zero-extension
    // was from 32 bits...
17366     if (ExtPg == Pg && ExtFromEVT == MVT::i32) {
17367       SDValue UnextendedOffset = Offset.getOperand(1);
17368 
17369       unsigned NewOpc = getGatherVecOpcode(Scaled, OffsetIsSExt, true);
17370       if (Signed)
17371         NewOpc = getSignExtendedGatherOpcode(NewOpc);
17372 
17373       return DAG.getNode(NewOpc, DL, {ResVT, MVT::Other},
17374                          {Chain, Pg, Base, UnextendedOffset, Ty});
17375     }
17376   }
17377 
17378   return SDValue();
17379 }
17380 
17381 /// Optimize a vector shift instruction and its operand if shifted out
17382 /// bits are not used.
17383 static SDValue performVectorShiftCombine(SDNode *N,
17384                                          const AArch64TargetLowering &TLI,
17385                                          TargetLowering::DAGCombinerInfo &DCI) {
17386   assert(N->getOpcode() == AArch64ISD::VASHR ||
17387          N->getOpcode() == AArch64ISD::VLSHR);
17388 
17389   SDValue Op = N->getOperand(0);
17390   unsigned OpScalarSize = Op.getScalarValueSizeInBits();
17391 
17392   unsigned ShiftImm = N->getConstantOperandVal(1);
17393   assert(OpScalarSize > ShiftImm && "Invalid shift imm");
17394 
17395   APInt ShiftedOutBits = APInt::getLowBitsSet(OpScalarSize, ShiftImm);
17396   APInt DemandedMask = ~ShiftedOutBits;
17397 
17398   if (TLI.SimplifyDemandedBits(Op, DemandedMask, DCI))
17399     return SDValue(N, 0);
17400 
17401   return SDValue();
17402 }
17403 
17404 static SDValue performSunpkloCombine(SDNode *N, SelectionDAG &DAG) {
17405   // sunpklo(sext(pred)) -> sext(extract_low_half(pred))
17406   // This transform works in partnership with performSetCCPunpkCombine to
  // remove unnecessary transfers of predicates into standard registers and
  // back.
17408   if (N->getOperand(0).getOpcode() == ISD::SIGN_EXTEND &&
17409       N->getOperand(0)->getOperand(0)->getValueType(0).getScalarType() ==
17410           MVT::i1) {
17411     SDValue CC = N->getOperand(0)->getOperand(0);
17412     auto VT = CC->getValueType(0).getHalfNumVectorElementsVT(*DAG.getContext());
17413     SDValue Unpk = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT, CC,
17414                                DAG.getVectorIdxConstant(0, SDLoc(N)));
17415     return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), N->getValueType(0), Unpk);
17416   }
17417 
17418   return SDValue();
17419 }
17420 
17421 /// Target-specific DAG combine function for post-increment LD1 (lane) and
17422 /// post-increment LD1R.
17423 static SDValue performPostLD1Combine(SDNode *N,
17424                                      TargetLowering::DAGCombinerInfo &DCI,
17425                                      bool IsLaneOp) {
17426   if (DCI.isBeforeLegalizeOps())
17427     return SDValue();
17428 
17429   SelectionDAG &DAG = DCI.DAG;
17430   EVT VT = N->getValueType(0);
17431 
17432   if (!VT.is128BitVector() && !VT.is64BitVector())
17433     return SDValue();
17434 
17435   unsigned LoadIdx = IsLaneOp ? 1 : 0;
17436   SDNode *LD = N->getOperand(LoadIdx).getNode();
  // If it is not a LOAD, we cannot do this combine.
17438   if (LD->getOpcode() != ISD::LOAD)
17439     return SDValue();
17440 
17441   // The vector lane must be a constant in the LD1LANE opcode.
17442   SDValue Lane;
17443   if (IsLaneOp) {
17444     Lane = N->getOperand(2);
17445     auto *LaneC = dyn_cast<ConstantSDNode>(Lane);
17446     if (!LaneC || LaneC->getZExtValue() >= VT.getVectorNumElements())
17447       return SDValue();
17448   }
17449 
17450   LoadSDNode *LoadSDN = cast<LoadSDNode>(LD);
17451   EVT MemVT = LoadSDN->getMemoryVT();
17452   // Check if memory operand is the same type as the vector element.
17453   if (MemVT != VT.getVectorElementType())
17454     return SDValue();
17455 
17456   // Check if there are other uses. If so, do not combine as it will introduce
17457   // an extra load.
17458   for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end(); UI != UE;
17459        ++UI) {
17460     if (UI.getUse().getResNo() == 1) // Ignore uses of the chain result.
17461       continue;
17462     if (*UI != N)
17463       return SDValue();
17464   }
17465 
17466   SDValue Addr = LD->getOperand(1);
17467   SDValue Vector = N->getOperand(0);
17468   // Search for a use of the address operand that is an increment.
17469   for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), UE =
17470        Addr.getNode()->use_end(); UI != UE; ++UI) {
17471     SDNode *User = *UI;
17472     if (User->getOpcode() != ISD::ADD
17473         || UI.getUse().getResNo() != Addr.getResNo())
17474       continue;
17475 
17476     // If the increment is a constant, it must match the memory ref size.
17477     SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
17478     if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
17479       uint32_t IncVal = CInc->getZExtValue();
17480       unsigned NumBytes = VT.getScalarSizeInBits() / 8;
17481       if (IncVal != NumBytes)
17482         continue;
17483       Inc = DAG.getRegister(AArch64::XZR, MVT::i64);
17484     }
17485 
    // To avoid constructing a cycle, make sure that neither the load nor the
    // add is a predecessor of the other or of the Vector.
17488     SmallPtrSet<const SDNode *, 32> Visited;
17489     SmallVector<const SDNode *, 16> Worklist;
17490     Visited.insert(Addr.getNode());
17491     Worklist.push_back(User);
17492     Worklist.push_back(LD);
17493     Worklist.push_back(Vector.getNode());
17494     if (SDNode::hasPredecessorHelper(LD, Visited, Worklist) ||
17495         SDNode::hasPredecessorHelper(User, Visited, Worklist))
17496       continue;
17497 
17498     SmallVector<SDValue, 8> Ops;
17499     Ops.push_back(LD->getOperand(0));  // Chain
17500     if (IsLaneOp) {
17501       Ops.push_back(Vector);           // The vector to be inserted
17502       Ops.push_back(Lane);             // The lane to be inserted in the vector
17503     }
17504     Ops.push_back(Addr);
17505     Ops.push_back(Inc);
17506 
17507     EVT Tys[3] = { VT, MVT::i64, MVT::Other };
17508     SDVTList SDTys = DAG.getVTList(Tys);
17509     unsigned NewOp = IsLaneOp ? AArch64ISD::LD1LANEpost : AArch64ISD::LD1DUPpost;
17510     SDValue UpdN = DAG.getMemIntrinsicNode(NewOp, SDLoc(N), SDTys, Ops,
17511                                            MemVT,
17512                                            LoadSDN->getMemOperand());
17513 
17514     // Update the uses.
17515     SDValue NewResults[] = {
17516         SDValue(LD, 0),            // The result of load
17517         SDValue(UpdN.getNode(), 2) // Chain
17518     };
17519     DCI.CombineTo(LD, NewResults);
17520     DCI.CombineTo(N, SDValue(UpdN.getNode(), 0));     // Dup/Inserted Result
17521     DCI.CombineTo(User, SDValue(UpdN.getNode(), 1));  // Write back register
17522 
17523     break;
17524   }
17525   return SDValue();
17526 }
17527 
17528 /// Simplify ``Addr`` given that the top byte of it is ignored by HW during
17529 /// address translation.
17530 static bool performTBISimplification(SDValue Addr,
17531                                      TargetLowering::DAGCombinerInfo &DCI,
17532                                      SelectionDAG &DAG) {
17533   APInt DemandedMask = APInt::getLowBitsSet(64, 56);
17534   KnownBits Known;
17535   TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
17536                                         !DCI.isBeforeLegalizeOps());
17537   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
17538   if (TLI.SimplifyDemandedBits(Addr, DemandedMask, Known, TLO)) {
17539     DCI.CommitTargetLoweringOpt(TLO);
17540     return true;
17541   }
17542   return false;
17543 }
17544 
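// Fold a truncating store of a sign-, zero- or any-extended value into a
// normal store of the original value when the store's memory type matches
// the pre-extension type.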
17545 static SDValue foldTruncStoreOfExt(SelectionDAG &DAG, SDNode *N) {
17546   assert((N->getOpcode() == ISD::STORE || N->getOpcode() == ISD::MSTORE) &&
17547          "Expected STORE dag node in input!");
17548 
17549   if (auto Store = dyn_cast<StoreSDNode>(N)) {
17550     if (!Store->isTruncatingStore() || Store->isIndexed())
17551       return SDValue();
17552     SDValue Ext = Store->getValue();
17553     auto ExtOpCode = Ext.getOpcode();
17554     if (ExtOpCode != ISD::ZERO_EXTEND && ExtOpCode != ISD::SIGN_EXTEND &&
17555         ExtOpCode != ISD::ANY_EXTEND)
17556       return SDValue();
17557     SDValue Orig = Ext->getOperand(0);
17558     if (Store->getMemoryVT() != Orig.getValueType())
17559       return SDValue();
17560     return DAG.getStore(Store->getChain(), SDLoc(Store), Orig,
17561                         Store->getBasePtr(), Store->getMemOperand());
17562   }
17563 
17564   return SDValue();
17565 }
17566 
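// Combine vector stores: fold fp_round values into truncating stores for wide
// SVE fixed-length vectors, split slow stores, simplify TBI addresses and
// fold truncating stores of extended values.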
17567 static SDValue performSTORECombine(SDNode *N,
17568                                    TargetLowering::DAGCombinerInfo &DCI,
17569                                    SelectionDAG &DAG,
17570                                    const AArch64Subtarget *Subtarget) {
17571   StoreSDNode *ST = cast<StoreSDNode>(N);
17572   SDValue Chain = ST->getChain();
17573   SDValue Value = ST->getValue();
17574   SDValue Ptr = ST->getBasePtr();
17575 
17576   // If this is an FP_ROUND followed by a store, fold this into a truncating
17577   // store. We can do this even if this is already a truncstore.
17578   // We purposefully don't care about legality of the nodes here as we know
17579   // they can be split down into something legal.
17580   if (DCI.isBeforeLegalizeOps() && Value.getOpcode() == ISD::FP_ROUND &&
17581       Value.getNode()->hasOneUse() && ST->isUnindexed() &&
17582       Subtarget->useSVEForFixedLengthVectors() &&
17583       Value.getValueType().isFixedLengthVector() &&
17584       Value.getValueType().getFixedSizeInBits() >=
17585           Subtarget->getMinSVEVectorSizeInBits())
17586     return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
17587                              ST->getMemoryVT(), ST->getMemOperand());
17588 
17589   if (SDValue Split = splitStores(N, DCI, DAG, Subtarget))
17590     return Split;
17591 
17592   if (Subtarget->supportsAddressTopByteIgnored() &&
17593       performTBISimplification(N->getOperand(2), DCI, DAG))
17594     return SDValue(N, 0);
17595 
17596   if (SDValue Store = foldTruncStoreOfExt(DAG, N))
17597     return Store;
17598 
17599   return SDValue();
17600 }
17601 
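// Combine masked stores: fold a UZP1 feeding an integer masked store into a
// masked truncating store when the predicate pattern can be doubled in size.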
17602 static SDValue performMSTORECombine(SDNode *N,
17603                                     TargetLowering::DAGCombinerInfo &DCI,
17604                                     SelectionDAG &DAG,
17605                                     const AArch64Subtarget *Subtarget) {
17606   MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
17607   SDValue Value = MST->getValue();
17608   SDValue Mask = MST->getMask();
17609   SDLoc DL(N);
17610 
17611   // If this is a UZP1 followed by a masked store, fold this into a masked
17612   // truncating store.  We can do this even if this is already a masked
17613   // truncstore.
17614   if (Value.getOpcode() == AArch64ISD::UZP1 && Value->hasOneUse() &&
17615       MST->isUnindexed() && Mask->getOpcode() == AArch64ISD::PTRUE &&
17616       Value.getValueType().isInteger()) {
17617     Value = Value.getOperand(0);
17618     if (Value.getOpcode() == ISD::BITCAST) {
17619       EVT HalfVT =
17620           Value.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
17621       EVT InVT = Value.getOperand(0).getValueType();
17622 
17623       if (HalfVT.widenIntegerVectorElementType(*DAG.getContext()) == InVT) {
17624         unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits();
17625         unsigned PgPattern = Mask->getConstantOperandVal(0);
17626 
17627         // Ensure we can double the size of the predicate pattern
17628         unsigned NumElts = getNumElementsFromSVEPredPattern(PgPattern);
17629         if (NumElts && NumElts * InVT.getVectorElementType().getSizeInBits() <=
17630                            MinSVESize) {
17631           Mask = getPTrue(DAG, DL, InVT.changeVectorElementType(MVT::i1),
17632                           PgPattern);
17633           return DAG.getMaskedStore(MST->getChain(), DL, Value.getOperand(0),
17634                                     MST->getBasePtr(), MST->getOffset(), Mask,
17635                                     MST->getMemoryVT(), MST->getMemOperand(),
17636                                     MST->getAddressingMode(),
17637                                     /*IsTruncating=*/true);
17638         }
17639       }
17640     }
17641   }
17642 
17643   return SDValue();
17644 }
17645 
17646 /// \return true if part of the index was folded into the Base.
17647 static bool foldIndexIntoBase(SDValue &BasePtr, SDValue &Index, SDValue Scale,
17648                               SDLoc DL, SelectionDAG &DAG) {
17649   // This function assumes a vector of i64 indices.
17650   EVT IndexVT = Index.getValueType();
17651   if (!IndexVT.isVector() || IndexVT.getVectorElementType() != MVT::i64)
17652     return false;
17653 
17654   // Simplify:
17655   //   BasePtr = Ptr
17656   //   Index = X + splat(Offset)
17657   // ->
17658   //   BasePtr = Ptr + Offset * scale.
17659   //   Index = X
17660   if (Index.getOpcode() == ISD::ADD) {
17661     if (auto Offset = DAG.getSplatValue(Index.getOperand(1))) {
17662       Offset = DAG.getNode(ISD::MUL, DL, MVT::i64, Offset, Scale);
17663       BasePtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, Offset);
17664       Index = Index.getOperand(0);
17665       return true;
17666     }
17667   }
17668 
17669   // Simplify:
17670   //   BasePtr = Ptr
17671   //   Index = (X + splat(Offset)) << splat(Shift)
17672   // ->
  //   BasePtr = Ptr + (Offset << Shift) * scale
  //   Index = X << splat(Shift)
17675   if (Index.getOpcode() == ISD::SHL &&
17676       Index.getOperand(0).getOpcode() == ISD::ADD) {
17677     SDValue Add = Index.getOperand(0);
17678     SDValue ShiftOp = Index.getOperand(1);
17679     SDValue OffsetOp = Add.getOperand(1);
17680     if (auto Shift = DAG.getSplatValue(ShiftOp))
17681       if (auto Offset = DAG.getSplatValue(OffsetOp)) {
17682         Offset = DAG.getNode(ISD::SHL, DL, MVT::i64, Offset, Shift);
17683         Offset = DAG.getNode(ISD::MUL, DL, MVT::i64, Offset, Scale);
17684         BasePtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, Offset);
17685         Index = DAG.getNode(ISD::SHL, DL, Index.getValueType(),
17686                             Add.getOperand(0), ShiftOp);
17687         return true;
17688       }
17689   }
17690 
17691   return false;
17692 }
17693 
// Analyse the specified address, returning true if a more optimal addressing
// mode is available. When returning true, all parameters are updated to
// reflect their recommended values.
17697 static bool findMoreOptimalIndexType(const MaskedGatherScatterSDNode *N,
17698                                      SDValue &BasePtr, SDValue &Index,
17699                                      SelectionDAG &DAG) {
17700   // Try to iteratively fold parts of the index into the base pointer to
17701   // simplify the index as much as possible.
17702   bool Changed = false;
17703   while (foldIndexIntoBase(BasePtr, Index, N->getScale(), SDLoc(N), DAG))
17704     Changed = true;
17705 
  // Only consider element types that are pointer sized, as smaller types can
  // be easily promoted.
17708   EVT IndexVT = Index.getValueType();
17709   if (IndexVT.getVectorElementType() != MVT::i64 || IndexVT == MVT::nxv2i64)
17710     return Changed;
17711 
17712   // Match:
17713   //   Index = step(const)
17714   int64_t Stride = 0;
17715   if (Index.getOpcode() == ISD::STEP_VECTOR)
17716     Stride = cast<ConstantSDNode>(Index.getOperand(0))->getSExtValue();
17717 
17718   // Match:
17719   //   Index = step(const) << shift(const)
17720   else if (Index.getOpcode() == ISD::SHL &&
17721            Index.getOperand(0).getOpcode() == ISD::STEP_VECTOR) {
17722     SDValue RHS = Index.getOperand(1);
17723     if (auto *Shift =
17724             dyn_cast_or_null<ConstantSDNode>(DAG.getSplatValue(RHS))) {
      int64_t Step = (int64_t)Index.getOperand(0).getConstantOperandVal(0);
17726       Stride = Step << Shift->getZExtValue();
17727     }
17728   }
17729 
  // Return early because no supported pattern was found.
17731   if (Stride == 0)
17732     return Changed;
17733 
17734   if (Stride < std::numeric_limits<int32_t>::min() ||
17735       Stride > std::numeric_limits<int32_t>::max())
17736     return Changed;
17737 
17738   const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
17739   unsigned MaxVScale =
17740       Subtarget.getMaxSVEVectorSizeInBits() / AArch64::SVEBitsPerBlock;
17741   int64_t LastElementOffset =
17742       IndexVT.getVectorMinNumElements() * Stride * MaxVScale;
17743 
17744   if (LastElementOffset < std::numeric_limits<int32_t>::min() ||
17745       LastElementOffset > std::numeric_limits<int32_t>::max())
17746     return Changed;
17747 
17748   EVT NewIndexVT = IndexVT.changeVectorElementType(MVT::i32);
  // Stride is not scaled explicitly by 'Scale' here because that scaling is
  // applied by the gather/scatter addressing mode.
17751   Index = DAG.getNode(ISD::STEP_VECTOR, SDLoc(N), NewIndexVT,
17752                       DAG.getTargetConstant(Stride, SDLoc(N), MVT::i32));
17753   return true;
17754 }
17755 
17756 static SDValue performMaskedGatherScatterCombine(
17757     SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG) {
17758   MaskedGatherScatterSDNode *MGS = cast<MaskedGatherScatterSDNode>(N);
17759   assert(MGS && "Can only combine gather load or scatter store nodes");
17760 
17761   if (!DCI.isBeforeLegalize())
17762     return SDValue();
17763 
17764   SDLoc DL(MGS);
17765   SDValue Chain = MGS->getChain();
17766   SDValue Scale = MGS->getScale();
17767   SDValue Index = MGS->getIndex();
17768   SDValue Mask = MGS->getMask();
17769   SDValue BasePtr = MGS->getBasePtr();
17770   ISD::MemIndexType IndexType = MGS->getIndexType();
17771 
17772   if (!findMoreOptimalIndexType(MGS, BasePtr, Index, DAG))
17773     return SDValue();
17774 
  // A more optimal BasePtr/Index pair has been found, so rebuild the gather
  // or scatter node with operands that are more legalisation friendly.
17777   if (auto *MGT = dyn_cast<MaskedGatherSDNode>(MGS)) {
17778     SDValue PassThru = MGT->getPassThru();
17779     SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
17780     return DAG.getMaskedGather(
17781         DAG.getVTList(N->getValueType(0), MVT::Other), MGT->getMemoryVT(), DL,
17782         Ops, MGT->getMemOperand(), IndexType, MGT->getExtensionType());
17783   }
17784   auto *MSC = cast<MaskedScatterSDNode>(MGS);
17785   SDValue Data = MSC->getValue();
17786   SDValue Ops[] = {Chain, Data, Mask, BasePtr, Index, Scale};
17787   return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), MSC->getMemoryVT(), DL,
17788                               Ops, MSC->getMemOperand(), IndexType,
17789                               MSC->isTruncatingStore());
17790 }
17791 
17792 /// Target-specific DAG combine function for NEON load/store intrinsics
17793 /// to merge base address updates.
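/// For example, when the pointer passed to an aarch64_neon_ld2 intrinsic is
/// also incremented by the size of the data it loads, the intrinsic and the
/// ADD can be merged into a single post-indexed LD2post node whose extra
/// result is the updated pointer.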
17794 static SDValue performNEONPostLDSTCombine(SDNode *N,
17795                                           TargetLowering::DAGCombinerInfo &DCI,
17796                                           SelectionDAG &DAG) {
17797   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
17798     return SDValue();
17799 
17800   unsigned AddrOpIdx = N->getNumOperands() - 1;
17801   SDValue Addr = N->getOperand(AddrOpIdx);
17802 
17803   // Search for a use of the address operand that is an increment.
17804   for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
17805        UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
17806     SDNode *User = *UI;
17807     if (User->getOpcode() != ISD::ADD ||
17808         UI.getUse().getResNo() != Addr.getResNo())
17809       continue;
17810 
17811     // Check that the add is independent of the load/store.  Otherwise, folding
17812     // it would create a cycle.
17813     SmallPtrSet<const SDNode *, 32> Visited;
17814     SmallVector<const SDNode *, 16> Worklist;
17815     Visited.insert(Addr.getNode());
17816     Worklist.push_back(N);
17817     Worklist.push_back(User);
17818     if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
17819         SDNode::hasPredecessorHelper(User, Visited, Worklist))
17820       continue;
17821 
17822     // Find the new opcode for the updating load/store.
17823     bool IsStore = false;
17824     bool IsLaneOp = false;
17825     bool IsDupOp = false;
17826     unsigned NewOpc = 0;
17827     unsigned NumVecs = 0;
17828     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
17829     switch (IntNo) {
17830     default: llvm_unreachable("unexpected intrinsic for Neon base update");
17831     case Intrinsic::aarch64_neon_ld2:       NewOpc = AArch64ISD::LD2post;
17832       NumVecs = 2; break;
17833     case Intrinsic::aarch64_neon_ld3:       NewOpc = AArch64ISD::LD3post;
17834       NumVecs = 3; break;
17835     case Intrinsic::aarch64_neon_ld4:       NewOpc = AArch64ISD::LD4post;
17836       NumVecs = 4; break;
17837     case Intrinsic::aarch64_neon_st2:       NewOpc = AArch64ISD::ST2post;
17838       NumVecs = 2; IsStore = true; break;
17839     case Intrinsic::aarch64_neon_st3:       NewOpc = AArch64ISD::ST3post;
17840       NumVecs = 3; IsStore = true; break;
17841     case Intrinsic::aarch64_neon_st4:       NewOpc = AArch64ISD::ST4post;
17842       NumVecs = 4; IsStore = true; break;
17843     case Intrinsic::aarch64_neon_ld1x2:     NewOpc = AArch64ISD::LD1x2post;
17844       NumVecs = 2; break;
17845     case Intrinsic::aarch64_neon_ld1x3:     NewOpc = AArch64ISD::LD1x3post;
17846       NumVecs = 3; break;
17847     case Intrinsic::aarch64_neon_ld1x4:     NewOpc = AArch64ISD::LD1x4post;
17848       NumVecs = 4; break;
17849     case Intrinsic::aarch64_neon_st1x2:     NewOpc = AArch64ISD::ST1x2post;
17850       NumVecs = 2; IsStore = true; break;
17851     case Intrinsic::aarch64_neon_st1x3:     NewOpc = AArch64ISD::ST1x3post;
17852       NumVecs = 3; IsStore = true; break;
17853     case Intrinsic::aarch64_neon_st1x4:     NewOpc = AArch64ISD::ST1x4post;
17854       NumVecs = 4; IsStore = true; break;
17855     case Intrinsic::aarch64_neon_ld2r:      NewOpc = AArch64ISD::LD2DUPpost;
17856       NumVecs = 2; IsDupOp = true; break;
17857     case Intrinsic::aarch64_neon_ld3r:      NewOpc = AArch64ISD::LD3DUPpost;
17858       NumVecs = 3; IsDupOp = true; break;
17859     case Intrinsic::aarch64_neon_ld4r:      NewOpc = AArch64ISD::LD4DUPpost;
17860       NumVecs = 4; IsDupOp = true; break;
17861     case Intrinsic::aarch64_neon_ld2lane:   NewOpc = AArch64ISD::LD2LANEpost;
17862       NumVecs = 2; IsLaneOp = true; break;
17863     case Intrinsic::aarch64_neon_ld3lane:   NewOpc = AArch64ISD::LD3LANEpost;
17864       NumVecs = 3; IsLaneOp = true; break;
17865     case Intrinsic::aarch64_neon_ld4lane:   NewOpc = AArch64ISD::LD4LANEpost;
17866       NumVecs = 4; IsLaneOp = true; break;
17867     case Intrinsic::aarch64_neon_st2lane:   NewOpc = AArch64ISD::ST2LANEpost;
17868       NumVecs = 2; IsStore = true; IsLaneOp = true; break;
17869     case Intrinsic::aarch64_neon_st3lane:   NewOpc = AArch64ISD::ST3LANEpost;
17870       NumVecs = 3; IsStore = true; IsLaneOp = true; break;
17871     case Intrinsic::aarch64_neon_st4lane:   NewOpc = AArch64ISD::ST4LANEpost;
17872       NumVecs = 4; IsStore = true; IsLaneOp = true; break;
17873     }
17874 
17875     EVT VecTy;
17876     if (IsStore)
17877       VecTy = N->getOperand(2).getValueType();
17878     else
17879       VecTy = N->getValueType(0);
17880 
17881     // If the increment is a constant, it must match the memory ref size.
17882     SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
17883     if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
17884       uint32_t IncVal = CInc->getZExtValue();
17885       unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
17886       if (IsLaneOp || IsDupOp)
17887         NumBytes /= VecTy.getVectorNumElements();
17888       if (IncVal != NumBytes)
17889         continue;
17890       Inc = DAG.getRegister(AArch64::XZR, MVT::i64);
17891     }
17892     SmallVector<SDValue, 8> Ops;
17893     Ops.push_back(N->getOperand(0)); // Incoming chain
17894     // Load lane and store have vector list as input.
17895     if (IsLaneOp || IsStore)
17896       for (unsigned i = 2; i < AddrOpIdx; ++i)
17897         Ops.push_back(N->getOperand(i));
17898     Ops.push_back(Addr); // Base register
17899     Ops.push_back(Inc);
17900 
17901     // Return Types.
17902     EVT Tys[6];
17903     unsigned NumResultVecs = (IsStore ? 0 : NumVecs);
17904     unsigned n;
17905     for (n = 0; n < NumResultVecs; ++n)
17906       Tys[n] = VecTy;
17907     Tys[n++] = MVT::i64;  // Type of write back register
17908     Tys[n] = MVT::Other;  // Type of the chain
17909     SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2));
17910 
17911     MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
17912     SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, Ops,
17913                                            MemInt->getMemoryVT(),
17914                                            MemInt->getMemOperand());
17915 
17916     // Update the uses.
17917     std::vector<SDValue> NewResults;
17918     for (unsigned i = 0; i < NumResultVecs; ++i) {
17919       NewResults.push_back(SDValue(UpdN.getNode(), i));
17920     }
17921     NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1));
17922     DCI.CombineTo(N, NewResults);
17923     DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
17924 
17925     break;
17926   }
17927   return SDValue();
17928 }
17929 
17930 // Checks to see if the value is the prescribed width and returns information
17931 // about its extension mode.
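// For example, an i32 value produced by a zero-extending i8 load satisfies
// checkValueWidth(V, 8, ExtType) and reports ExtType == ISD::ZEXTLOAD.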
17932 static
17933 bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType) {
17934   ExtType = ISD::NON_EXTLOAD;
17935   switch(V.getNode()->getOpcode()) {
17936   default:
17937     return false;
17938   case ISD::LOAD: {
17939     LoadSDNode *LoadNode = cast<LoadSDNode>(V.getNode());
17940     if ((LoadNode->getMemoryVT() == MVT::i8 && width == 8)
17941        || (LoadNode->getMemoryVT() == MVT::i16 && width == 16)) {
17942       ExtType = LoadNode->getExtensionType();
17943       return true;
17944     }
17945     return false;
17946   }
17947   case ISD::AssertSext: {
17948     VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
17949     if ((TypeNode->getVT() == MVT::i8 && width == 8)
17950        || (TypeNode->getVT() == MVT::i16 && width == 16)) {
17951       ExtType = ISD::SEXTLOAD;
17952       return true;
17953     }
17954     return false;
17955   }
17956   case ISD::AssertZext: {
17957     VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
17958     if ((TypeNode->getVT() == MVT::i8 && width == 8)
17959        || (TypeNode->getVT() == MVT::i16 && width == 16)) {
17960       ExtType = ISD::ZEXTLOAD;
17961       return true;
17962     }
17963     return false;
17964   }
17965   case ISD::Constant:
17966   case ISD::TargetConstant: {
17967     return std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) <
17968            1LL << (width - 1);
17969   }
17970   }
17971 
17972   return true;
17973 }
17974 
17975 // This function does a whole lot of voodoo to determine if the tests are
17976 // equivalent without and with a mask. Essentially what happens is that given a
17977 // DAG resembling:
17978 //
17979 //  +-------------+ +-------------+ +-------------+ +-------------+
17980 //  |    Input    | | AddConstant | | CompConstant| |     CC      |
17981 //  +-------------+ +-------------+ +-------------+ +-------------+
17982 //           |           |           |               |
17983 //           V           V           |    +----------+
17984 //          +-------------+  +----+  |    |
17985 //          |     ADD     |  |0xff|  |    |
17986 //          +-------------+  +----+  |    |
17987 //                  |           |    |    |
17988 //                  V           V    |    |
17989 //                 +-------------+   |    |
17990 //                 |     AND     |   |    |
17991 //                 +-------------+   |    |
17992 //                      |            |    |
17993 //                      +-----+      |    |
17994 //                            |      |    |
17995 //                            V      V    V
17996 //                           +-------------+
17997 //                           |     CMP     |
17998 //                           +-------------+
17999 //
18000 // The AND node may be safely removed for some combinations of inputs. In
18001 // particular we need to take into account the extension type of the Input,
18002 // the exact values of AddConstant, CompConstant, and CC, along with the nominal
// width of the input (this can work for inputs of any width; the above graph
// is specific to 8 bits).
18005 //
// The specific equations were worked out by generating output tables for each
// AArch64CC value in terms of AddConstant (w1) and CompConstant (w2). The
// problem was simplified by working with 4-bit inputs, which means we only
// needed to reason about 24 distinct bit patterns: 8 patterns unique to zero
// extension (8..15), 8 patterns unique to sign extension (-8..-1), and 8
// patterns present in both extensions (0..7). For every distinct pair of
// AddConstant and CompConstant bit patterns we can consider the masked and
// unmasked versions to be equivalent if the function below returns 1 for all
// 16 distinct bit patterns of the current extension type of Input (w0).
18015 //
18016 //   sub      w8, w0, w1
18017 //   and      w10, w8, #0x0f
18018 //   cmp      w8, w2
18019 //   cset     w9, AArch64CC
18020 //   cmp      w10, w2
18021 //   cset     w11, AArch64CC
18022 //   cmp      w9, w11
18023 //   cset     w0, eq
18024 //   ret
18025 //
// Since the above function shows when the outputs are equivalent it defines
// when it is safe to remove the AND. Unfortunately it only runs on AArch64 and
// would be expensive to run during compilation. The equations below were
// verified in a test harness to give outputs equivalent to the above function
// for all inputs, so they can be used instead to determine whether the removal
// is legal.
//
// isEquivalentMaskless() is the code for testing whether the AND can be
// removed, factored out of the DAG recognition because the DAG can take
// several forms.
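//
// As a small worked example: with a zero-extended 8-bit input and
// AddConstant == 0, the masked and unmasked values are identical for every
// input in [0, 255], and for CompConstant >= 0 the EQ/NE case below reports
// the AND as removable via "AddConstant >= 0 && CompConstant >= AddConstant".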
18035 
18036 static bool isEquivalentMaskless(unsigned CC, unsigned width,
18037                                  ISD::LoadExtType ExtType, int AddConstant,
18038                                  int CompConstant) {
  // By being careful with our equations and only writing them in terms of
  // symbolic values and well-known constants (0, 1, -1, MaxUInt) we can
  // make them generally applicable to all bit widths.
18042   int MaxUInt = (1 << width);
18043 
18044   // For the purposes of these comparisons sign extending the type is
18045   // equivalent to zero extending the add and displacing it by half the integer
18046   // width. Provided we are careful and make sure our equations are valid over
18047   // the whole range we can just adjust the input and avoid writing equations
18048   // for sign extended inputs.
18049   if (ExtType == ISD::SEXTLOAD)
18050     AddConstant -= (1 << (width-1));
18051 
18052   switch(CC) {
18053   case AArch64CC::LE:
18054   case AArch64CC::GT:
18055     if ((AddConstant == 0) ||
18056         (CompConstant == MaxUInt - 1 && AddConstant < 0) ||
18057         (AddConstant >= 0 && CompConstant < 0) ||
18058         (AddConstant <= 0 && CompConstant <= 0 && CompConstant < AddConstant))
18059       return true;
18060     break;
18061   case AArch64CC::LT:
18062   case AArch64CC::GE:
18063     if ((AddConstant == 0) ||
18064         (AddConstant >= 0 && CompConstant <= 0) ||
18065         (AddConstant <= 0 && CompConstant <= 0 && CompConstant <= AddConstant))
18066       return true;
18067     break;
18068   case AArch64CC::HI:
18069   case AArch64CC::LS:
18070     if ((AddConstant >= 0 && CompConstant < 0) ||
18071        (AddConstant <= 0 && CompConstant >= -1 &&
18072         CompConstant < AddConstant + MaxUInt))
18073       return true;
18074    break;
18075   case AArch64CC::PL:
18076   case AArch64CC::MI:
18077     if ((AddConstant == 0) ||
18078         (AddConstant > 0 && CompConstant <= 0) ||
18079         (AddConstant < 0 && CompConstant <= AddConstant))
18080       return true;
18081     break;
18082   case AArch64CC::LO:
18083   case AArch64CC::HS:
18084     if ((AddConstant >= 0 && CompConstant <= 0) ||
18085         (AddConstant <= 0 && CompConstant >= 0 &&
18086          CompConstant <= AddConstant + MaxUInt))
18087       return true;
18088     break;
18089   case AArch64CC::EQ:
18090   case AArch64CC::NE:
18091     if ((AddConstant > 0 && CompConstant < 0) ||
18092         (AddConstant < 0 && CompConstant >= 0 &&
18093          CompConstant < AddConstant + MaxUInt) ||
18094         (AddConstant >= 0 && CompConstant >= 0 &&
18095          CompConstant >= AddConstant) ||
18096         (AddConstant <= 0 && CompConstant < 0 && CompConstant < AddConstant))
18097       return true;
18098     break;
18099   case AArch64CC::VS:
18100   case AArch64CC::VC:
18101   case AArch64CC::AL:
18102   case AArch64CC::NV:
18103     return true;
18104   case AArch64CC::Invalid:
18105     break;
18106   }
18107 
18108   return false;
18109 }
18110 
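// Remove a superfluous 8/16-bit mask feeding a flag-setting SUBS, e.g.
//   SUBS (AND (ADD x, c1), 0xff), c2  -->  SUBS (ADD x, c1), c2
// provided isEquivalentMaskless() proves that the condition at operand CCIndex
// is unaffected. N's operand CmpIndex is the SUBS being inspected.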
18111 static
18112 SDValue performCONDCombine(SDNode *N,
18113                            TargetLowering::DAGCombinerInfo &DCI,
18114                            SelectionDAG &DAG, unsigned CCIndex,
18115                            unsigned CmpIndex) {
18116   unsigned CC = cast<ConstantSDNode>(N->getOperand(CCIndex))->getSExtValue();
18117   SDNode *SubsNode = N->getOperand(CmpIndex).getNode();
18118   unsigned CondOpcode = SubsNode->getOpcode();
18119 
18120   if (CondOpcode != AArch64ISD::SUBS)
18121     return SDValue();
18122 
18123   // There is a SUBS feeding this condition. Is it fed by a mask we can
18124   // use?
18125 
18126   SDNode *AndNode = SubsNode->getOperand(0).getNode();
18127   unsigned MaskBits = 0;
18128 
18129   if (AndNode->getOpcode() != ISD::AND)
18130     return SDValue();
18131 
18132   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(AndNode->getOperand(1))) {
18133     uint32_t CNV = CN->getZExtValue();
18134     if (CNV == 255)
18135       MaskBits = 8;
18136     else if (CNV == 65535)
18137       MaskBits = 16;
18138   }
18139 
18140   if (!MaskBits)
18141     return SDValue();
18142 
18143   SDValue AddValue = AndNode->getOperand(0);
18144 
18145   if (AddValue.getOpcode() != ISD::ADD)
18146     return SDValue();
18147 
18148   // The basic dag structure is correct, grab the inputs and validate them.
18149 
18150   SDValue AddInputValue1 = AddValue.getNode()->getOperand(0);
18151   SDValue AddInputValue2 = AddValue.getNode()->getOperand(1);
18152   SDValue SubsInputValue = SubsNode->getOperand(1);
18153 
  // The mask is present and the provenance of all the values is a smaller
  // type, so let's see if the mask is superfluous.
18156 
18157   if (!isa<ConstantSDNode>(AddInputValue2.getNode()) ||
18158       !isa<ConstantSDNode>(SubsInputValue.getNode()))
18159     return SDValue();
18160 
18161   ISD::LoadExtType ExtType;
18162 
18163   if (!checkValueWidth(SubsInputValue, MaskBits, ExtType) ||
18164       !checkValueWidth(AddInputValue2, MaskBits, ExtType) ||
18165       !checkValueWidth(AddInputValue1, MaskBits, ExtType) )
18166     return SDValue();
18167 
18168   if(!isEquivalentMaskless(CC, MaskBits, ExtType,
18169                 cast<ConstantSDNode>(AddInputValue2.getNode())->getSExtValue(),
18170                 cast<ConstantSDNode>(SubsInputValue.getNode())->getSExtValue()))
18171     return SDValue();
18172 
18173   // The AND is not necessary, remove it.
18174 
18175   SDVTList VTs = DAG.getVTList(SubsNode->getValueType(0),
18176                                SubsNode->getValueType(1));
18177   SDValue Ops[] = { AddValue, SubsNode->getOperand(1) };
18178 
18179   SDValue NewValue = DAG.getNode(CondOpcode, SDLoc(SubsNode), VTs, Ops);
18180   DAG.ReplaceAllUsesWith(SubsNode, NewValue.getNode());
18181 
18182   return SDValue(N, 0);
18183 }
18184 
18185 // Optimize compare with zero and branch.
18186 static SDValue performBRCONDCombine(SDNode *N,
18187                                     TargetLowering::DAGCombinerInfo &DCI,
18188                                     SelectionDAG &DAG) {
18189   MachineFunction &MF = DAG.getMachineFunction();
18190   // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
18191   // will not be produced, as they are conditional branch instructions that do
18192   // not set flags.
18193   if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
18194     return SDValue();
18195 
18196   if (SDValue NV = performCONDCombine(N, DCI, DAG, 2, 3))
18197     N = NV.getNode();
18198   SDValue Chain = N->getOperand(0);
18199   SDValue Dest = N->getOperand(1);
18200   SDValue CCVal = N->getOperand(2);
18201   SDValue Cmp = N->getOperand(3);
18202 
18203   assert(isa<ConstantSDNode>(CCVal) && "Expected a ConstantSDNode here!");
18204   unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue();
18205   if (CC != AArch64CC::EQ && CC != AArch64CC::NE)
18206     return SDValue();
18207 
18208   unsigned CmpOpc = Cmp.getOpcode();
18209   if (CmpOpc != AArch64ISD::ADDS && CmpOpc != AArch64ISD::SUBS)
18210     return SDValue();
18211 
18212   // Only attempt folding if there is only one use of the flag and no use of the
18213   // value.
18214   if (!Cmp->hasNUsesOfValue(0, 0) || !Cmp->hasNUsesOfValue(1, 1))
18215     return SDValue();
18216 
18217   SDValue LHS = Cmp.getOperand(0);
18218   SDValue RHS = Cmp.getOperand(1);
18219 
18220   assert(LHS.getValueType() == RHS.getValueType() &&
18221          "Expected the value type to be the same for both operands!");
18222   if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64)
18223     return SDValue();
18224 
18225   if (isNullConstant(LHS))
18226     std::swap(LHS, RHS);
18227 
18228   if (!isNullConstant(RHS))
18229     return SDValue();
18230 
18231   if (LHS.getOpcode() == ISD::SHL || LHS.getOpcode() == ISD::SRA ||
18232       LHS.getOpcode() == ISD::SRL)
18233     return SDValue();
18234 
18235   // Fold the compare into the branch instruction.
18236   SDValue BR;
18237   if (CC == AArch64CC::EQ)
18238     BR = DAG.getNode(AArch64ISD::CBZ, SDLoc(N), MVT::Other, Chain, LHS, Dest);
18239   else
18240     BR = DAG.getNode(AArch64ISD::CBNZ, SDLoc(N), MVT::Other, Chain, LHS, Dest);
18241 
18242   // Do not add new nodes to DAG combiner worklist.
18243   DCI.CombineTo(N, BR, false);
18244 
18245   return SDValue();
18246 }
18247 
18248 static SDValue foldCSELofCTTZ(SDNode *N, SelectionDAG &DAG) {
18249   unsigned CC = N->getConstantOperandVal(2);
18250   SDValue SUBS = N->getOperand(3);
18251   SDValue Zero, CTTZ;
18252 
18253   if (CC == AArch64CC::EQ && SUBS.getOpcode() == AArch64ISD::SUBS) {
18254     Zero = N->getOperand(0);
18255     CTTZ = N->getOperand(1);
18256   } else if (CC == AArch64CC::NE && SUBS.getOpcode() == AArch64ISD::SUBS) {
18257     Zero = N->getOperand(1);
18258     CTTZ = N->getOperand(0);
18259   } else
18260     return SDValue();
18261 
18262   if ((CTTZ.getOpcode() != ISD::CTTZ && CTTZ.getOpcode() != ISD::TRUNCATE) ||
18263       (CTTZ.getOpcode() == ISD::TRUNCATE &&
18264        CTTZ.getOperand(0).getOpcode() != ISD::CTTZ))
18265     return SDValue();
18266 
18267   assert((CTTZ.getValueType() == MVT::i32 || CTTZ.getValueType() == MVT::i64) &&
18268          "Illegal type in CTTZ folding");
18269 
18270   if (!isNullConstant(Zero) || !isNullConstant(SUBS.getOperand(1)))
18271     return SDValue();
18272 
18273   SDValue X = CTTZ.getOpcode() == ISD::TRUNCATE
18274                   ? CTTZ.getOperand(0).getOperand(0)
18275                   : CTTZ.getOperand(0);
18276 
18277   if (X != SUBS.getOperand(0))
18278     return SDValue();
18279 
18280   unsigned BitWidth = CTTZ.getOpcode() == ISD::TRUNCATE
18281                           ? CTTZ.getOperand(0).getValueSizeInBits()
18282                           : CTTZ.getValueSizeInBits();
18283   SDValue BitWidthMinusOne =
18284       DAG.getConstant(BitWidth - 1, SDLoc(N), CTTZ.getValueType());
18285   return DAG.getNode(ISD::AND, SDLoc(N), CTTZ.getValueType(), CTTZ,
18286                      BitWidthMinusOne);
18287 }
18288 
18289 // Optimize CSEL instructions
18290 static SDValue performCSELCombine(SDNode *N,
18291                                   TargetLowering::DAGCombinerInfo &DCI,
18292                                   SelectionDAG &DAG) {
18293   // CSEL x, x, cc -> x
18294   if (N->getOperand(0) == N->getOperand(1))
18295     return N->getOperand(0);
18296 
18297   // CSEL 0, cttz(X), eq(X, 0) -> AND cttz bitwidth-1
18298   // CSEL cttz(X), 0, ne(X, 0) -> AND cttz bitwidth-1
18299   if (SDValue Folded = foldCSELofCTTZ(N, DAG))
    return Folded;
18301 
18302   return performCONDCombine(N, DCI, DAG, 2, 3);
18303 }
18304 
// Try to re-use an already extended operand of a vector SetCC feeding an
// extended select. Doing so avoids requiring another full extension of the
18307 // SET_CC result when lowering the select.
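// For example, if every use of "setcc a, splat(C)" is a wider VSELECT and
// "sign_extend a" to that wider type already exists in the DAG, the comparison
// can be emitted on the extended operands instead, so its i1 result never
// needs re-extending.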
18308 static SDValue tryToWidenSetCCOperands(SDNode *Op, SelectionDAG &DAG) {
18309   EVT Op0MVT = Op->getOperand(0).getValueType();
18310   if (!Op0MVT.isVector() || Op->use_empty())
18311     return SDValue();
18312 
18313   // Make sure that all uses of Op are VSELECTs with result matching types where
18314   // the result type has a larger element type than the SetCC operand.
18315   SDNode *FirstUse = *Op->use_begin();
18316   if (FirstUse->getOpcode() != ISD::VSELECT)
18317     return SDValue();
18318   EVT UseMVT = FirstUse->getValueType(0);
18319   if (UseMVT.getScalarSizeInBits() <= Op0MVT.getScalarSizeInBits())
18320     return SDValue();
18321   if (any_of(Op->uses(), [&UseMVT](const SDNode *N) {
18322         return N->getOpcode() != ISD::VSELECT || N->getValueType(0) != UseMVT;
18323       }))
18324     return SDValue();
18325 
18326   APInt V;
18327   if (!ISD::isConstantSplatVector(Op->getOperand(1).getNode(), V))
18328     return SDValue();
18329 
18330   SDLoc DL(Op);
18331   SDValue Op0ExtV;
18332   SDValue Op1ExtV;
18333   ISD::CondCode CC = cast<CondCodeSDNode>(Op->getOperand(2))->get();
18334   // Check if the first operand of the SET_CC is already extended. If it is,
18335   // split the SET_CC and re-use the extended version of the operand.
18336   SDNode *Op0SExt = DAG.getNodeIfExists(ISD::SIGN_EXTEND, DAG.getVTList(UseMVT),
18337                                         Op->getOperand(0));
18338   SDNode *Op0ZExt = DAG.getNodeIfExists(ISD::ZERO_EXTEND, DAG.getVTList(UseMVT),
18339                                         Op->getOperand(0));
18340   if (Op0SExt && (isSignedIntSetCC(CC) || isIntEqualitySetCC(CC))) {
18341     Op0ExtV = SDValue(Op0SExt, 0);
18342     Op1ExtV = DAG.getNode(ISD::SIGN_EXTEND, DL, UseMVT, Op->getOperand(1));
18343   } else if (Op0ZExt && (isUnsignedIntSetCC(CC) || isIntEqualitySetCC(CC))) {
18344     Op0ExtV = SDValue(Op0ZExt, 0);
18345     Op1ExtV = DAG.getNode(ISD::ZERO_EXTEND, DL, UseMVT, Op->getOperand(1));
18346   } else
18347     return SDValue();
18348 
18349   return DAG.getNode(ISD::SETCC, DL, UseMVT.changeVectorElementType(MVT::i1),
18350                      Op0ExtV, Op1ExtV, Op->getOperand(2));
18351 }
18352 
18353 static SDValue performSETCCCombine(SDNode *N,
18354                                    TargetLowering::DAGCombinerInfo &DCI,
18355                                    SelectionDAG &DAG) {
18356   assert(N->getOpcode() == ISD::SETCC && "Unexpected opcode!");
18357   SDValue LHS = N->getOperand(0);
18358   SDValue RHS = N->getOperand(1);
18359   ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
18360   SDLoc DL(N);
18361   EVT VT = N->getValueType(0);
18362 
18363   if (SDValue V = tryToWidenSetCCOperands(N, DAG))
18364     return V;
18365 
18366   // setcc (csel 0, 1, cond, X), 1, ne ==> csel 0, 1, !cond, X
18367   if (Cond == ISD::SETNE && isOneConstant(RHS) &&
18368       LHS->getOpcode() == AArch64ISD::CSEL &&
18369       isNullConstant(LHS->getOperand(0)) && isOneConstant(LHS->getOperand(1)) &&
18370       LHS->hasOneUse()) {
18371     // Invert CSEL's condition.
18372     auto *OpCC = cast<ConstantSDNode>(LHS.getOperand(2));
18373     auto OldCond = static_cast<AArch64CC::CondCode>(OpCC->getZExtValue());
18374     auto NewCond = getInvertedCondCode(OldCond);
18375 
18376     // csel 0, 1, !cond, X
18377     SDValue CSEL =
18378         DAG.getNode(AArch64ISD::CSEL, DL, LHS.getValueType(), LHS.getOperand(0),
18379                     LHS.getOperand(1), DAG.getConstant(NewCond, DL, MVT::i32),
18380                     LHS.getOperand(3));
18381     return DAG.getZExtOrTrunc(CSEL, DL, VT);
18382   }
18383 
18384   // setcc (srl x, imm), 0, ne ==> setcc (and x, (-1 << imm)), 0, ne
18385   if (Cond == ISD::SETNE && isNullConstant(RHS) &&
18386       LHS->getOpcode() == ISD::SRL && isa<ConstantSDNode>(LHS->getOperand(1)) &&
18387       LHS->hasOneUse()) {
18388     EVT TstVT = LHS->getValueType(0);
18389     if (TstVT.isScalarInteger() && TstVT.getFixedSizeInBits() <= 64) {
      // This pattern gets optimised better in emitComparison.
18391       uint64_t TstImm = -1ULL << LHS->getConstantOperandVal(1);
18392       SDValue TST = DAG.getNode(ISD::AND, DL, TstVT, LHS->getOperand(0),
18393                                 DAG.getConstant(TstImm, DL, TstVT));
18394       return DAG.getNode(ISD::SETCC, DL, VT, TST, RHS, N->getOperand(2));
18395     }
18396   }
18397 
18398   // setcc (iN (bitcast (vNi1 X))), 0, (eq|ne)
18399   //   ==> setcc (iN (zext (i1 (vecreduce_or (vNi1 X))))), 0, (eq|ne)
18400   if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
18401       (Cond == ISD::SETEQ || Cond == ISD::SETNE) && isNullConstant(RHS) &&
18402       LHS->getOpcode() == ISD::BITCAST) {
18403     EVT ToVT = LHS->getValueType(0);
18404     EVT FromVT = LHS->getOperand(0).getValueType();
18405     if (FromVT.isFixedLengthVector() &&
18406         FromVT.getVectorElementType() == MVT::i1) {
18407       LHS = DAG.getNode(ISD::VECREDUCE_OR, DL, MVT::i1, LHS->getOperand(0));
18408       LHS = DAG.getNode(ISD::ZERO_EXTEND, DL, ToVT, LHS);
18409       return DAG.getSetCC(DL, VT, LHS, RHS, Cond);
18410     }
18411   }
18412 
18413   return SDValue();
18414 }
18415 
18416 // Replace a flag-setting operator (eg ANDS) with the generic version
18417 // (eg AND) if the flag is unused.
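// For example, an ANDS whose NZCV result is unused becomes a plain AND, and,
// conversely, any identical pre-existing AND node is replaced with the ANDS's
// value result so the computation is only done once.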
18418 static SDValue performFlagSettingCombine(SDNode *N,
18419                                          TargetLowering::DAGCombinerInfo &DCI,
18420                                          unsigned GenericOpcode) {
18421   SDLoc DL(N);
18422   SDValue LHS = N->getOperand(0);
18423   SDValue RHS = N->getOperand(1);
18424   EVT VT = N->getValueType(0);
18425 
18426   // If the flag result isn't used, convert back to a generic opcode.
18427   if (!N->hasAnyUseOfValue(1)) {
18428     SDValue Res = DCI.DAG.getNode(GenericOpcode, DL, VT, N->ops());
18429     return DCI.DAG.getMergeValues({Res, DCI.DAG.getConstant(0, DL, MVT::i32)},
18430                                   DL);
18431   }
18432 
18433   // Combine identical generic nodes into this node, re-using the result.
18434   if (SDNode *Generic = DCI.DAG.getNodeIfExists(
18435           GenericOpcode, DCI.DAG.getVTList(VT), {LHS, RHS}))
18436     DCI.CombineTo(Generic, SDValue(N, 0));
18437 
18438   return SDValue();
18439 }
18440 
18441 static SDValue performSetCCPunpkCombine(SDNode *N, SelectionDAG &DAG) {
18442   // setcc_merge_zero pred
18443   //   (sign_extend (extract_subvector (setcc_merge_zero ... pred ...))), 0, ne
18444   //   => extract_subvector (inner setcc_merge_zero)
18445   SDValue Pred = N->getOperand(0);
18446   SDValue LHS = N->getOperand(1);
18447   SDValue RHS = N->getOperand(2);
18448   ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(3))->get();
18449 
18450   if (Cond != ISD::SETNE || !isZerosVector(RHS.getNode()) ||
18451       LHS->getOpcode() != ISD::SIGN_EXTEND)
18452     return SDValue();
18453 
18454   SDValue Extract = LHS->getOperand(0);
18455   if (Extract->getOpcode() != ISD::EXTRACT_SUBVECTOR ||
18456       Extract->getValueType(0) != N->getValueType(0) ||
18457       Extract->getConstantOperandVal(1) != 0)
18458     return SDValue();
18459 
18460   SDValue InnerSetCC = Extract->getOperand(0);
18461   if (InnerSetCC->getOpcode() != AArch64ISD::SETCC_MERGE_ZERO)
18462     return SDValue();
18463 
18464   // By this point we've effectively got
18465   // zero_inactive_lanes_and_trunc_i1(sext_i1(A)). If we can prove A's inactive
18466   // lanes are already zero then the trunc(sext()) sequence is redundant and we
18467   // can operate on A directly.
18468   SDValue InnerPred = InnerSetCC.getOperand(0);
18469   if (Pred.getOpcode() == AArch64ISD::PTRUE &&
18470       InnerPred.getOpcode() == AArch64ISD::PTRUE &&
18471       Pred.getConstantOperandVal(0) == InnerPred.getConstantOperandVal(0) &&
18472       Pred->getConstantOperandVal(0) >= AArch64SVEPredPattern::vl1 &&
18473       Pred->getConstantOperandVal(0) <= AArch64SVEPredPattern::vl256)
18474     return Extract;
18475 
18476   return SDValue();
18477 }
18478 
18479 static SDValue
18480 performSetccMergeZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
18481   assert(N->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
18482          "Unexpected opcode!");
18483 
18484   SelectionDAG &DAG = DCI.DAG;
18485   SDValue Pred = N->getOperand(0);
18486   SDValue LHS = N->getOperand(1);
18487   SDValue RHS = N->getOperand(2);
18488   ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(3))->get();
18489 
18490   if (SDValue V = performSetCCPunpkCombine(N, DAG))
18491     return V;
18492 
18493   if (Cond == ISD::SETNE && isZerosVector(RHS.getNode()) &&
18494       LHS->getOpcode() == ISD::SIGN_EXTEND &&
18495       LHS->getOperand(0)->getValueType(0) == N->getValueType(0)) {
18496     //    setcc_merge_zero(
18497     //       pred, extend(setcc_merge_zero(pred, ...)), != splat(0))
18498     // => setcc_merge_zero(pred, ...)
18499     if (LHS->getOperand(0)->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
18500         LHS->getOperand(0)->getOperand(0) == Pred)
18501       return LHS->getOperand(0);
18502 
18503     //    setcc_merge_zero(
18504     //        all_active, extend(nxvNi1 ...), != splat(0))
18505     // -> nxvNi1 ...
18506     if (isAllActivePredicate(DAG, Pred))
18507       return LHS->getOperand(0);
18508 
18509     //    setcc_merge_zero(
18510     //        pred, extend(nxvNi1 ...), != splat(0))
18511     // -> nxvNi1 and(pred, ...)
18512     if (DCI.isAfterLegalizeDAG())
18513       // Do this after legalization to allow more folds on setcc_merge_zero
18514       // to be recognized.
18515       return DAG.getNode(ISD::AND, SDLoc(N), N->getValueType(0),
18516                          LHS->getOperand(0), Pred);
18517   }
18518 
18519   return SDValue();
18520 }
18521 
18522 // Optimize some simple tbz/tbnz cases.  Returns the new operand and bit to test
18523 // as well as whether the test should be inverted.  This code is required to
18524 // catch these cases (as opposed to standard dag combines) because
18525 // AArch64ISD::TBZ is matched during legalization.
18526 static SDValue getTestBitOperand(SDValue Op, unsigned &Bit, bool &Invert,
18527                                  SelectionDAG &DAG) {
18528 
18529   if (!Op->hasOneUse())
18530     return Op;
18531 
18532   // We don't handle undef/constant-fold cases below, as they should have
18533   // already been taken care of (e.g. and of 0, test of undefined shifted bits,
18534   // etc.)
18535 
18536   // (tbz (trunc x), b) -> (tbz x, b)
18537   // This case is just here to enable more of the below cases to be caught.
18538   if (Op->getOpcode() == ISD::TRUNCATE &&
18539       Bit < Op->getValueType(0).getSizeInBits()) {
18540     return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18541   }
18542 
18543   // (tbz (any_ext x), b) -> (tbz x, b) if we don't use the extended bits.
18544   if (Op->getOpcode() == ISD::ANY_EXTEND &&
18545       Bit < Op->getOperand(0).getValueSizeInBits()) {
18546     return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18547   }
18548 
18549   if (Op->getNumOperands() != 2)
18550     return Op;
18551 
18552   auto *C = dyn_cast<ConstantSDNode>(Op->getOperand(1));
18553   if (!C)
18554     return Op;
18555 
18556   switch (Op->getOpcode()) {
18557   default:
18558     return Op;
18559 
18560   // (tbz (and x, m), b) -> (tbz x, b)
18561   case ISD::AND:
18562     if ((C->getZExtValue() >> Bit) & 1)
18563       return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18564     return Op;
18565 
18566   // (tbz (shl x, c), b) -> (tbz x, b-c)
18567   case ISD::SHL:
18568     if (C->getZExtValue() <= Bit &&
18569         (Bit - C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
18570       Bit = Bit - C->getZExtValue();
18571       return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18572     }
18573     return Op;
18574 
18575   // (tbz (sra x, c), b) -> (tbz x, b+c) or (tbz x, msb) if b+c is > # bits in x
18576   case ISD::SRA:
18577     Bit = Bit + C->getZExtValue();
18578     if (Bit >= Op->getValueType(0).getSizeInBits())
18579       Bit = Op->getValueType(0).getSizeInBits() - 1;
18580     return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18581 
18582   // (tbz (srl x, c), b) -> (tbz x, b+c)
18583   case ISD::SRL:
18584     if ((Bit + C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
18585       Bit = Bit + C->getZExtValue();
18586       return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18587     }
18588     return Op;
18589 
18590   // (tbz (xor x, -1), b) -> (tbnz x, b)
18591   case ISD::XOR:
18592     if ((C->getZExtValue() >> Bit) & 1)
18593       Invert = !Invert;
18594     return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18595   }
18596 }
18597 
18598 // Optimize test single bit zero/non-zero and branch.
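// For example, (tbz (and (srl x, 3), 1), bit 0) can be rewritten to test bit 3
// of x directly: (tbz x, bit 3).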
18599 static SDValue performTBZCombine(SDNode *N,
18600                                  TargetLowering::DAGCombinerInfo &DCI,
18601                                  SelectionDAG &DAG) {
18602   unsigned Bit = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
18603   bool Invert = false;
18604   SDValue TestSrc = N->getOperand(1);
18605   SDValue NewTestSrc = getTestBitOperand(TestSrc, Bit, Invert, DAG);
18606 
18607   if (TestSrc == NewTestSrc)
18608     return SDValue();
18609 
18610   unsigned NewOpc = N->getOpcode();
18611   if (Invert) {
18612     if (NewOpc == AArch64ISD::TBZ)
18613       NewOpc = AArch64ISD::TBNZ;
18614     else {
18615       assert(NewOpc == AArch64ISD::TBNZ);
18616       NewOpc = AArch64ISD::TBZ;
18617     }
18618   }
18619 
18620   SDLoc DL(N);
18621   return DAG.getNode(NewOpc, DL, MVT::Other, N->getOperand(0), NewTestSrc,
18622                      DAG.getConstant(Bit, DL, MVT::i64), N->getOperand(3));
18623 }
18624 
// Swap vselect operands where doing so may allow a predicated operation to
// also perform the `sel`.
18627 //
18628 //     (vselect (setcc ( condcode) (_) (_)) (a)          (op (a) (b)))
18629 //  => (vselect (setcc (!condcode) (_) (_)) (op (a) (b)) (a))
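//
// With the floating point op as the "true" operand and (a) as the "false"
// operand, the vselect can later be lowered to a predicated operation whose
// inactive lanes keep the value of (a).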
18630 static SDValue trySwapVSelectOperands(SDNode *N, SelectionDAG &DAG) {
18631   auto SelectA = N->getOperand(1);
18632   auto SelectB = N->getOperand(2);
18633   auto NTy = N->getValueType(0);
18634 
18635   if (!NTy.isScalableVector())
18636     return SDValue();
18637   SDValue SetCC = N->getOperand(0);
18638   if (SetCC.getOpcode() != ISD::SETCC || !SetCC.hasOneUse())
18639     return SDValue();
18640 
18641   switch (SelectB.getOpcode()) {
18642   default:
18643     return SDValue();
18644   case ISD::FMUL:
18645   case ISD::FSUB:
18646   case ISD::FADD:
18647     break;
18648   }
18649   if (SelectA != SelectB.getOperand(0))
18650     return SDValue();
18651 
18652   ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
18653   ISD::CondCode InverseCC =
18654       ISD::getSetCCInverse(CC, SetCC.getOperand(0).getValueType());
18655   auto InverseSetCC =
18656       DAG.getSetCC(SDLoc(SetCC), SetCC.getValueType(), SetCC.getOperand(0),
18657                    SetCC.getOperand(1), InverseCC);
18658 
18659   return DAG.getNode(ISD::VSELECT, SDLoc(N), NTy,
18660                      {InverseSetCC, SelectB, SelectA});
18661 }
18662 
18663 // vselect (v1i1 setcc) ->
18664 //     vselect (v1iXX setcc)  (XX is the size of the compared operand type)
18665 // FIXME: Currently the type legalizer can't handle VSELECT having v1i1 as
18666 // condition. If it can legalize "VSELECT v1i1" correctly, no need to combine
18667 // such VSELECT.
18668 static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG) {
18669   if (auto SwapResult = trySwapVSelectOperands(N, DAG))
18670     return SwapResult;
18671 
18672   SDValue N0 = N->getOperand(0);
18673   EVT CCVT = N0.getValueType();
18674 
18675   if (isAllActivePredicate(DAG, N0))
18676     return N->getOperand(1);
18677 
18678   if (isAllInactivePredicate(N0))
18679     return N->getOperand(2);
18680 
18681   // Check for sign pattern (VSELECT setgt, iN lhs, -1, 1, -1) and transform
18682   // into (OR (ASR lhs, N-1), 1), which requires less instructions for the
18683   // supported types.
18684   SDValue SetCC = N->getOperand(0);
18685   if (SetCC.getOpcode() == ISD::SETCC &&
18686       SetCC.getOperand(2) == DAG.getCondCode(ISD::SETGT)) {
18687     SDValue CmpLHS = SetCC.getOperand(0);
18688     EVT VT = CmpLHS.getValueType();
18689     SDNode *CmpRHS = SetCC.getOperand(1).getNode();
18690     SDNode *SplatLHS = N->getOperand(1).getNode();
18691     SDNode *SplatRHS = N->getOperand(2).getNode();
18692     APInt SplatLHSVal;
18693     if (CmpLHS.getValueType() == N->getOperand(1).getValueType() &&
18694         VT.isSimple() &&
18695         is_contained(
18696             makeArrayRef({MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
18697                           MVT::v2i32, MVT::v4i32, MVT::v2i64}),
18698             VT.getSimpleVT().SimpleTy) &&
18699         ISD::isConstantSplatVector(SplatLHS, SplatLHSVal) &&
18700         SplatLHSVal.isOne() && ISD::isConstantSplatVectorAllOnes(CmpRHS) &&
18701         ISD::isConstantSplatVectorAllOnes(SplatRHS)) {
18702       unsigned NumElts = VT.getVectorNumElements();
18703       SmallVector<SDValue, 8> Ops(
18704           NumElts, DAG.getConstant(VT.getScalarSizeInBits() - 1, SDLoc(N),
18705                                    VT.getScalarType()));
18706       SDValue Val = DAG.getBuildVector(VT, SDLoc(N), Ops);
18707 
18708       auto Shift = DAG.getNode(ISD::SRA, SDLoc(N), VT, CmpLHS, Val);
18709       auto Or = DAG.getNode(ISD::OR, SDLoc(N), VT, Shift, N->getOperand(1));
18710       return Or;
18711     }
18712   }
18713 
18714   if (N0.getOpcode() != ISD::SETCC ||
18715       CCVT.getVectorElementCount() != ElementCount::getFixed(1) ||
18716       CCVT.getVectorElementType() != MVT::i1)
18717     return SDValue();
18718 
18719   EVT ResVT = N->getValueType(0);
18720   EVT CmpVT = N0.getOperand(0).getValueType();
18721   // Only combine when the result type is of the same size as the compared
18722   // operands.
18723   if (ResVT.getSizeInBits() != CmpVT.getSizeInBits())
18724     return SDValue();
18725 
18726   SDValue IfTrue = N->getOperand(1);
18727   SDValue IfFalse = N->getOperand(2);
18728   SetCC = DAG.getSetCC(SDLoc(N), CmpVT.changeVectorElementTypeToInteger(),
18729                        N0.getOperand(0), N0.getOperand(1),
18730                        cast<CondCodeSDNode>(N0.getOperand(2))->get());
18731   return DAG.getNode(ISD::VSELECT, SDLoc(N), ResVT, SetCC,
18732                      IfTrue, IfFalse);
18733 }
18734 
/// A vector select: "(select (setcc LHS, RHS), vL, vR)" is best performed with
18736 /// the compare-mask instructions rather than going via NZCV, even if LHS and
18737 /// RHS are really scalar. This replaces any scalar setcc in the above pattern
18738 /// with a vector one followed by a DUP shuffle on the result.
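/// For example, "select (setcc i32 a, b), v4i32 t, v4i32 f" becomes a v4i32
/// compare of the scalars placed in lane 0, a DUP of the lane-0 mask across
/// all lanes, and a vector select of t and f on that mask.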
18739 static SDValue performSelectCombine(SDNode *N,
18740                                     TargetLowering::DAGCombinerInfo &DCI) {
18741   SelectionDAG &DAG = DCI.DAG;
18742   SDValue N0 = N->getOperand(0);
18743   EVT ResVT = N->getValueType(0);
18744 
18745   if (N0.getOpcode() != ISD::SETCC)
18746     return SDValue();
18747 
18748   if (ResVT.isScalableVector())
18749     return SDValue();
18750 
18751   // Make sure the SETCC result is either i1 (initial DAG), or i32, the lowered
18752   // scalar SetCCResultType. We also don't expect vectors, because we assume
18753   // that selects fed by vector SETCCs are canonicalized to VSELECT.
18754   assert((N0.getValueType() == MVT::i1 || N0.getValueType() == MVT::i32) &&
18755          "Scalar-SETCC feeding SELECT has unexpected result type!");
18756 
  // If NumMaskElts == 0, the comparison is larger than the select result. The
  // largest real NEON comparison is 64 bits per lane, which means the result
  // is at most 32 bits and an illegal vector. Just bail out for now.
18760   EVT SrcVT = N0.getOperand(0).getValueType();
18761 
18762   // Don't try to do this optimization when the setcc itself has i1 operands.
18763   // There are no legal vectors of i1, so this would be pointless.
18764   if (SrcVT == MVT::i1)
18765     return SDValue();
18766 
18767   int NumMaskElts = ResVT.getSizeInBits() / SrcVT.getSizeInBits();
18768   if (!ResVT.isVector() || NumMaskElts == 0)
18769     return SDValue();
18770 
18771   SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumMaskElts);
18772   EVT CCVT = SrcVT.changeVectorElementTypeToInteger();
18773 
18774   // Also bail out if the vector CCVT isn't the same size as ResVT.
18775   // This can happen if the SETCC operand size doesn't divide the ResVT size
18776   // (e.g., f64 vs v3f32).
18777   if (CCVT.getSizeInBits() != ResVT.getSizeInBits())
18778     return SDValue();
18779 
18780   // Make sure we didn't create illegal types, if we're not supposed to.
18781   assert(DCI.isBeforeLegalize() ||
18782          DAG.getTargetLoweringInfo().isTypeLegal(SrcVT));
18783 
18784   // First perform a vector comparison, where lane 0 is the one we're interested
18785   // in.
18786   SDLoc DL(N0);
18787   SDValue LHS =
18788       DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(0));
18789   SDValue RHS =
18790       DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(1));
18791   SDValue SetCC = DAG.getNode(ISD::SETCC, DL, CCVT, LHS, RHS, N0.getOperand(2));
18792 
18793   // Now duplicate the comparison mask we want across all other lanes.
18794   SmallVector<int, 8> DUPMask(CCVT.getVectorNumElements(), 0);
18795   SDValue Mask = DAG.getVectorShuffle(CCVT, DL, SetCC, SetCC, DUPMask);
18796   Mask = DAG.getNode(ISD::BITCAST, DL,
18797                      ResVT.changeVectorElementTypeToInteger(), Mask);
18798 
18799   return DAG.getSelect(DL, ResVT, Mask, N->getOperand(1), N->getOperand(2));
18800 }
18801 
18802 static SDValue performDUPCombine(SDNode *N,
18803                                  TargetLowering::DAGCombinerInfo &DCI) {
18804   EVT VT = N->getValueType(0);
18805   // If "v2i32 DUP(x)" and "v4i32 DUP(x)" both exist, use an extract from the
18806   // 128bit vector version.
18807   if (VT.is64BitVector() && DCI.isAfterLegalizeDAG()) {
18808     EVT LVT = VT.getDoubleNumVectorElementsVT(*DCI.DAG.getContext());
18809     if (SDNode *LN = DCI.DAG.getNodeIfExists(
18810             N->getOpcode(), DCI.DAG.getVTList(LVT), {N->getOperand(0)})) {
18811       SDLoc DL(N);
18812       return DCI.DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SDValue(LN, 0),
18813                              DCI.DAG.getConstant(0, DL, MVT::i64));
18814     }
18815   }
18816 
18817   return performPostLD1Combine(N, DCI, false);
18818 }
18819 
18820 /// Get rid of unnecessary NVCASTs (that don't change the type).
18821 static SDValue performNVCASTCombine(SDNode *N) {
18822   if (N->getValueType(0) == N->getOperand(0).getValueType())
18823     return N->getOperand(0);
18824 
18825   return SDValue();
18826 }
18827 
18828 // If all users of the globaladdr are of the form (globaladdr + constant), find
18829 // the smallest constant, fold it into the globaladdr's offset and rewrite the
18830 // globaladdr as (globaladdr + constant) - constant.
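// For example, if G is only used as (add G, 8) and (add G, 24), G is replaced
// by (sub (G + 8), 8); the generic combiner can then fold the first use to
// (G + 8) and the second to (add (G + 8), 16).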
18831 static SDValue performGlobalAddressCombine(SDNode *N, SelectionDAG &DAG,
18832                                            const AArch64Subtarget *Subtarget,
18833                                            const TargetMachine &TM) {
18834   auto *GN = cast<GlobalAddressSDNode>(N);
18835   if (Subtarget->ClassifyGlobalReference(GN->getGlobal(), TM) !=
18836       AArch64II::MO_NO_FLAG)
18837     return SDValue();
18838 
18839   uint64_t MinOffset = -1ull;
18840   for (SDNode *N : GN->uses()) {
18841     if (N->getOpcode() != ISD::ADD)
18842       return SDValue();
18843     auto *C = dyn_cast<ConstantSDNode>(N->getOperand(0));
18844     if (!C)
18845       C = dyn_cast<ConstantSDNode>(N->getOperand(1));
18846     if (!C)
18847       return SDValue();
18848     MinOffset = std::min(MinOffset, C->getZExtValue());
18849   }
18850   uint64_t Offset = MinOffset + GN->getOffset();
18851 
18852   // Require that the new offset is larger than the existing one. Otherwise, we
18853   // can end up oscillating between two possible DAGs, for example,
18854   // (add (add globaladdr + 10, -1), 1) and (add globaladdr + 9, 1).
18855   if (Offset <= uint64_t(GN->getOffset()))
18856     return SDValue();
18857 
18858   // Check whether folding this offset is legal. It must not go out of bounds of
18859   // the referenced object to avoid violating the code model, and must be
18860   // smaller than 2^20 because this is the largest offset expressible in all
18861   // object formats. (The IMAGE_REL_ARM64_PAGEBASE_REL21 relocation in COFF
18862   // stores an immediate signed 21 bit offset.)
18863   //
18864   // This check also prevents us from folding negative offsets, which will end
18865   // up being treated in the same way as large positive ones. They could also
18866   // cause code model violations, and aren't really common enough to matter.
18867   if (Offset >= (1 << 20))
18868     return SDValue();
18869 
18870   const GlobalValue *GV = GN->getGlobal();
18871   Type *T = GV->getValueType();
18872   if (!T->isSized() ||
18873       Offset > GV->getParent()->getDataLayout().getTypeAllocSize(T))
18874     return SDValue();
18875 
18876   SDLoc DL(GN);
18877   SDValue Result = DAG.getGlobalAddress(GV, DL, MVT::i64, Offset);
18878   return DAG.getNode(ISD::SUB, DL, MVT::i64, Result,
18879                      DAG.getConstant(MinOffset, DL, MVT::i64));
18880 }
18881 
// Turns the vector of indices into a vector of byte offsets by scaling Offset
// by (BitWidth / 8).
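// For example, with BitWidth == 32 each index is shifted left by
// Log2_32(32 / 8) == 2, i.e. multiplied by 4.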
18884 static SDValue getScaledOffsetForBitWidth(SelectionDAG &DAG, SDValue Offset,
18885                                           SDLoc DL, unsigned BitWidth) {
18886   assert(Offset.getValueType().isScalableVector() &&
18887          "This method is only for scalable vectors of offsets");
18888 
18889   SDValue Shift = DAG.getConstant(Log2_32(BitWidth / 8), DL, MVT::i64);
18890   SDValue SplatShift = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, Shift);
18891 
18892   return DAG.getNode(ISD::SHL, DL, MVT::nxv2i64, Offset, SplatShift);
18893 }
18894 
18895 /// Check if the value of \p OffsetInBytes can be used as an immediate for
18896 /// the gather load/prefetch and scatter store instructions with vector base and
18897 /// immediate offset addressing mode:
18898 ///
18899 ///      [<Zn>.[S|D]{, #<imm>}]
18900 ///
18901 /// where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.
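/// For example, with 4-byte elements the valid immediates are
/// 0, 4, 8, ..., 124 (i.e. 31 * 4).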
18902 inline static bool isValidImmForSVEVecImmAddrMode(unsigned OffsetInBytes,
18903                                                   unsigned ScalarSizeInBytes) {
18904   // The immediate is not a multiple of the scalar size.
18905   if (OffsetInBytes % ScalarSizeInBytes)
18906     return false;
18907 
18908   // The immediate is out of range.
18909   if (OffsetInBytes / ScalarSizeInBytes > 31)
18910     return false;
18911 
18912   return true;
18913 }
18914 
18915 /// Check if the value of \p Offset represents a valid immediate for the SVE
/// gather load/prefetch and scatter store instructions with vector base and
18917 /// immediate offset addressing mode:
18918 ///
18919 ///      [<Zn>.[S|D]{, #<imm>}]
18920 ///
18921 /// where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.
18922 static bool isValidImmForSVEVecImmAddrMode(SDValue Offset,
18923                                            unsigned ScalarSizeInBytes) {
18924   ConstantSDNode *OffsetConst = dyn_cast<ConstantSDNode>(Offset.getNode());
18925   return OffsetConst && isValidImmForSVEVecImmAddrMode(
18926                             OffsetConst->getZExtValue(), ScalarSizeInBytes);
18927 }
18928 
18929 static SDValue performScatterStoreCombine(SDNode *N, SelectionDAG &DAG,
18930                                           unsigned Opcode,
18931                                           bool OnlyPackedOffsets = true) {
18932   const SDValue Src = N->getOperand(2);
18933   const EVT SrcVT = Src->getValueType(0);
18934   assert(SrcVT.isScalableVector() &&
18935          "Scatter stores are only possible for SVE vectors");
18936 
18937   SDLoc DL(N);
18938   MVT SrcElVT = SrcVT.getVectorElementType().getSimpleVT();
18939 
18940   // Make sure that source data will fit into an SVE register
18941   if (SrcVT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
18942     return SDValue();
18943 
18944   // For FPs, ACLE only supports _packed_ single and double precision types.
18945   if (SrcElVT.isFloatingPoint())
18946     if ((SrcVT != MVT::nxv4f32) && (SrcVT != MVT::nxv2f64))
18947       return SDValue();
18948 
18949   // Depending on the addressing mode, this is either a pointer or a vector of
18950   // pointers (that fits into one register)
18951   SDValue Base = N->getOperand(4);
18952   // Depending on the addressing mode, this is either a single offset or a
18953   // vector of offsets  (that fits into one register)
18954   SDValue Offset = N->getOperand(5);
18955 
18956   // For "scalar + vector of indices", just scale the indices. This only
18957   // applies to non-temporal scatters because there's no instruction that takes
  // indices.
18959   if (Opcode == AArch64ISD::SSTNT1_INDEX_PRED) {
18960     Offset =
18961         getScaledOffsetForBitWidth(DAG, Offset, DL, SrcElVT.getSizeInBits());
18962     Opcode = AArch64ISD::SSTNT1_PRED;
18963   }
18964 
  // In the case of non-temporal scatter stores there's only one SVE instruction
  // per data-size: "scalar + vector", i.e.
  //    * stnt1{b|h|w|d} { z0.s }, p0, [z0.s, x0]
18968   // Since we do have intrinsics that allow the arguments to be in a different
18969   // order, we may need to swap them to match the spec.
18970   if (Opcode == AArch64ISD::SSTNT1_PRED && Offset.getValueType().isVector())
18971     std::swap(Base, Offset);
18972 
18973   // SST1_IMM requires that the offset is an immediate that is:
18974   //    * a multiple of #SizeInBytes,
18975   //    * in the range [0, 31 x #SizeInBytes],
18976   // where #SizeInBytes is the size in bytes of the stored items. For
18977   // immediates outside that range and non-immediate scalar offsets use SST1 or
18978   // SST1_UXTW instead.
18979   if (Opcode == AArch64ISD::SST1_IMM_PRED) {
18980     if (!isValidImmForSVEVecImmAddrMode(Offset,
18981                                         SrcVT.getScalarSizeInBits() / 8)) {
18982       if (MVT::nxv4i32 == Base.getValueType().getSimpleVT().SimpleTy)
18983         Opcode = AArch64ISD::SST1_UXTW_PRED;
18984       else
18985         Opcode = AArch64ISD::SST1_PRED;
18986 
18987       std::swap(Base, Offset);
18988     }
18989   }
18990 
18991   auto &TLI = DAG.getTargetLoweringInfo();
18992   if (!TLI.isTypeLegal(Base.getValueType()))
18993     return SDValue();
18994 
18995   // Some scatter store variants allow unpacked offsets, but only as nxv2i32
  // vectors. These are implicitly sign (sxtw) or zero (uxtw) extended to
  // nxv2i64. Legalize accordingly.
18998   if (!OnlyPackedOffsets &&
18999       Offset.getValueType().getSimpleVT().SimpleTy == MVT::nxv2i32)
19000     Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset).getValue(0);
19001 
19002   if (!TLI.isTypeLegal(Offset.getValueType()))
19003     return SDValue();
19004 
19005   // Source value type that is representable in hardware
19006   EVT HwSrcVt = getSVEContainerType(SrcVT);
19007 
19008   // Keep the original type of the input data to store - this is needed to be
19009   // able to select the correct instruction, e.g. ST1B, ST1H, ST1W and ST1D. For
19010   // FP values we want the integer equivalent, so just use HwSrcVt.
19011   SDValue InputVT = DAG.getValueType(SrcVT);
19012   if (SrcVT.isFloatingPoint())
19013     InputVT = DAG.getValueType(HwSrcVt);
19014 
19015   SDVTList VTs = DAG.getVTList(MVT::Other);
19016   SDValue SrcNew;
19017 
19018   if (Src.getValueType().isFloatingPoint())
19019     SrcNew = DAG.getNode(ISD::BITCAST, DL, HwSrcVt, Src);
19020   else
19021     SrcNew = DAG.getNode(ISD::ANY_EXTEND, DL, HwSrcVt, Src);
19022 
19023   SDValue Ops[] = {N->getOperand(0), // Chain
19024                    SrcNew,
19025                    N->getOperand(3), // Pg
19026                    Base,
19027                    Offset,
19028                    InputVT};
19029 
19030   return DAG.getNode(Opcode, DL, VTs, Ops);
19031 }
19032 
19033 static SDValue performGatherLoadCombine(SDNode *N, SelectionDAG &DAG,
19034                                         unsigned Opcode,
19035                                         bool OnlyPackedOffsets = true) {
19036   const EVT RetVT = N->getValueType(0);
19037   assert(RetVT.isScalableVector() &&
19038          "Gather loads are only possible for SVE vectors");
19039 
19040   SDLoc DL(N);
19041 
19042   // Make sure that the loaded data will fit into an SVE register
19043   if (RetVT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
19044     return SDValue();
19045 
19046   // Depending on the addressing mode, this is either a pointer or a vector of
19047   // pointers (that fits into one register)
19048   SDValue Base = N->getOperand(3);
19049   // Depending on the addressing mode, this is either a single offset or a
19050   // vector of offsets  (that fits into one register)
19051   SDValue Offset = N->getOperand(4);
19052 
19053   // For "scalar + vector of indices", just scale the indices. This only
19054   // applies to non-temporal gathers because there's no instruction that takes
  // indices.
19056   if (Opcode == AArch64ISD::GLDNT1_INDEX_MERGE_ZERO) {
19057     Offset = getScaledOffsetForBitWidth(DAG, Offset, DL,
19058                                         RetVT.getScalarSizeInBits());
19059     Opcode = AArch64ISD::GLDNT1_MERGE_ZERO;
19060   }
19061 
19062   // In the case of non-temporal gather loads there's only one SVE instruction
19063   // per data-size: "scalar + vector", i.e.
19064   //    * ldnt1{b|h|w|d} { z0.s }, p0/z, [z0.s, x0]
19065   // Since we do have intrinsics that allow the arguments to be in a different
19066   // order, we may need to swap them to match the spec.
19067   if (Opcode == AArch64ISD::GLDNT1_MERGE_ZERO &&
19068       Offset.getValueType().isVector())
19069     std::swap(Base, Offset);
19070 
19071   // GLD{FF}1_IMM requires that the offset is an immediate that is:
19072   //    * a multiple of #SizeInBytes,
19073   //    * in the range [0, 31 x #SizeInBytes],
19074   // where #SizeInBytes is the size in bytes of the loaded items. For
19075   // immediates outside that range and non-immediate scalar offsets use
19076   // GLD1_MERGE_ZERO or GLD1_UXTW_MERGE_ZERO instead.
19077   if (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO ||
19078       Opcode == AArch64ISD::GLDFF1_IMM_MERGE_ZERO) {
19079     if (!isValidImmForSVEVecImmAddrMode(Offset,
19080                                         RetVT.getScalarSizeInBits() / 8)) {
19081       if (MVT::nxv4i32 == Base.getValueType().getSimpleVT().SimpleTy)
19082         Opcode = (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO)
19083                      ? AArch64ISD::GLD1_UXTW_MERGE_ZERO
19084                      : AArch64ISD::GLDFF1_UXTW_MERGE_ZERO;
19085       else
19086         Opcode = (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO)
19087                      ? AArch64ISD::GLD1_MERGE_ZERO
19088                      : AArch64ISD::GLDFF1_MERGE_ZERO;
19089 
19090       std::swap(Base, Offset);
19091     }
19092   }
19093 
19094   auto &TLI = DAG.getTargetLoweringInfo();
19095   if (!TLI.isTypeLegal(Base.getValueType()))
19096     return SDValue();
19097 
  // Some gather load variants allow unpacked offsets, but only as nxv2i32
  // vectors. These are implicitly sign (sxtw) or zero (uxtw) extended to
  // nxv2i64. Legalize accordingly.
19101   if (!OnlyPackedOffsets &&
19102       Offset.getValueType().getSimpleVT().SimpleTy == MVT::nxv2i32)
19103     Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset).getValue(0);
19104 
19105   // Return value type that is representable in hardware
19106   EVT HwRetVt = getSVEContainerType(RetVT);
19107 
  // Keep the original output value type around - this is needed to be able to
  // select the correct instruction, e.g. LD1B, LD1H, LD1W and LD1D. For FP
  // values we want the integer equivalent, so just use HwRetVt.
19111   SDValue OutVT = DAG.getValueType(RetVT);
19112   if (RetVT.isFloatingPoint())
19113     OutVT = DAG.getValueType(HwRetVt);
19114 
19115   SDVTList VTs = DAG.getVTList(HwRetVt, MVT::Other);
19116   SDValue Ops[] = {N->getOperand(0), // Chain
19117                    N->getOperand(2), // Pg
19118                    Base, Offset, OutVT};
19119 
19120   SDValue Load = DAG.getNode(Opcode, DL, VTs, Ops);
19121   SDValue LoadChain = SDValue(Load.getNode(), 1);
19122 
19123   if (RetVT.isInteger() && (RetVT != HwRetVt))
19124     Load = DAG.getNode(ISD::TRUNCATE, DL, RetVT, Load.getValue(0));
19125 
19126   // If the original return value was FP, bitcast accordingly. Doing it here
19127   // means that we can avoid adding TableGen patterns for FPs.
19128   if (RetVT.isFloatingPoint())
19129     Load = DAG.getNode(ISD::BITCAST, DL, RetVT, Load.getValue(0));
19130 
19131   return DAG.getMergeValues({Load, LoadChain}, DL);
19132 }
19133 
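// Combine SIGN_EXTEND_INREG with unsigned unpacks and with SVE load nodes,
// turning the latter into their sign-extending counterparts (e.g. GLD1 into
// GLD1S).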
19134 static SDValue
19135 performSignExtendInRegCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
19136                               SelectionDAG &DAG) {
19137   SDLoc DL(N);
19138   SDValue Src = N->getOperand(0);
19139   unsigned Opc = Src->getOpcode();
19140 
19141   // Sign extend of an unsigned unpack -> signed unpack
19142   if (Opc == AArch64ISD::UUNPKHI || Opc == AArch64ISD::UUNPKLO) {
19143 
19144     unsigned SOpc = Opc == AArch64ISD::UUNPKHI ? AArch64ISD::SUNPKHI
19145                                                : AArch64ISD::SUNPKLO;
19146 
19147     // Push the sign extend to the operand of the unpack
19148     // This is necessary where, for example, the operand of the unpack
19149     // is another unpack:
19150     // 4i32 sign_extend_inreg (4i32 uunpklo(8i16 uunpklo (16i8 opnd)), from 4i8)
19151     // ->
19152     // 4i32 sunpklo (8i16 sign_extend_inreg(8i16 uunpklo (16i8 opnd), from 8i8)
19153     // ->
19154     // 4i32 sunpklo(8i16 sunpklo(16i8 opnd))
19155     SDValue ExtOp = Src->getOperand(0);
19156     auto VT = cast<VTSDNode>(N->getOperand(1))->getVT();
19157     EVT EltTy = VT.getVectorElementType();
19158     (void)EltTy;
19159 
19160     assert((EltTy == MVT::i8 || EltTy == MVT::i16 || EltTy == MVT::i32) &&
19161            "Sign extending from an invalid type");
19162 
19163     EVT ExtVT = VT.getDoubleNumVectorElementsVT(*DAG.getContext());
19164 
19165     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ExtOp.getValueType(),
19166                               ExtOp, DAG.getValueType(ExtVT));
19167 
19168     return DAG.getNode(SOpc, DL, N->getValueType(0), Ext);
19169   }
19170 
19171   if (DCI.isBeforeLegalizeOps())
19172     return SDValue();
19173 
19174   if (!EnableCombineMGatherIntrinsics)
19175     return SDValue();
19176 
19177   // SVE load nodes (e.g. AArch64ISD::GLD1) are straightforward candidates
19178   // for DAG Combine with SIGN_EXTEND_INREG. Bail out for all other nodes.
19179   unsigned NewOpc;
19180   unsigned MemVTOpNum = 4;
19181   switch (Opc) {
19182   case AArch64ISD::LD1_MERGE_ZERO:
19183     NewOpc = AArch64ISD::LD1S_MERGE_ZERO;
19184     MemVTOpNum = 3;
19185     break;
19186   case AArch64ISD::LDNF1_MERGE_ZERO:
19187     NewOpc = AArch64ISD::LDNF1S_MERGE_ZERO;
19188     MemVTOpNum = 3;
19189     break;
19190   case AArch64ISD::LDFF1_MERGE_ZERO:
19191     NewOpc = AArch64ISD::LDFF1S_MERGE_ZERO;
19192     MemVTOpNum = 3;
19193     break;
19194   case AArch64ISD::GLD1_MERGE_ZERO:
19195     NewOpc = AArch64ISD::GLD1S_MERGE_ZERO;
19196     break;
19197   case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
19198     NewOpc = AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
19199     break;
19200   case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
19201     NewOpc = AArch64ISD::GLD1S_SXTW_MERGE_ZERO;
19202     break;
19203   case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
19204     NewOpc = AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO;
19205     break;
19206   case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
19207     NewOpc = AArch64ISD::GLD1S_UXTW_MERGE_ZERO;
19208     break;
19209   case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
19210     NewOpc = AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO;
19211     break;
19212   case AArch64ISD::GLD1_IMM_MERGE_ZERO:
19213     NewOpc = AArch64ISD::GLD1S_IMM_MERGE_ZERO;
19214     break;
19215   case AArch64ISD::GLDFF1_MERGE_ZERO:
19216     NewOpc = AArch64ISD::GLDFF1S_MERGE_ZERO;
19217     break;
19218   case AArch64ISD::GLDFF1_SCALED_MERGE_ZERO:
19219     NewOpc = AArch64ISD::GLDFF1S_SCALED_MERGE_ZERO;
19220     break;
19221   case AArch64ISD::GLDFF1_SXTW_MERGE_ZERO:
19222     NewOpc = AArch64ISD::GLDFF1S_SXTW_MERGE_ZERO;
19223     break;
19224   case AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO:
19225     NewOpc = AArch64ISD::GLDFF1S_SXTW_SCALED_MERGE_ZERO;
19226     break;
19227   case AArch64ISD::GLDFF1_UXTW_MERGE_ZERO:
19228     NewOpc = AArch64ISD::GLDFF1S_UXTW_MERGE_ZERO;
19229     break;
19230   case AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO:
19231     NewOpc = AArch64ISD::GLDFF1S_UXTW_SCALED_MERGE_ZERO;
19232     break;
19233   case AArch64ISD::GLDFF1_IMM_MERGE_ZERO:
19234     NewOpc = AArch64ISD::GLDFF1S_IMM_MERGE_ZERO;
19235     break;
19236   case AArch64ISD::GLDNT1_MERGE_ZERO:
19237     NewOpc = AArch64ISD::GLDNT1S_MERGE_ZERO;
19238     break;
19239   default:
19240     return SDValue();
19241   }
19242 
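  // The combine is only valid when the extension is from exactly the type
  // that was loaded from memory and the load has no other users.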
19243   EVT SignExtSrcVT = cast<VTSDNode>(N->getOperand(1))->getVT();
19244   EVT SrcMemVT = cast<VTSDNode>(Src->getOperand(MemVTOpNum))->getVT();
19245 
19246   if ((SignExtSrcVT != SrcMemVT) || !Src.hasOneUse())
19247     return SDValue();
19248 
19249   EVT DstVT = N->getValueType(0);
19250   SDVTList VTs = DAG.getVTList(DstVT, MVT::Other);
19251 
19252   SmallVector<SDValue, 5> Ops;
19253   for (unsigned I = 0; I < Src->getNumOperands(); ++I)
19254     Ops.push_back(Src->getOperand(I));
19255 
19256   SDValue ExtLoad = DAG.getNode(NewOpc, SDLoc(N), VTs, Ops);
19257   DCI.CombineTo(N, ExtLoad);
19258   DCI.CombineTo(Src.getNode(), ExtLoad, ExtLoad.getValue(1));
19259 
19260   // Return N so it doesn't get rechecked
19261   return SDValue(N, 0);
19262 }
19263 
19264 /// Legalize the gather prefetch (scalar + vector addressing mode) when the
19265 /// offset vector is an unpacked 32-bit scalable vector. The other cases (Offset
19266 /// != nxv2i32) do not need legalization.
19267 static SDValue legalizeSVEGatherPrefetchOffsVec(SDNode *N, SelectionDAG &DAG) {
19268   const unsigned OffsetPos = 4;
19269   SDValue Offset = N->getOperand(OffsetPos);
19270 
19271   // Not an unpacked vector, bail out.
19272   if (Offset.getValueType().getSimpleVT().SimpleTy != MVT::nxv2i32)
19273     return SDValue();
19274 
19275   // Extend the unpacked offset vector to 64-bit lanes.
19276   SDLoc DL(N);
19277   Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset);
19278   SmallVector<SDValue, 5> Ops(N->op_begin(), N->op_end());
19279   // Replace the offset operand with the 64-bit one.
19280   Ops[OffsetPos] = Offset;
19281 
19282   return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops);
19283 }
19284 
19285 /// Combines a node carrying the intrinsic
19286 /// `aarch64_sve_prf<T>_gather_scalar_offset` into a node that uses
19287 /// `aarch64_sve_prfb_gather_uxtw_index` when the scalar offset passed to
/// `aarch64_sve_prf<T>_gather_scalar_offset` is not a valid immediate for the
/// SVE gather prefetch instruction with vector plus immediate addressing mode.
19290 static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG,
19291                                                unsigned ScalarSizeInBytes) {
19292   const unsigned ImmPos = 4, OffsetPos = 3;
19293   // No need to combine the node if the immediate is valid...
19294   if (isValidImmForSVEVecImmAddrMode(N->getOperand(ImmPos), ScalarSizeInBytes))
19295     return SDValue();
19296 
  // ...otherwise swap the vector of bases with the scalar offset...
19298   SmallVector<SDValue, 5> Ops(N->op_begin(), N->op_end());
19299   std::swap(Ops[ImmPos], Ops[OffsetPos]);
19300   // ...and remap the intrinsic `aarch64_sve_prf<T>_gather_scalar_offset` to
19301   // `aarch64_sve_prfb_gather_uxtw_index`.
19302   SDLoc DL(N);
19303   Ops[1] = DAG.getConstant(Intrinsic::aarch64_sve_prfb_gather_uxtw_index, DL,
19304                            MVT::i64);
19305 
19306   return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops);
19307 }
19308 
// Return true if the vector operation can guarantee that only the first lane
// of its result contains data, with all bits in the other lanes set to zero.
19311 static bool isLanes1toNKnownZero(SDValue Op) {
19312   switch (Op.getOpcode()) {
19313   default:
19314     return false;
19315   case AArch64ISD::ANDV_PRED:
19316   case AArch64ISD::EORV_PRED:
19317   case AArch64ISD::FADDA_PRED:
19318   case AArch64ISD::FADDV_PRED:
19319   case AArch64ISD::FMAXNMV_PRED:
19320   case AArch64ISD::FMAXV_PRED:
19321   case AArch64ISD::FMINNMV_PRED:
19322   case AArch64ISD::FMINV_PRED:
19323   case AArch64ISD::ORV_PRED:
19324   case AArch64ISD::SADDV_PRED:
19325   case AArch64ISD::SMAXV_PRED:
19326   case AArch64ISD::SMINV_PRED:
19327   case AArch64ISD::UADDV_PRED:
19328   case AArch64ISD::UMAXV_PRED:
19329   case AArch64ISD::UMINV_PRED:
19330     return true;
19331   }
19332 }
19333 
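// Remove the explicit zeroing of lanes 1-N when the value inserted into lane 0
// was extracted from a node that already guarantees those lanes are zero. For
// example (a sketch of the pattern being matched):
//   insert_vector_elt (splat 0), (extract_vector_elt (UADDV_PRED pg, z), 0), 0
// can be replaced by the UADDV_PRED node itself.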
19334 static SDValue removeRedundantInsertVectorElt(SDNode *N) {
19335   assert(N->getOpcode() == ISD::INSERT_VECTOR_ELT && "Unexpected node!");
19336   SDValue InsertVec = N->getOperand(0);
19337   SDValue InsertElt = N->getOperand(1);
19338   SDValue InsertIdx = N->getOperand(2);
19339 
19340   // We only care about inserts into the first element...
19341   if (!isNullConstant(InsertIdx))
19342     return SDValue();
  // ...of a zeroed vector...
19344   if (!ISD::isConstantSplatVectorAllZeros(InsertVec.getNode()))
19345     return SDValue();
19346   // ...where the inserted data was previously extracted...
19347   if (InsertElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
19348     return SDValue();
19349 
19350   SDValue ExtractVec = InsertElt.getOperand(0);
19351   SDValue ExtractIdx = InsertElt.getOperand(1);
19352 
19353   // ...from the first element of a vector.
19354   if (!isNullConstant(ExtractIdx))
19355     return SDValue();
19356 
19357   // If we get here we are effectively trying to zero lanes 1-N of a vector.
19358 
19359   // Ensure there's no type conversion going on.
19360   if (N->getValueType(0) != ExtractVec.getValueType())
19361     return SDValue();
19362 
19363   if (!isLanes1toNKnownZero(ExtractVec))
19364     return SDValue();
19365 
19366   // The explicit zeroing is redundant.
19367   return ExtractVec;
19368 }
19369 
19370 static SDValue
19371 performInsertVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
19372   if (SDValue Res = removeRedundantInsertVectorElt(N))
19373     return Res;
19374 
19375   return performPostLD1Combine(N, DCI, true);
19376 }
19377 
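// Lower a floating-point VECTOR_SPLICE by bitcasting the operands to the
// equivalent integer type, extending to a packed SVE container type if
// necessary, performing the splice there and casting the result back.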
19378 static SDValue performSVESpliceCombine(SDNode *N, SelectionDAG &DAG) {
19379   EVT Ty = N->getValueType(0);
19380   if (Ty.isInteger())
19381     return SDValue();
19382 
19383   EVT IntTy = Ty.changeVectorElementTypeToInteger();
19384   EVT ExtIntTy = getPackedSVEVectorVT(IntTy.getVectorElementCount());
19385   if (ExtIntTy.getVectorElementType().getScalarSizeInBits() <
19386       IntTy.getVectorElementType().getScalarSizeInBits())
19387     return SDValue();
19388 
19389   SDLoc DL(N);
19390   SDValue LHS = DAG.getAnyExtOrTrunc(DAG.getBitcast(IntTy, N->getOperand(0)),
19391                                      DL, ExtIntTy);
19392   SDValue RHS = DAG.getAnyExtOrTrunc(DAG.getBitcast(IntTy, N->getOperand(1)),
19393                                      DL, ExtIntTy);
19394   SDValue Idx = N->getOperand(2);
19395   SDValue Splice = DAG.getNode(ISD::VECTOR_SPLICE, DL, ExtIntTy, LHS, RHS, Idx);
19396   SDValue Trunc = DAG.getAnyExtOrTrunc(Splice, DL, IntTy);
19397   return DAG.getBitcast(Ty, Trunc);
19398 }
19399 
19400 static SDValue performFPExtendCombine(SDNode *N, SelectionDAG &DAG,
19401                                       TargetLowering::DAGCombinerInfo &DCI,
19402                                       const AArch64Subtarget *Subtarget) {
19403   SDValue N0 = N->getOperand(0);
19404   EVT VT = N->getValueType(0);
19405 
19406   // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded.
19407   if (N->hasOneUse() && N->use_begin()->getOpcode() == ISD::FP_ROUND)
19408     return SDValue();
19409 
19410   // fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
19411   // We purposefully don't care about legality of the nodes here as we know
19412   // they can be split down into something legal.
19413   if (DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(N0.getNode()) &&
19414       N0.hasOneUse() && Subtarget->useSVEForFixedLengthVectors() &&
19415       VT.isFixedLengthVector() &&
19416       VT.getFixedSizeInBits() >= Subtarget->getMinSVEVectorSizeInBits()) {
19417     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
19418     SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
19419                                      LN0->getChain(), LN0->getBasePtr(),
19420                                      N0.getValueType(), LN0->getMemOperand());
19421     DCI.CombineTo(N, ExtLoad);
19422     DCI.CombineTo(N0.getNode(),
19423                   DAG.getNode(ISD::FP_ROUND, SDLoc(N0), N0.getValueType(),
19424                               ExtLoad, DAG.getIntPtrConstant(1, SDLoc(N0))),
19425                   ExtLoad.getValue(1));
19426     return SDValue(N, 0); // Return N so it doesn't get rechecked!
19427   }
19428 
19429   return SDValue();
19430 }
19431 
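// For plain SVE, expand AArch64ISD::BSP as (Mask & In1) | (~Mask & In2);
// SVE2 and SME targets keep the BSP node.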
19432 static SDValue performBSPExpandForSVE(SDNode *N, SelectionDAG &DAG,
19433                                       const AArch64Subtarget *Subtarget,
19434                                       bool fixedSVEVectorVT) {
19435   EVT VT = N->getValueType(0);
19436 
  // Don't expand for SVE2 (or SME)
19438   if (!VT.isScalableVector() || Subtarget->hasSVE2() || Subtarget->hasSME())
19439     return SDValue();
19440 
19441   // Don't expand for NEON
19442   if (VT.isFixedLengthVector() && !fixedSVEVectorVT)
19443     return SDValue();
19444 
19445   SDLoc DL(N);
19446 
19447   SDValue Mask = N->getOperand(0);
19448   SDValue In1 = N->getOperand(1);
19449   SDValue In2 = N->getOperand(2);
19450 
19451   SDValue InvMask = DAG.getNOT(DL, Mask, VT);
19452   SDValue Sel = DAG.getNode(ISD::AND, DL, VT, Mask, In1);
19453   SDValue SelInv = DAG.getNode(ISD::AND, DL, VT, InvMask, In2);
19454   return DAG.getNode(ISD::OR, DL, VT, Sel, SelInv);
19455 }
19456 
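// Sink a bitcast past DUPLANE128:
//   duplane128 (insert_subvector undef, (bitcast X), I), I
//     -> bitcast (duplane128 (insert_subvector undef, X, I), I)
// so that the duplication is performed on the 128-bit subvector's original
// element type.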
19457 static SDValue performDupLane128Combine(SDNode *N, SelectionDAG &DAG) {
19458   EVT VT = N->getValueType(0);
19459 
19460   SDValue Insert = N->getOperand(0);
19461   if (Insert.getOpcode() != ISD::INSERT_SUBVECTOR)
19462     return SDValue();
19463 
19464   if (!Insert.getOperand(0).isUndef())
19465     return SDValue();
19466 
19467   uint64_t IdxInsert = Insert.getConstantOperandVal(2);
19468   uint64_t IdxDupLane = N->getConstantOperandVal(1);
19469   if (IdxInsert != IdxDupLane)
19470     return SDValue();
19471 
19472   SDValue Bitcast = Insert.getOperand(1);
19473   if (Bitcast.getOpcode() != ISD::BITCAST)
19474     return SDValue();
19475 
19476   SDValue Subvec = Bitcast.getOperand(0);
19477   EVT SubvecVT = Subvec.getValueType();
19478   if (!SubvecVT.is128BitVector())
19479     return SDValue();
19480   EVT NewSubvecVT =
19481       getPackedSVEVectorVT(Subvec.getValueType().getVectorElementType());
19482 
19483   SDLoc DL(N);
19484   SDValue NewInsert =
19485       DAG.getNode(ISD::INSERT_SUBVECTOR, DL, NewSubvecVT,
19486                   DAG.getUNDEF(NewSubvecVT), Subvec, Insert->getOperand(2));
19487   SDValue NewDuplane128 = DAG.getNode(AArch64ISD::DUPLANE128, DL, NewSubvecVT,
19488                                       NewInsert, N->getOperand(1));
19489   return DAG.getNode(ISD::BITCAST, DL, VT, NewDuplane128);
19490 }
19491 
19492 SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
19493                                                  DAGCombinerInfo &DCI) const {
19494   SelectionDAG &DAG = DCI.DAG;
19495   switch (N->getOpcode()) {
19496   default:
19497     LLVM_DEBUG(dbgs() << "Custom combining: skipping\n");
19498     break;
19499   case ISD::ADD:
19500   case ISD::SUB:
19501     return performAddSubCombine(N, DCI, DAG);
19502   case ISD::BUILD_VECTOR:
19503     return performBuildVectorCombine(N, DCI, DAG);
19504   case AArch64ISD::ANDS:
19505     return performFlagSettingCombine(N, DCI, ISD::AND);
19506   case AArch64ISD::ADC:
19507     if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ true))
19508       return R;
19509     return foldADCToCINC(N, DAG);
19510   case AArch64ISD::SBC:
19511     return foldOverflowCheck(N, DAG, /* IsAdd */ false);
19512   case AArch64ISD::ADCS:
19513     if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ true))
19514       return R;
19515     return performFlagSettingCombine(N, DCI, AArch64ISD::ADC);
19516   case AArch64ISD::SBCS:
19517     if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ false))
19518       return R;
19519     return performFlagSettingCombine(N, DCI, AArch64ISD::SBC);
19520   case ISD::XOR:
19521     return performXorCombine(N, DAG, DCI, Subtarget);
19522   case ISD::MUL:
19523     return performMulCombine(N, DAG, DCI, Subtarget);
19524   case ISD::SINT_TO_FP:
19525   case ISD::UINT_TO_FP:
19526     return performIntToFpCombine(N, DAG, Subtarget);
19527   case ISD::FP_TO_SINT:
19528   case ISD::FP_TO_UINT:
19529   case ISD::FP_TO_SINT_SAT:
19530   case ISD::FP_TO_UINT_SAT:
19531     return performFpToIntCombine(N, DAG, DCI, Subtarget);
19532   case ISD::FDIV:
19533     return performFDivCombine(N, DAG, DCI, Subtarget);
19534   case ISD::OR:
19535     return performORCombine(N, DCI, Subtarget);
19536   case ISD::AND:
19537     return performANDCombine(N, DCI);
19538   case ISD::INTRINSIC_WO_CHAIN:
19539     return performIntrinsicCombine(N, DCI, Subtarget);
19540   case ISD::ANY_EXTEND:
19541   case ISD::ZERO_EXTEND:
19542   case ISD::SIGN_EXTEND:
19543     return performExtendCombine(N, DCI, DAG);
19544   case ISD::SIGN_EXTEND_INREG:
19545     return performSignExtendInRegCombine(N, DCI, DAG);
19546   case ISD::CONCAT_VECTORS:
19547     return performConcatVectorsCombine(N, DCI, DAG);
19548   case ISD::EXTRACT_SUBVECTOR:
19549     return performExtractSubvectorCombine(N, DCI, DAG);
19550   case ISD::INSERT_SUBVECTOR:
19551     return performInsertSubvectorCombine(N, DCI, DAG);
19552   case ISD::SELECT:
19553     return performSelectCombine(N, DCI);
19554   case ISD::VSELECT:
19555     return performVSelectCombine(N, DCI.DAG);
19556   case ISD::SETCC:
19557     return performSETCCCombine(N, DCI, DAG);
19558   case ISD::LOAD:
19559     if (performTBISimplification(N->getOperand(1), DCI, DAG))
19560       return SDValue(N, 0);
19561     break;
19562   case ISD::STORE:
19563     return performSTORECombine(N, DCI, DAG, Subtarget);
19564   case ISD::MSTORE:
19565     return performMSTORECombine(N, DCI, DAG, Subtarget);
19566   case ISD::MGATHER:
19567   case ISD::MSCATTER:
19568     return performMaskedGatherScatterCombine(N, DCI, DAG);
19569   case ISD::VECTOR_SPLICE:
19570     return performSVESpliceCombine(N, DAG);
19571   case ISD::FP_EXTEND:
19572     return performFPExtendCombine(N, DAG, DCI, Subtarget);
19573   case AArch64ISD::BRCOND:
19574     return performBRCONDCombine(N, DCI, DAG);
19575   case AArch64ISD::TBNZ:
19576   case AArch64ISD::TBZ:
19577     return performTBZCombine(N, DCI, DAG);
19578   case AArch64ISD::CSEL:
19579     return performCSELCombine(N, DCI, DAG);
19580   case AArch64ISD::DUP:
19581     return performDUPCombine(N, DCI);
19582   case AArch64ISD::DUPLANE128:
19583     return performDupLane128Combine(N, DAG);
19584   case AArch64ISD::NVCAST:
19585     return performNVCASTCombine(N);
19586   case AArch64ISD::SPLICE:
19587     return performSpliceCombine(N, DAG);
19588   case AArch64ISD::UUNPKLO:
19589   case AArch64ISD::UUNPKHI:
19590     return performUnpackCombine(N, DAG, Subtarget);
19591   case AArch64ISD::UZP1:
19592     return performUzpCombine(N, DAG);
19593   case AArch64ISD::SETCC_MERGE_ZERO:
19594     return performSetccMergeZeroCombine(N, DCI);
19595   case AArch64ISD::GLD1_MERGE_ZERO:
19596   case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
19597   case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
19598   case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
19599   case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
19600   case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
19601   case AArch64ISD::GLD1_IMM_MERGE_ZERO:
19602   case AArch64ISD::GLD1S_MERGE_ZERO:
19603   case AArch64ISD::GLD1S_SCALED_MERGE_ZERO:
19604   case AArch64ISD::GLD1S_UXTW_MERGE_ZERO:
19605   case AArch64ISD::GLD1S_SXTW_MERGE_ZERO:
19606   case AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO:
19607   case AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO:
19608   case AArch64ISD::GLD1S_IMM_MERGE_ZERO:
19609     return performGLD1Combine(N, DAG);
19610   case AArch64ISD::VASHR:
19611   case AArch64ISD::VLSHR:
19612     return performVectorShiftCombine(N, *this, DCI);
19613   case AArch64ISD::SUNPKLO:
19614     return performSunpkloCombine(N, DAG);
19615   case AArch64ISD::BSP:
19616     return performBSPExpandForSVE(
19617         N, DAG, Subtarget, useSVEForFixedLengthVectorVT(N->getValueType(0)));
19618   case ISD::INSERT_VECTOR_ELT:
19619     return performInsertVectorEltCombine(N, DCI);
19620   case ISD::EXTRACT_VECTOR_ELT:
19621     return performExtractVectorEltCombine(N, DCI, Subtarget);
19622   case ISD::VECREDUCE_ADD:
19623     return performVecReduceAddCombine(N, DCI.DAG, Subtarget);
19624   case AArch64ISD::UADDV:
19625     return performUADDVCombine(N, DAG);
19626   case AArch64ISD::SMULL:
19627   case AArch64ISD::UMULL:
19628     return tryCombineLongOpWithDup(Intrinsic::not_intrinsic, N, DCI, DAG);
19629   case ISD::INTRINSIC_VOID:
19630   case ISD::INTRINSIC_W_CHAIN:
19631     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
19632     case Intrinsic::aarch64_sve_prfb_gather_scalar_offset:
19633       return combineSVEPrefetchVecBaseImmOff(N, DAG, 1 /*=ScalarSizeInBytes*/);
19634     case Intrinsic::aarch64_sve_prfh_gather_scalar_offset:
19635       return combineSVEPrefetchVecBaseImmOff(N, DAG, 2 /*=ScalarSizeInBytes*/);
19636     case Intrinsic::aarch64_sve_prfw_gather_scalar_offset:
19637       return combineSVEPrefetchVecBaseImmOff(N, DAG, 4 /*=ScalarSizeInBytes*/);
19638     case Intrinsic::aarch64_sve_prfd_gather_scalar_offset:
19639       return combineSVEPrefetchVecBaseImmOff(N, DAG, 8 /*=ScalarSizeInBytes*/);
19640     case Intrinsic::aarch64_sve_prfb_gather_uxtw_index:
19641     case Intrinsic::aarch64_sve_prfb_gather_sxtw_index:
19642     case Intrinsic::aarch64_sve_prfh_gather_uxtw_index:
19643     case Intrinsic::aarch64_sve_prfh_gather_sxtw_index:
19644     case Intrinsic::aarch64_sve_prfw_gather_uxtw_index:
19645     case Intrinsic::aarch64_sve_prfw_gather_sxtw_index:
19646     case Intrinsic::aarch64_sve_prfd_gather_uxtw_index:
19647     case Intrinsic::aarch64_sve_prfd_gather_sxtw_index:
19648       return legalizeSVEGatherPrefetchOffsVec(N, DAG);
19649     case Intrinsic::aarch64_neon_ld2:
19650     case Intrinsic::aarch64_neon_ld3:
19651     case Intrinsic::aarch64_neon_ld4:
19652     case Intrinsic::aarch64_neon_ld1x2:
19653     case Intrinsic::aarch64_neon_ld1x3:
19654     case Intrinsic::aarch64_neon_ld1x4:
19655     case Intrinsic::aarch64_neon_ld2lane:
19656     case Intrinsic::aarch64_neon_ld3lane:
19657     case Intrinsic::aarch64_neon_ld4lane:
19658     case Intrinsic::aarch64_neon_ld2r:
19659     case Intrinsic::aarch64_neon_ld3r:
19660     case Intrinsic::aarch64_neon_ld4r:
19661     case Intrinsic::aarch64_neon_st2:
19662     case Intrinsic::aarch64_neon_st3:
19663     case Intrinsic::aarch64_neon_st4:
19664     case Intrinsic::aarch64_neon_st1x2:
19665     case Intrinsic::aarch64_neon_st1x3:
19666     case Intrinsic::aarch64_neon_st1x4:
19667     case Intrinsic::aarch64_neon_st2lane:
19668     case Intrinsic::aarch64_neon_st3lane:
19669     case Intrinsic::aarch64_neon_st4lane:
19670       return performNEONPostLDSTCombine(N, DCI, DAG);
19671     case Intrinsic::aarch64_sve_ldnt1:
19672       return performLDNT1Combine(N, DAG);
19673     case Intrinsic::aarch64_sve_ld1rq:
19674       return performLD1ReplicateCombine<AArch64ISD::LD1RQ_MERGE_ZERO>(N, DAG);
19675     case Intrinsic::aarch64_sve_ld1ro:
19676       return performLD1ReplicateCombine<AArch64ISD::LD1RO_MERGE_ZERO>(N, DAG);
19677     case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset:
19678       return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO);
19679     case Intrinsic::aarch64_sve_ldnt1_gather:
19680       return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO);
19681     case Intrinsic::aarch64_sve_ldnt1_gather_index:
19682       return performGatherLoadCombine(N, DAG,
19683                                       AArch64ISD::GLDNT1_INDEX_MERGE_ZERO);
19684     case Intrinsic::aarch64_sve_ldnt1_gather_uxtw:
19685       return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO);
19686     case Intrinsic::aarch64_sve_ld1:
19687       return performLD1Combine(N, DAG, AArch64ISD::LD1_MERGE_ZERO);
19688     case Intrinsic::aarch64_sve_ldnf1:
19689       return performLD1Combine(N, DAG, AArch64ISD::LDNF1_MERGE_ZERO);
19690     case Intrinsic::aarch64_sve_ldff1:
19691       return performLD1Combine(N, DAG, AArch64ISD::LDFF1_MERGE_ZERO);
19692     case Intrinsic::aarch64_sve_st1:
19693       return performST1Combine(N, DAG);
19694     case Intrinsic::aarch64_sve_stnt1:
19695       return performSTNT1Combine(N, DAG);
19696     case Intrinsic::aarch64_sve_stnt1_scatter_scalar_offset:
19697       return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED);
19698     case Intrinsic::aarch64_sve_stnt1_scatter_uxtw:
19699       return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED);
19700     case Intrinsic::aarch64_sve_stnt1_scatter:
19701       return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED);
19702     case Intrinsic::aarch64_sve_stnt1_scatter_index:
19703       return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_INDEX_PRED);
19704     case Intrinsic::aarch64_sve_ld1_gather:
19705       return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_MERGE_ZERO);
19706     case Intrinsic::aarch64_sve_ld1_gather_index:
19707       return performGatherLoadCombine(N, DAG,
19708                                       AArch64ISD::GLD1_SCALED_MERGE_ZERO);
19709     case Intrinsic::aarch64_sve_ld1_gather_sxtw:
19710       return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_SXTW_MERGE_ZERO,
19711                                       /*OnlyPackedOffsets=*/false);
19712     case Intrinsic::aarch64_sve_ld1_gather_uxtw:
19713       return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_UXTW_MERGE_ZERO,
19714                                       /*OnlyPackedOffsets=*/false);
19715     case Intrinsic::aarch64_sve_ld1_gather_sxtw_index:
19716       return performGatherLoadCombine(N, DAG,
19717                                       AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO,
19718                                       /*OnlyPackedOffsets=*/false);
19719     case Intrinsic::aarch64_sve_ld1_gather_uxtw_index:
19720       return performGatherLoadCombine(N, DAG,
19721                                       AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO,
19722                                       /*OnlyPackedOffsets=*/false);
19723     case Intrinsic::aarch64_sve_ld1_gather_scalar_offset:
19724       return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_IMM_MERGE_ZERO);
19725     case Intrinsic::aarch64_sve_ldff1_gather:
19726       return performGatherLoadCombine(N, DAG, AArch64ISD::GLDFF1_MERGE_ZERO);
19727     case Intrinsic::aarch64_sve_ldff1_gather_index:
19728       return performGatherLoadCombine(N, DAG,
19729                                       AArch64ISD::GLDFF1_SCALED_MERGE_ZERO);
19730     case Intrinsic::aarch64_sve_ldff1_gather_sxtw:
19731       return performGatherLoadCombine(N, DAG,
19732                                       AArch64ISD::GLDFF1_SXTW_MERGE_ZERO,
19733                                       /*OnlyPackedOffsets=*/false);
19734     case Intrinsic::aarch64_sve_ldff1_gather_uxtw:
19735       return performGatherLoadCombine(N, DAG,
19736                                       AArch64ISD::GLDFF1_UXTW_MERGE_ZERO,
19737                                       /*OnlyPackedOffsets=*/false);
19738     case Intrinsic::aarch64_sve_ldff1_gather_sxtw_index:
19739       return performGatherLoadCombine(N, DAG,
19740                                       AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO,
19741                                       /*OnlyPackedOffsets=*/false);
19742     case Intrinsic::aarch64_sve_ldff1_gather_uxtw_index:
19743       return performGatherLoadCombine(N, DAG,
19744                                       AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO,
19745                                       /*OnlyPackedOffsets=*/false);
19746     case Intrinsic::aarch64_sve_ldff1_gather_scalar_offset:
19747       return performGatherLoadCombine(N, DAG,
19748                                       AArch64ISD::GLDFF1_IMM_MERGE_ZERO);
19749     case Intrinsic::aarch64_sve_st1_scatter:
19750       return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_PRED);
19751     case Intrinsic::aarch64_sve_st1_scatter_index:
19752       return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_SCALED_PRED);
19753     case Intrinsic::aarch64_sve_st1_scatter_sxtw:
19754       return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_SXTW_PRED,
19755                                         /*OnlyPackedOffsets=*/false);
19756     case Intrinsic::aarch64_sve_st1_scatter_uxtw:
19757       return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_UXTW_PRED,
19758                                         /*OnlyPackedOffsets=*/false);
19759     case Intrinsic::aarch64_sve_st1_scatter_sxtw_index:
19760       return performScatterStoreCombine(N, DAG,
19761                                         AArch64ISD::SST1_SXTW_SCALED_PRED,
19762                                         /*OnlyPackedOffsets=*/false);
19763     case Intrinsic::aarch64_sve_st1_scatter_uxtw_index:
19764       return performScatterStoreCombine(N, DAG,
19765                                         AArch64ISD::SST1_UXTW_SCALED_PRED,
19766                                         /*OnlyPackedOffsets=*/false);
19767     case Intrinsic::aarch64_sve_st1_scatter_scalar_offset:
19768       return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_IMM_PRED);
19769     case Intrinsic::aarch64_sve_tuple_get: {
19770       SDLoc DL(N);
19771       SDValue Chain = N->getOperand(0);
19772       SDValue Src1 = N->getOperand(2);
19773       SDValue Idx = N->getOperand(3);
19774 
19775       uint64_t IdxConst = cast<ConstantSDNode>(Idx)->getZExtValue();
19776       EVT ResVT = N->getValueType(0);
19777       uint64_t NumLanes = ResVT.getVectorElementCount().getKnownMinValue();
19778       SDValue ExtIdx = DAG.getVectorIdxConstant(IdxConst * NumLanes, DL);
19779       SDValue Val =
19780           DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResVT, Src1, ExtIdx);
19781       return DAG.getMergeValues({Val, Chain}, DL);
19782     }
19783     case Intrinsic::aarch64_sve_tuple_set: {
19784       SDLoc DL(N);
19785       SDValue Chain = N->getOperand(0);
19786       SDValue Tuple = N->getOperand(2);
19787       SDValue Idx = N->getOperand(3);
19788       SDValue Vec = N->getOperand(4);
19789 
19790       EVT TupleVT = Tuple.getValueType();
19791       uint64_t TupleLanes = TupleVT.getVectorElementCount().getKnownMinValue();
19792 
19793       uint64_t IdxConst = cast<ConstantSDNode>(Idx)->getZExtValue();
19794       uint64_t NumLanes =
19795           Vec.getValueType().getVectorElementCount().getKnownMinValue();
19796 
19797       if ((TupleLanes % NumLanes) != 0)
19798         report_fatal_error("invalid tuple vector!");
19799 
19800       uint64_t NumVecs = TupleLanes / NumLanes;
19801 
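      // Rebuild the tuple as a concatenation of its parts, substituting Vec
      // for the part at IdxConst.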
19802       SmallVector<SDValue, 4> Opnds;
19803       for (unsigned I = 0; I < NumVecs; ++I) {
19804         if (I == IdxConst)
19805           Opnds.push_back(Vec);
19806         else {
19807           SDValue ExtIdx = DAG.getVectorIdxConstant(I * NumLanes, DL);
19808           Opnds.push_back(DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
19809                                       Vec.getValueType(), Tuple, ExtIdx));
19810         }
19811       }
19812       SDValue Concat =
19813           DAG.getNode(ISD::CONCAT_VECTORS, DL, Tuple.getValueType(), Opnds);
19814       return DAG.getMergeValues({Concat, Chain}, DL);
19815     }
19816     case Intrinsic::aarch64_sve_tuple_create2:
19817     case Intrinsic::aarch64_sve_tuple_create3:
19818     case Intrinsic::aarch64_sve_tuple_create4: {
19819       SDLoc DL(N);
19820       SDValue Chain = N->getOperand(0);
19821 
19822       SmallVector<SDValue, 4> Opnds;
19823       for (unsigned I = 2; I < N->getNumOperands(); ++I)
19824         Opnds.push_back(N->getOperand(I));
19825 
19826       EVT VT = Opnds[0].getValueType();
19827       EVT EltVT = VT.getVectorElementType();
19828       EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
19829                                     VT.getVectorElementCount() *
19830                                         (N->getNumOperands() - 2));
19831       SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, DestVT, Opnds);
19832       return DAG.getMergeValues({Concat, Chain}, DL);
19833     }
19834     case Intrinsic::aarch64_sve_ld2:
19835     case Intrinsic::aarch64_sve_ld3:
19836     case Intrinsic::aarch64_sve_ld4: {
19837       SDLoc DL(N);
19838       SDValue Chain = N->getOperand(0);
19839       SDValue Mask = N->getOperand(2);
19840       SDValue BasePtr = N->getOperand(3);
19841       SDValue LoadOps[] = {Chain, Mask, BasePtr};
19842       unsigned IntrinsicID =
19843           cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
19844       SDValue Result =
19845           LowerSVEStructLoad(IntrinsicID, LoadOps, N->getValueType(0), DAG, DL);
19846       return DAG.getMergeValues({Result, Chain}, DL);
19847     }
19848     case Intrinsic::aarch64_rndr:
19849     case Intrinsic::aarch64_rndrrs: {
19850       unsigned IntrinsicID =
19851           cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
19852       auto Register =
19853           (IntrinsicID == Intrinsic::aarch64_rndr ? AArch64SysReg::RNDR
19854                                                   : AArch64SysReg::RNDRRS);
19855       SDLoc DL(N);
19856       SDValue A = DAG.getNode(
19857           AArch64ISD::MRS, DL, DAG.getVTList(MVT::i64, MVT::Glue, MVT::Other),
19858           N->getOperand(0), DAG.getConstant(Register, DL, MVT::i64));
19859       SDValue B = DAG.getNode(
19860           AArch64ISD::CSINC, DL, MVT::i32, DAG.getConstant(0, DL, MVT::i32),
19861           DAG.getConstant(0, DL, MVT::i32),
19862           DAG.getConstant(AArch64CC::NE, DL, MVT::i32), A.getValue(1));
19863       return DAG.getMergeValues(
19864           {A, DAG.getZExtOrTrunc(B, DL, MVT::i1), A.getValue(2)}, DL);
19865     }
19866     default:
19867       break;
19868     }
19869     break;
19870   case ISD::GlobalAddress:
19871     return performGlobalAddressCombine(N, DAG, Subtarget, getTargetMachine());
19872   }
19873   return SDValue();
19874 }
19875 
// Check if the return value is only used as a return value, as otherwise
// we can't perform a tail-call. In particular, we need to check for
19878 // target ISD nodes that are returns and any other "odd" constructs
19879 // that the generic analysis code won't necessarily catch.
19880 bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,
19881                                                SDValue &Chain) const {
19882   if (N->getNumValues() != 1)
19883     return false;
19884   if (!N->hasNUsesOfValue(1, 0))
19885     return false;
19886 
19887   SDValue TCChain = Chain;
19888   SDNode *Copy = *N->use_begin();
19889   if (Copy->getOpcode() == ISD::CopyToReg) {
19890     // If the copy has a glue operand, we conservatively assume it isn't safe to
19891     // perform a tail call.
19892     if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() ==
19893         MVT::Glue)
19894       return false;
19895     TCChain = Copy->getOperand(0);
19896   } else if (Copy->getOpcode() != ISD::FP_EXTEND)
19897     return false;
19898 
19899   bool HasRet = false;
19900   for (SDNode *Node : Copy->uses()) {
19901     if (Node->getOpcode() != AArch64ISD::RET_FLAG)
19902       return false;
19903     HasRet = true;
19904   }
19905 
19906   if (!HasRet)
19907     return false;
19908 
19909   Chain = TCChain;
19910   return true;
19911 }
19912 
// Return whether an instruction can potentially be optimized to a tail
19914 // call. This will cause the optimizers to attempt to move, or duplicate,
19915 // return instructions to help enable tail call optimizations for this
19916 // instruction.
19917 bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
19918   return CI->isTailCall();
19919 }
19920 
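// Try to split an ADD/SUB pointer computation into a base and an immediate
// offset that the pre/post-indexed load and store instructions can encode.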
19921 bool AArch64TargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base,
19922                                                    SDValue &Offset,
19923                                                    ISD::MemIndexedMode &AM,
19924                                                    bool &IsInc,
19925                                                    SelectionDAG &DAG) const {
19926   if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB)
19927     return false;
19928 
19929   Base = Op->getOperand(0);
  // All of the indexed addressing mode instructions take a signed
  // 9-bit immediate offset.
19932   if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
19933     int64_t RHSC = RHS->getSExtValue();
19934     if (Op->getOpcode() == ISD::SUB)
19935       RHSC = -(uint64_t)RHSC;
19936     if (!isInt<9>(RHSC))
19937       return false;
19938     IsInc = (Op->getOpcode() == ISD::ADD);
19939     Offset = Op->getOperand(1);
19940     return true;
19941   }
19942   return false;
19943 }
19944 
19945 bool AArch64TargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
19946                                                       SDValue &Offset,
19947                                                       ISD::MemIndexedMode &AM,
19948                                                       SelectionDAG &DAG) const {
19949   EVT VT;
19950   SDValue Ptr;
19951   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
19952     VT = LD->getMemoryVT();
19953     Ptr = LD->getBasePtr();
19954   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
19955     VT = ST->getMemoryVT();
19956     Ptr = ST->getBasePtr();
19957   } else
19958     return false;
19959 
19960   bool IsInc;
19961   if (!getIndexedAddressParts(Ptr.getNode(), Base, Offset, AM, IsInc, DAG))
19962     return false;
19963   AM = IsInc ? ISD::PRE_INC : ISD::PRE_DEC;
19964   return true;
19965 }
19966 
19967 bool AArch64TargetLowering::getPostIndexedAddressParts(
19968     SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset,
19969     ISD::MemIndexedMode &AM, SelectionDAG &DAG) const {
19970   EVT VT;
19971   SDValue Ptr;
19972   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
19973     VT = LD->getMemoryVT();
19974     Ptr = LD->getBasePtr();
19975   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
19976     VT = ST->getMemoryVT();
19977     Ptr = ST->getBasePtr();
19978   } else
19979     return false;
19980 
19981   bool IsInc;
19982   if (!getIndexedAddressParts(Op, Base, Offset, AM, IsInc, DAG))
19983     return false;
19984   // Post-indexing updates the base, so it's not a valid transform
19985   // if that's not the same as the load's pointer.
19986   if (Ptr != Base)
19987     return false;
19988   AM = IsInc ? ISD::POST_INC : ISD::POST_DEC;
19989   return true;
19990 }
19991 
19992 void AArch64TargetLowering::ReplaceBITCASTResults(
19993     SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
19994   SDLoc DL(N);
19995   SDValue Op = N->getOperand(0);
19996   EVT VT = N->getValueType(0);
19997   EVT SrcVT = Op.getValueType();
19998 
19999   if (VT.isScalableVector() && !isTypeLegal(VT) && isTypeLegal(SrcVT)) {
20000     assert(!VT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
20001            "Expected fp->int bitcast!");
20002 
20003     // Bitcasting between unpacked vector types of different element counts is
20004     // not a NOP because the live elements are laid out differently.
20005     //                01234567
20006     // e.g. nxv2i32 = XX??XX??
20007     //      nxv4f16 = X?X?X?X?
20008     if (VT.getVectorElementCount() != SrcVT.getVectorElementCount())
20009       return;
20010 
20011     SDValue CastResult = getSVESafeBitCast(getSVEContainerType(VT), Op, DAG);
20012     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, CastResult));
20013     return;
20014   }
20015 
20016   if (VT != MVT::i16 || (SrcVT != MVT::f16 && SrcVT != MVT::bf16))
20017     return;
20018 
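  // f16/bf16 -> i16 bitcasts are lowered by inserting the value into the hsub
  // subregister of an f32, bitcasting that f32 to i32 and truncating the
  // result to i16.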
20019   Op = SDValue(
20020       DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f32,
20021                          DAG.getUNDEF(MVT::i32), Op,
20022                          DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
20023       0);
20024   Op = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op);
20025   Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Op));
20026 }
20027 
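// Replace a 256-bit ADD/FADD of X and a <1,0,3,2,...> shuffle of X (an FADD
// must be reassociable) with an ADDP of the two halves of X, followed by a
// shuffle that duplicates each pairwise sum into both lanes of its pair.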
20028 static void ReplaceAddWithADDP(SDNode *N, SmallVectorImpl<SDValue> &Results,
20029                                SelectionDAG &DAG,
20030                                const AArch64Subtarget *Subtarget) {
20031   EVT VT = N->getValueType(0);
20032   if (!VT.is256BitVector() ||
20033       (VT.getScalarType().isFloatingPoint() &&
20034        !N->getFlags().hasAllowReassociation()) ||
20035       (VT.getScalarType() == MVT::f16 && !Subtarget->hasFullFP16()))
20036     return;
20037 
20038   SDValue X = N->getOperand(0);
20039   auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N->getOperand(1));
20040   if (!Shuf) {
20041     Shuf = dyn_cast<ShuffleVectorSDNode>(N->getOperand(0));
20042     X = N->getOperand(1);
20043     if (!Shuf)
20044       return;
20045   }
20046 
20047   if (Shuf->getOperand(0) != X || !Shuf->getOperand(1)->isUndef())
20048     return;
20049 
20050   // Check the mask is 1,0,3,2,5,4,...
20051   ArrayRef<int> Mask = Shuf->getMask();
20052   for (int I = 0, E = Mask.size(); I < E; I++)
20053     if (Mask[I] != (I % 2 == 0 ? I + 1 : I - 1))
20054       return;
20055 
20056   SDLoc DL(N);
20057   auto LoHi = DAG.SplitVector(X, DL);
20058   assert(LoHi.first.getValueType() == LoHi.second.getValueType());
20059   SDValue Addp = DAG.getNode(AArch64ISD::ADDP, N, LoHi.first.getValueType(),
20060                              LoHi.first, LoHi.second);
20061 
20062   // Shuffle the elements back into order.
20063   SmallVector<int> NMask;
20064   for (unsigned I = 0, E = VT.getVectorNumElements() / 2; I < E; I++) {
20065     NMask.push_back(I);
20066     NMask.push_back(I);
20067   }
20068   Results.push_back(
20069       DAG.getVectorShuffle(VT, DL,
20070                            DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Addp,
20071                                        DAG.getUNDEF(LoHi.first.getValueType())),
20072                            DAG.getUNDEF(VT), NMask));
20073 }
20074 
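// Expand a vector reduction by splitting the operand in two, combining the
// halves with InterOp and then reducing the combined value with AcrossOp.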
20075 static void ReplaceReductionResults(SDNode *N,
20076                                     SmallVectorImpl<SDValue> &Results,
20077                                     SelectionDAG &DAG, unsigned InterOp,
20078                                     unsigned AcrossOp) {
20079   EVT LoVT, HiVT;
20080   SDValue Lo, Hi;
20081   SDLoc dl(N);
20082   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
20083   std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
20084   SDValue InterVal = DAG.getNode(InterOp, dl, LoVT, Lo, Hi);
20085   SDValue SplitVal = DAG.getNode(AcrossOp, dl, LoVT, InterVal);
20086   Results.push_back(SplitVal);
20087 }
20088 
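// Split an i128 value into its low and high i64 halves.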
20089 static std::pair<SDValue, SDValue> splitInt128(SDValue N, SelectionDAG &DAG) {
20090   SDLoc DL(N);
20091   SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, N);
20092   SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64,
20093                            DAG.getNode(ISD::SRL, DL, MVT::i128, N,
20094                                        DAG.getConstant(64, DL, MVT::i64)));
20095   return std::make_pair(Lo, Hi);
20096 }
20097 
20098 void AArch64TargetLowering::ReplaceExtractSubVectorResults(
20099     SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
20100   SDValue In = N->getOperand(0);
20101   EVT InVT = In.getValueType();
20102 
20103   // Common code will handle these just fine.
20104   if (!InVT.isScalableVector() || !InVT.isInteger())
20105     return;
20106 
20107   SDLoc DL(N);
20108   EVT VT = N->getValueType(0);
20109 
20110   // The following checks bail if this is not a halving operation.
20111 
20112   ElementCount ResEC = VT.getVectorElementCount();
20113 
20114   if (InVT.getVectorElementCount() != (ResEC * 2))
20115     return;
20116 
20117   auto *CIndex = dyn_cast<ConstantSDNode>(N->getOperand(1));
20118   if (!CIndex)
20119     return;
20120 
20121   unsigned Index = CIndex->getZExtValue();
20122   if ((Index != 0) && (Index != ResEC.getKnownMinValue()))
20123     return;
20124 
20125   unsigned Opcode = (Index == 0) ? AArch64ISD::UUNPKLO : AArch64ISD::UUNPKHI;
20126   EVT ExtendedHalfVT = VT.widenIntegerVectorElementType(*DAG.getContext());
20127 
20128   SDValue Half = DAG.getNode(Opcode, DL, ExtendedHalfVT, N->getOperand(0));
20129   Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Half));
20130 }
20131 
20132 // Create an even/odd pair of X registers holding integer value V.
20133 static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
20134   SDLoc dl(V.getNode());
20135   SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i64);
20136   SDValue VHi = DAG.getAnyExtOrTrunc(
20137       DAG.getNode(ISD::SRL, dl, MVT::i128, V, DAG.getConstant(64, dl, MVT::i64)),
20138       dl, MVT::i64);
20139   if (DAG.getDataLayout().isBigEndian())
    std::swap(VLo, VHi);
20141   SDValue RegClass =
20142       DAG.getTargetConstant(AArch64::XSeqPairsClassRegClassID, dl, MVT::i32);
20143   SDValue SubReg0 = DAG.getTargetConstant(AArch64::sube64, dl, MVT::i32);
20144   SDValue SubReg1 = DAG.getTargetConstant(AArch64::subo64, dl, MVT::i32);
20145   const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
20146   return SDValue(
20147       DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
20148 }
20149 
20150 static void ReplaceCMP_SWAP_128Results(SDNode *N,
20151                                        SmallVectorImpl<SDValue> &Results,
20152                                        SelectionDAG &DAG,
20153                                        const AArch64Subtarget *Subtarget) {
20154   assert(N->getValueType(0) == MVT::i128 &&
20155          "AtomicCmpSwap on types less than 128 should be legal");
20156 
20157   MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
20158   if (Subtarget->hasLSE() || Subtarget->outlineAtomics()) {
20159     // LSE has a 128-bit compare and swap (CASP), but i128 is not a legal type,
20160     // so lower it here, wrapped in REG_SEQUENCE and EXTRACT_SUBREG.
20161     SDValue Ops[] = {
20162         createGPRPairNode(DAG, N->getOperand(2)), // Compare value
20163         createGPRPairNode(DAG, N->getOperand(3)), // Store value
20164         N->getOperand(1), // Ptr
20165         N->getOperand(0), // Chain in
20166     };
20167 
20168     unsigned Opcode;
20169     switch (MemOp->getMergedOrdering()) {
20170     case AtomicOrdering::Monotonic:
20171       Opcode = AArch64::CASPX;
20172       break;
20173     case AtomicOrdering::Acquire:
20174       Opcode = AArch64::CASPAX;
20175       break;
20176     case AtomicOrdering::Release:
20177       Opcode = AArch64::CASPLX;
20178       break;
20179     case AtomicOrdering::AcquireRelease:
20180     case AtomicOrdering::SequentiallyConsistent:
20181       Opcode = AArch64::CASPALX;
20182       break;
20183     default:
20184       llvm_unreachable("Unexpected ordering!");
20185     }
20186 
20187     MachineSDNode *CmpSwap = DAG.getMachineNode(
20188         Opcode, SDLoc(N), DAG.getVTList(MVT::Untyped, MVT::Other), Ops);
20189     DAG.setNodeMemRefs(CmpSwap, {MemOp});
20190 
20191     unsigned SubReg1 = AArch64::sube64, SubReg2 = AArch64::subo64;
20192     if (DAG.getDataLayout().isBigEndian())
20193       std::swap(SubReg1, SubReg2);
20194     SDValue Lo = DAG.getTargetExtractSubreg(SubReg1, SDLoc(N), MVT::i64,
20195                                             SDValue(CmpSwap, 0));
20196     SDValue Hi = DAG.getTargetExtractSubreg(SubReg2, SDLoc(N), MVT::i64,
20197                                             SDValue(CmpSwap, 0));
20198     Results.push_back(
20199         DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128, Lo, Hi));
20200     Results.push_back(SDValue(CmpSwap, 1)); // Chain out
20201     return;
20202   }
20203 
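  // Without LSE (or outlined atomics), fall back to the CMP_SWAP_128 pseudos,
  // which are expanded later into an exclusive load/store loop.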
20204   unsigned Opcode;
20205   switch (MemOp->getMergedOrdering()) {
20206   case AtomicOrdering::Monotonic:
20207     Opcode = AArch64::CMP_SWAP_128_MONOTONIC;
20208     break;
20209   case AtomicOrdering::Acquire:
20210     Opcode = AArch64::CMP_SWAP_128_ACQUIRE;
20211     break;
20212   case AtomicOrdering::Release:
20213     Opcode = AArch64::CMP_SWAP_128_RELEASE;
20214     break;
20215   case AtomicOrdering::AcquireRelease:
20216   case AtomicOrdering::SequentiallyConsistent:
20217     Opcode = AArch64::CMP_SWAP_128;
20218     break;
20219   default:
20220     llvm_unreachable("Unexpected ordering!");
20221   }
20222 
20223   auto Desired = splitInt128(N->getOperand(2), DAG);
20224   auto New = splitInt128(N->getOperand(3), DAG);
20225   SDValue Ops[] = {N->getOperand(1), Desired.first, Desired.second,
20226                    New.first,        New.second,    N->getOperand(0)};
20227   SDNode *CmpSwap = DAG.getMachineNode(
20228       Opcode, SDLoc(N), DAG.getVTList(MVT::i64, MVT::i64, MVT::i32, MVT::Other),
20229       Ops);
20230   DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});
20231 
20232   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128,
20233                                 SDValue(CmpSwap, 0), SDValue(CmpSwap, 1)));
20234   Results.push_back(SDValue(CmpSwap, 3));
20235 }
20236 
20237 void AArch64TargetLowering::ReplaceNodeResults(
20238     SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
20239   switch (N->getOpcode()) {
20240   default:
20241     llvm_unreachable("Don't know how to custom expand this");
20242   case ISD::BITCAST:
20243     ReplaceBITCASTResults(N, Results, DAG);
20244     return;
20245   case ISD::VECREDUCE_ADD:
20246   case ISD::VECREDUCE_SMAX:
20247   case ISD::VECREDUCE_SMIN:
20248   case ISD::VECREDUCE_UMAX:
20249   case ISD::VECREDUCE_UMIN:
20250     Results.push_back(LowerVECREDUCE(SDValue(N, 0), DAG));
20251     return;
20252   case ISD::ADD:
20253   case ISD::FADD:
20254     ReplaceAddWithADDP(N, Results, DAG, Subtarget);
20255     return;
20256 
20257   case ISD::CTPOP:
20258   case ISD::PARITY:
20259     if (SDValue Result = LowerCTPOP_PARITY(SDValue(N, 0), DAG))
20260       Results.push_back(Result);
20261     return;
20262   case AArch64ISD::SADDV:
20263     ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::SADDV);
20264     return;
20265   case AArch64ISD::UADDV:
20266     ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::UADDV);
20267     return;
20268   case AArch64ISD::SMINV:
20269     ReplaceReductionResults(N, Results, DAG, ISD::SMIN, AArch64ISD::SMINV);
20270     return;
20271   case AArch64ISD::UMINV:
20272     ReplaceReductionResults(N, Results, DAG, ISD::UMIN, AArch64ISD::UMINV);
20273     return;
20274   case AArch64ISD::SMAXV:
20275     ReplaceReductionResults(N, Results, DAG, ISD::SMAX, AArch64ISD::SMAXV);
20276     return;
20277   case AArch64ISD::UMAXV:
20278     ReplaceReductionResults(N, Results, DAG, ISD::UMAX, AArch64ISD::UMAXV);
20279     return;
20280   case ISD::FP_TO_UINT:
20281   case ISD::FP_TO_SINT:
20282   case ISD::STRICT_FP_TO_SINT:
20283   case ISD::STRICT_FP_TO_UINT:
20284     assert(N->getValueType(0) == MVT::i128 && "unexpected illegal conversion");
20285     // Let normal code take care of it by not adding anything to Results.
20286     return;
20287   case ISD::ATOMIC_CMP_SWAP:
20288     ReplaceCMP_SWAP_128Results(N, Results, DAG, Subtarget);
20289     return;
20290   case ISD::ATOMIC_LOAD:
20291   case ISD::LOAD: {
20292     assert(SDValue(N, 0).getValueType() == MVT::i128 &&
20293            "unexpected load's value type");
20294     MemSDNode *LoadNode = cast<MemSDNode>(N);
20295     if ((!LoadNode->isVolatile() && !LoadNode->isAtomic()) ||
20296         LoadNode->getMemoryVT() != MVT::i128) {
      // Non-volatile, non-atomic loads are optimized later in AArch64's
      // load/store optimizer.
20299       return;
20300     }
20301 
20302     SDValue Result = DAG.getMemIntrinsicNode(
20303         AArch64ISD::LDP, SDLoc(N),
20304         DAG.getVTList({MVT::i64, MVT::i64, MVT::Other}),
20305         {LoadNode->getChain(), LoadNode->getBasePtr()}, LoadNode->getMemoryVT(),
20306         LoadNode->getMemOperand());
20307 
20308     SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128,
20309                                Result.getValue(0), Result.getValue(1));
20310     Results.append({Pair, Result.getValue(2) /* Chain */});
20311     return;
20312   }
20313   case ISD::EXTRACT_SUBVECTOR:
20314     ReplaceExtractSubVectorResults(N, Results, DAG);
20315     return;
20316   case ISD::INSERT_SUBVECTOR:
20317   case ISD::CONCAT_VECTORS:
20318     // Custom lowering has been requested for INSERT_SUBVECTOR and
20319     // CONCAT_VECTORS -- but delegate to common code for result type
20320     // legalisation
20321     return;
20322   case ISD::INTRINSIC_WO_CHAIN: {
20323     EVT VT = N->getValueType(0);
20324     assert((VT == MVT::i8 || VT == MVT::i16) &&
20325            "custom lowering for unexpected type");
20326 
20327     ConstantSDNode *CN = cast<ConstantSDNode>(N->getOperand(0));
20328     Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
20329     switch (IntID) {
20330     default:
20331       return;
20332     case Intrinsic::aarch64_sve_clasta_n: {
20333       SDLoc DL(N);
20334       auto Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, N->getOperand(2));
20335       auto V = DAG.getNode(AArch64ISD::CLASTA_N, DL, MVT::i32,
20336                            N->getOperand(1), Op2, N->getOperand(3));
20337       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
20338       return;
20339     }
20340     case Intrinsic::aarch64_sve_clastb_n: {
20341       SDLoc DL(N);
20342       auto Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, N->getOperand(2));
20343       auto V = DAG.getNode(AArch64ISD::CLASTB_N, DL, MVT::i32,
20344                            N->getOperand(1), Op2, N->getOperand(3));
20345       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
20346       return;
20347     }
20348     case Intrinsic::aarch64_sve_lasta: {
20349       SDLoc DL(N);
20350       auto V = DAG.getNode(AArch64ISD::LASTA, DL, MVT::i32,
20351                            N->getOperand(1), N->getOperand(2));
20352       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
20353       return;
20354     }
20355     case Intrinsic::aarch64_sve_lastb: {
20356       SDLoc DL(N);
20357       auto V = DAG.getNode(AArch64ISD::LASTB, DL, MVT::i32,
20358                            N->getOperand(1), N->getOperand(2));
20359       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
20360       return;
20361     }
20362     }
20363   }
20364   }
20365 }
20366 
20367 bool AArch64TargetLowering::useLoadStackGuardNode() const {
20368   if (Subtarget->isTargetAndroid() || Subtarget->isTargetFuchsia())
20369     return TargetLowering::useLoadStackGuardNode();
20370   return true;
20371 }
20372 
20373 unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const {
20374   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
20375   // reciprocal if there are three or more FDIVs.
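  // For illustration (a sketch, assuming fast-math flags that permit
  // reciprocal transforms): given a/x, b/x and c/x, the generic combine forms
  // r = 1.0/x once and rewrites the divisions as a*r, b*r and c*r, trading
  // two of the expensive divisions for multiplies.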
20376   return 3;
20377 }
20378 
20379 TargetLoweringBase::LegalizeTypeAction
20380 AArch64TargetLowering::getPreferredVectorAction(MVT VT) const {
  // During type legalization, we prefer to widen v1i8, v1i16, v1i32 to v8i8,
  // v4i16, v2i32 (and v1f32 to v2f32) instead of promoting.
20383   if (VT == MVT::v1i8 || VT == MVT::v1i16 || VT == MVT::v1i32 ||
20384       VT == MVT::v1f32)
20385     return TypeWidenVector;
20386 
20387   return TargetLoweringBase::getPreferredVectorAction(VT);
20388 }
20389 
20390 // In v8.4a, ldp and stp instructions are guaranteed to be single-copy atomic
20391 // provided the address is 16-byte aligned.
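// For example (sketch): on an LSE2 subtarget, an IR access such as
//   %v = load atomic i128, i128* %p seq_cst, align 16
// satisfies this hook and can be implemented with a single 16-byte-aligned
// LDP rather than a CAS loop or libcall.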
20392 bool AArch64TargetLowering::isOpSuitableForLDPSTP(const Instruction *I) const {
20393   if (!Subtarget->hasLSE2())
20394     return false;
20395 
20396   if (auto LI = dyn_cast<LoadInst>(I))
20397     return LI->getType()->getPrimitiveSizeInBits() == 128 &&
20398            LI->getAlign() >= Align(16);
20399 
20400   if (auto SI = dyn_cast<StoreInst>(I))
20401     return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() == 128 &&
20402            SI->getAlign() >= Align(16);
20403 
20404   return false;
20405 }
20406 
20407 bool AArch64TargetLowering::shouldInsertFencesForAtomic(
20408     const Instruction *I) const {
20409   return isOpSuitableForLDPSTP(I);
20410 }
20411 
// Loads and stores of less than 128 bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong.
20415 TargetLoweringBase::AtomicExpansionKind
20416 AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
20417   unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
20418   if (Size != 128 || isOpSuitableForLDPSTP(SI))
20419     return AtomicExpansionKind::None;
20420   return AtomicExpansionKind::Expand;
20421 }
20422 
// Loads and stores of less than 128 bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong.
20426 TargetLowering::AtomicExpansionKind
20427 AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
20428   unsigned Size = LI->getType()->getPrimitiveSizeInBits();
20429 
20430   if (Size != 128 || isOpSuitableForLDPSTP(LI))
20431     return AtomicExpansionKind::None;
20432 
20433   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
20434   // implement atomicrmw without spilling. If the target address is also on the
20435   // stack and close enough to the spill slot, this can lead to a situation
20436   // where the monitor always gets cleared and the atomic operation can never
20437   // succeed. So at -O0 lower this operation to a CAS loop.
20438   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
20439     return AtomicExpansionKind::CmpXChg;
20440 
20441   return AtomicExpansionKind::LLSC;
20442 }
20443 
// For the real atomic operations, we have ldxr/stxr up to 128 bits.
20445 TargetLowering::AtomicExpansionKind
20446 AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
20447   if (AI->isFloatingPointOperation())
20448     return AtomicExpansionKind::CmpXChg;
20449 
20450   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
20451   if (Size > 128) return AtomicExpansionKind::None;
20452 
20453   // Nand is not supported in LSE.
20454   // Leave 128 bits to LLSC or CmpXChg.
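  // For instance (sketch): with LSE, a 32-bit "atomicrmw add" is left alone
  // here (AtomicExpansionKind::None) and is typically selected to an LDADD
  // instruction, whereas "atomicrmw nand" of any width still takes the LL/SC
  // (or, at -O0, CAS) path below.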
20455   if (AI->getOperation() != AtomicRMWInst::Nand && Size < 128) {
20456     if (Subtarget->hasLSE())
20457       return AtomicExpansionKind::None;
20458     if (Subtarget->outlineAtomics()) {
      // [U]Min/[U]Max RMW atomics are used in __sync_fetch_* libcalls so far.
      // Don't outline them unless
      // (1) high-level <atomic> support is approved:
      //   http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p0493r1.pdf
      // (2) low-level libgcc and compiler-rt support is implemented via the
      //     min/max outline atomics helpers.
20465       if (AI->getOperation() != AtomicRMWInst::Min &&
20466           AI->getOperation() != AtomicRMWInst::Max &&
20467           AI->getOperation() != AtomicRMWInst::UMin &&
20468           AI->getOperation() != AtomicRMWInst::UMax) {
20469         return AtomicExpansionKind::None;
20470       }
20471     }
20472   }
20473 
20474   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
20475   // implement atomicrmw without spilling. If the target address is also on the
20476   // stack and close enough to the spill slot, this can lead to a situation
20477   // where the monitor always gets cleared and the atomic operation can never
20478   // succeed. So at -O0 lower this operation to a CAS loop.
20479   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
20480     return AtomicExpansionKind::CmpXChg;
20481 
20482   return AtomicExpansionKind::LLSC;
20483 }
20484 
20485 TargetLowering::AtomicExpansionKind
20486 AArch64TargetLowering::shouldExpandAtomicCmpXchgInIR(
20487     AtomicCmpXchgInst *AI) const {
20488   // If subtarget has LSE, leave cmpxchg intact for codegen.
20489   if (Subtarget->hasLSE() || Subtarget->outlineAtomics())
20490     return AtomicExpansionKind::None;
20491   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
20492   // implement cmpxchg without spilling. If the address being exchanged is also
20493   // on the stack and close enough to the spill slot, this can lead to a
20494   // situation where the monitor always gets cleared and the atomic operation
20495   // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
20496   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
20497     return AtomicExpansionKind::None;
20498 
20499   // 128-bit atomic cmpxchg is weird; AtomicExpand doesn't know how to expand
20500   // it.
20501   unsigned Size = AI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
20502   if (Size > 64)
20503     return AtomicExpansionKind::None;
20504 
20505   return AtomicExpansionKind::LLSC;
20506 }
20507 
20508 Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder,
20509                                              Type *ValueTy, Value *Addr,
20510                                              AtomicOrdering Ord) const {
20511   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20512   bool IsAcquire = isAcquireOrStronger(Ord);
20513 
  // Since i128 isn't legal and intrinsics don't get type-lowered, the ldxp and
  // ldaxp intrinsics must return {i64, i64}, and we have to recombine the two
  // halves into a single i128 here.
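  // Roughly, for an acquire ordering the IR built below looks like:
  //   %lohi = call { i64, i64 } @llvm.aarch64.ldaxp(i8* %addr)
  //   %lo   = zext i64 (extractvalue %lohi, 0) to i128
  //   %hi   = zext i64 (extractvalue %lohi, 1) to i128
  //   %val  = or i128 %lo, (shl i128 %hi, 64)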
20517   if (ValueTy->getPrimitiveSizeInBits() == 128) {
20518     Intrinsic::ID Int =
20519         IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp;
20520     Function *Ldxr = Intrinsic::getDeclaration(M, Int);
20521 
20522     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
20523     Value *LoHi = Builder.CreateCall(Ldxr, Addr, "lohi");
20524 
20525     Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
20526     Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
20527     Lo = Builder.CreateZExt(Lo, ValueTy, "lo64");
20528     Hi = Builder.CreateZExt(Hi, ValueTy, "hi64");
20529     return Builder.CreateOr(
20530         Lo, Builder.CreateShl(Hi, ConstantInt::get(ValueTy, 64)), "val64");
20531   }
20532 
20533   Type *Tys[] = { Addr->getType() };
20534   Intrinsic::ID Int =
20535       IsAcquire ? Intrinsic::aarch64_ldaxr : Intrinsic::aarch64_ldxr;
20536   Function *Ldxr = Intrinsic::getDeclaration(M, Int, Tys);
20537 
20538   const DataLayout &DL = M->getDataLayout();
20539   IntegerType *IntEltTy = Builder.getIntNTy(DL.getTypeSizeInBits(ValueTy));
20540   CallInst *CI = Builder.CreateCall(Ldxr, Addr);
20541   CI->addParamAttr(
20542       0, Attribute::get(Builder.getContext(), Attribute::ElementType, ValueTy));
20543   Value *Trunc = Builder.CreateTrunc(CI, IntEltTy);
20544 
20545   return Builder.CreateBitCast(Trunc, ValueTy);
20546 }
20547 
20548 void AArch64TargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
20549     IRBuilderBase &Builder) const {
20550   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20551   Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::aarch64_clrex));
20552 }
20553 
20554 Value *AArch64TargetLowering::emitStoreConditional(IRBuilderBase &Builder,
20555                                                    Value *Val, Value *Addr,
20556                                                    AtomicOrdering Ord) const {
20557   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20558   bool IsRelease = isReleaseOrStronger(Ord);
20559 
20560   // Since the intrinsics must have legal type, the i128 intrinsics take two
20561   // parameters: "i64, i64". We must marshal Val into the appropriate form
20562   // before the call.
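  // Roughly, for a release ordering the marshalling below produces:
  //   %lo     = trunc i128 %val to i64
  //   %hi     = trunc i128 (lshr i128 %val, 64) to i64
  //   %status = call i32 @llvm.aarch64.stlxp(i64 %lo, i64 %hi, i8* %addr)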
20563   if (Val->getType()->getPrimitiveSizeInBits() == 128) {
20564     Intrinsic::ID Int =
20565         IsRelease ? Intrinsic::aarch64_stlxp : Intrinsic::aarch64_stxp;
20566     Function *Stxr = Intrinsic::getDeclaration(M, Int);
20567     Type *Int64Ty = Type::getInt64Ty(M->getContext());
20568 
20569     Value *Lo = Builder.CreateTrunc(Val, Int64Ty, "lo");
20570     Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 64), Int64Ty, "hi");
20571     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
20572     return Builder.CreateCall(Stxr, {Lo, Hi, Addr});
20573   }
20574 
20575   Intrinsic::ID Int =
20576       IsRelease ? Intrinsic::aarch64_stlxr : Intrinsic::aarch64_stxr;
20577   Type *Tys[] = { Addr->getType() };
20578   Function *Stxr = Intrinsic::getDeclaration(M, Int, Tys);
20579 
20580   const DataLayout &DL = M->getDataLayout();
  IntegerType *IntValTy =
      Builder.getIntNTy(DL.getTypeSizeInBits(Val->getType()));
20582   Val = Builder.CreateBitCast(Val, IntValTy);
20583 
20584   CallInst *CI = Builder.CreateCall(
20585       Stxr, {Builder.CreateZExtOrBitCast(
20586                  Val, Stxr->getFunctionType()->getParamType(0)),
20587              Addr});
20588   CI->addParamAttr(1, Attribute::get(Builder.getContext(),
20589                                      Attribute::ElementType, Val->getType()));
20590   return CI;
20591 }
20592 
20593 bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
20594     Type *Ty, CallingConv::ID CallConv, bool isVarArg,
20595     const DataLayout &DL) const {
20596   if (!Ty->isArrayTy()) {
20597     const TypeSize &TySize = Ty->getPrimitiveSizeInBits();
20598     return TySize.isScalable() && TySize.getKnownMinSize() > 128;
20599   }
20600 
  // All non-aggregate members of the type must have the same type.
20602   SmallVector<EVT> ValueVTs;
20603   ComputeValueVTs(*this, DL, Ty, ValueVTs);
20604   return is_splat(ValueVTs);
20605 }
20606 
20607 bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,
20608                                                             EVT) const {
20609   return false;
20610 }
20611 
20612 static Value *UseTlsOffset(IRBuilderBase &IRB, unsigned Offset) {
20613   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
20614   Function *ThreadPointerFunc =
20615       Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
20616   return IRB.CreatePointerCast(
20617       IRB.CreateConstGEP1_32(IRB.getInt8Ty(), IRB.CreateCall(ThreadPointerFunc),
20618                              Offset),
20619       IRB.getInt8PtrTy()->getPointerTo(0));
20620 }
20621 
20622 Value *AArch64TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
20623   // Android provides a fixed TLS slot for the stack cookie. See the definition
20624   // of TLS_SLOT_STACK_GUARD in
20625   // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
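  // In effect (sketch): the cookie is addressed relative to the thread
  // pointer, so after selection the guard load looks roughly like
  //   mrs xN, TPIDR_EL0
  //   ldr xM, [xN, #0x28]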
20626   if (Subtarget->isTargetAndroid())
20627     return UseTlsOffset(IRB, 0x28);
20628 
20629   // Fuchsia is similar.
20630   // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
20631   if (Subtarget->isTargetFuchsia())
20632     return UseTlsOffset(IRB, -0x10);
20633 
20634   return TargetLowering::getIRStackGuard(IRB);
20635 }
20636 
20637 void AArch64TargetLowering::insertSSPDeclarations(Module &M) const {
  // The MSVC CRT provides functionality for stack protection.
20639   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) {
20640     // MSVC CRT has a global variable holding security cookie.
20641     M.getOrInsertGlobal("__security_cookie",
20642                         Type::getInt8PtrTy(M.getContext()));
20643 
20644     // MSVC CRT has a function to validate security cookie.
20645     FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
20646         "__security_check_cookie", Type::getVoidTy(M.getContext()),
20647         Type::getInt8PtrTy(M.getContext()));
20648     if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
20649       F->setCallingConv(CallingConv::Win64);
20650       F->addParamAttr(0, Attribute::AttrKind::InReg);
20651     }
20652     return;
20653   }
20654   TargetLowering::insertSSPDeclarations(M);
20655 }
20656 
20657 Value *AArch64TargetLowering::getSDagStackGuard(const Module &M) const {
20658   // MSVC CRT has a global variable holding security cookie.
20659   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
20660     return M.getGlobalVariable("__security_cookie");
20661   return TargetLowering::getSDagStackGuard(M);
20662 }
20663 
20664 Function *AArch64TargetLowering::getSSPStackGuardCheck(const Module &M) const {
20665   // MSVC CRT has a function to validate security cookie.
20666   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
20667     return M.getFunction("__security_check_cookie");
20668   return TargetLowering::getSSPStackGuardCheck(M);
20669 }
20670 
20671 Value *
20672 AArch64TargetLowering::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
20673   // Android provides a fixed TLS slot for the SafeStack pointer. See the
20674   // definition of TLS_SLOT_SAFESTACK in
20675   // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
20676   if (Subtarget->isTargetAndroid())
20677     return UseTlsOffset(IRB, 0x48);
20678 
20679   // Fuchsia is similar.
20680   // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
20681   if (Subtarget->isTargetFuchsia())
20682     return UseTlsOffset(IRB, -0x8);
20683 
20684   return TargetLowering::getSafeStackPointerLocation(IRB);
20685 }
20686 
20687 bool AArch64TargetLowering::isMaskAndCmp0FoldingBeneficial(
20688     const Instruction &AndI) const {
  // Only sink 'and' mask to cmp use block if it is masking a single bit, since
  // this is likely to fold the and/cmp/br into a single tbz instruction.  It
20691   // may be beneficial to sink in other cases, but we would have to check that
20692   // the cmp would not get folded into the br to form a cbz for these to be
20693   // beneficial.
20694   ConstantInt* Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
20695   if (!Mask)
20696     return false;
20697   return Mask->getValue().isPowerOf2();
20698 }
20699 
20700 bool AArch64TargetLowering::
20701     shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
20702         SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
20703         unsigned OldShiftOpcode, unsigned NewShiftOpcode,
20704         SelectionDAG &DAG) const {
20705   // Does baseline recommend not to perform the fold by default?
20706   if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
20707           X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
20708     return false;
20709   // Else, if this is a vector shift, prefer 'shl'.
20710   return X.getValueType().isScalarInteger() || NewShiftOpcode == ISD::SHL;
20711 }
20712 
20713 bool AArch64TargetLowering::shouldExpandShift(SelectionDAG &DAG,
20714                                               SDNode *N) const {
20715   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
20716       !Subtarget->isTargetWindows() && !Subtarget->isTargetDarwin())
20717     return false;
20718   return true;
20719 }
20720 
20721 void AArch64TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  // Update IsSplitCSR in AArch64FunctionInfo.
20723   AArch64FunctionInfo *AFI = Entry->getParent()->getInfo<AArch64FunctionInfo>();
20724   AFI->setIsSplitCSR(true);
20725 }
20726 
20727 void AArch64TargetLowering::insertCopiesSplitCSR(
20728     MachineBasicBlock *Entry,
20729     const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
20730   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
20731   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
20732   if (!IStart)
20733     return;
20734 
20735   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20736   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
20737   MachineBasicBlock::iterator MBBI = Entry->begin();
20738   for (const MCPhysReg *I = IStart; *I; ++I) {
20739     const TargetRegisterClass *RC = nullptr;
20740     if (AArch64::GPR64RegClass.contains(*I))
20741       RC = &AArch64::GPR64RegClass;
20742     else if (AArch64::FPR64RegClass.contains(*I))
20743       RC = &AArch64::FPR64RegClass;
20744     else
20745       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
20746 
20747     Register NewVR = MRI->createVirtualRegister(RC);
20748     // Create copy from CSR to a virtual register.
20749     // FIXME: this currently does not emit CFI pseudo-instructions, it works
20750     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
20751     // nounwind. If we want to generalize this later, we may need to emit
20752     // CFI pseudo-instructions.
20753     assert(Entry->getParent()->getFunction().hasFnAttribute(
20754                Attribute::NoUnwind) &&
20755            "Function should be nounwind in insertCopiesSplitCSR!");
20756     Entry->addLiveIn(*I);
20757     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
20758         .addReg(*I);
20759 
20760     // Insert the copy-back instructions right before the terminator.
20761     for (auto *Exit : Exits)
20762       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
20763               TII->get(TargetOpcode::COPY), *I)
20764           .addReg(NewVR);
20765   }
20766 }
20767 
20768 bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
20769   // Integer division on AArch64 is expensive. However, when aggressively
20770   // optimizing for code size, we prefer to use a div instruction, as it is
20771   // usually smaller than the alternative sequence.
20772   // The exception to this is vector division. Since AArch64 doesn't have vector
20773   // integer division, leaving the division as-is is a loss even in terms of
20774   // size, because it will have to be scalarized, while the alternative code
20775   // sequence can be performed in vector form.
20776   bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
20777   return OptSize && !VT.isVector();
20778 }
20779 
20780 bool AArch64TargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
20781   // We want inc-of-add for scalars and sub-of-not for vectors.
20782   return VT.isScalarInteger();
20783 }
20784 
20785 bool AArch64TargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
20786                                                  EVT VT) const {
  // v8f16 without fp16 needs to be extended to v8f32, which is more difficult
  // to legalize.
20789   if (FPVT == MVT::v8f16 && !Subtarget->hasFullFP16())
20790     return false;
20791   return TargetLowering::shouldConvertFpToSat(Op, FPVT, VT);
20792 }
20793 
20794 bool AArch64TargetLowering::enableAggressiveFMAFusion(EVT VT) const {
20795   return Subtarget->hasAggressiveFMA() && VT.isFloatingPoint();
20796 }
20797 
20798 unsigned
20799 AArch64TargetLowering::getVaListSizeInBits(const DataLayout &DL) const {
20800   if (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())
20801     return getPointerTy(DL).getSizeInBits();
20802 
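  // The value below corresponds to the AAPCS64 va_list layout (an ABI sketch,
  // not a type used in this file):
  //   struct va_list { void *__stack; void *__gr_top; void *__vr_top;
  //                    int __gr_offs; int __vr_offs; };
  // i.e. three pointers plus two 32-bit offsets.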
20803   return 3 * getPointerTy(DL).getSizeInBits() + 2 * 32;
20804 }
20805 
20806 void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
20807   MachineFrameInfo &MFI = MF.getFrameInfo();
20808   // If we have any vulnerable SVE stack objects then the stack protector
20809   // needs to be placed at the top of the SVE stack area, as the SVE locals
20810   // are placed above the other locals, so we allocate it as if it were a
20811   // scalable vector.
20812   // FIXME: It may be worthwhile having a specific interface for this rather
20813   // than doing it here in finalizeLowering.
20814   if (MFI.hasStackProtectorIndex()) {
20815     for (unsigned int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
20816       if (MFI.getStackID(i) == TargetStackID::ScalableVector &&
20817           MFI.getObjectSSPLayout(i) != MachineFrameInfo::SSPLK_None) {
20818         MFI.setStackID(MFI.getStackProtectorIndex(),
20819                        TargetStackID::ScalableVector);
20820         MFI.setObjectAlignment(MFI.getStackProtectorIndex(), Align(16));
20821         break;
20822       }
20823     }
20824   }
20825   MFI.computeMaxCallFrameSize(MF);
20826   TargetLoweringBase::finalizeLowering(MF);
20827 }
20828 
20829 // Unlike X86, we let frame lowering assign offsets to all catch objects.
20830 bool AArch64TargetLowering::needsFixedCatchObjects() const {
20831   return false;
20832 }
20833 
20834 bool AArch64TargetLowering::shouldLocalize(
20835     const MachineInstr &MI, const TargetTransformInfo *TTI) const {
20836   switch (MI.getOpcode()) {
20837   case TargetOpcode::G_GLOBAL_VALUE: {
    // On Darwin, TLS global vars get selected into function calls, which
    // we don't want localized, as they can get moved into the middle of
    // another call sequence.
20841     const GlobalValue &GV = *MI.getOperand(1).getGlobal();
20842     if (GV.isThreadLocal() && Subtarget->isTargetMachO())
20843       return false;
20844     break;
20845   }
20846   // If we legalized G_GLOBAL_VALUE into ADRP + G_ADD_LOW, mark both as being
20847   // localizable.
20848   case AArch64::ADRP:
20849   case AArch64::G_ADD_LOW:
20850     return true;
20851   default:
20852     break;
20853   }
20854   return TargetLoweringBase::shouldLocalize(MI, TTI);
20855 }
20856 
20857 bool AArch64TargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
20858   if (isa<ScalableVectorType>(Inst.getType()))
20859     return true;
20860 
20861   for (unsigned i = 0; i < Inst.getNumOperands(); ++i)
20862     if (isa<ScalableVectorType>(Inst.getOperand(i)->getType()))
20863       return true;
20864 
20865   if (const AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
20866     if (isa<ScalableVectorType>(AI->getAllocatedType()))
20867       return true;
20868   }
20869 
20870   return false;
20871 }
20872 
20873 // Return the largest legal scalable vector type that matches VT's element type.
20874 static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT) {
20875   assert(VT.isFixedLengthVector() &&
20876          DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
20877          "Expected legal fixed length vector!");
20878   switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
20879   default:
20880     llvm_unreachable("unexpected element type for SVE container");
20881   case MVT::i8:
20882     return EVT(MVT::nxv16i8);
20883   case MVT::i16:
20884     return EVT(MVT::nxv8i16);
20885   case MVT::i32:
20886     return EVT(MVT::nxv4i32);
20887   case MVT::i64:
20888     return EVT(MVT::nxv2i64);
20889   case MVT::f16:
20890     return EVT(MVT::nxv8f16);
20891   case MVT::f32:
20892     return EVT(MVT::nxv4f32);
20893   case MVT::f64:
20894     return EVT(MVT::nxv2f64);
20895   }
20896 }
20897 
20898 // Return a PTRUE with active lanes corresponding to the extent of VT.
20899 static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
20900                                                 EVT VT) {
20901   assert(VT.isFixedLengthVector() &&
20902          DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
20903          "Expected legal fixed length vector!");
20904 
20905   Optional<unsigned> PgPattern =
20906       getSVEPredPatternFromNumElements(VT.getVectorNumElements());
20907   assert(PgPattern && "Unexpected element count for SVE predicate");
20908 
20909   // For vectors that are exactly getMaxSVEVectorSizeInBits big, we can use
20910   // AArch64SVEPredPattern::all, which can enable the use of unpredicated
20911   // variants of instructions when available.
20912   const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
20913   unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
20914   unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
20915   if (MaxSVESize && MinSVESize == MaxSVESize &&
20916       MaxSVESize == VT.getSizeInBits())
20917     PgPattern = AArch64SVEPredPattern::all;
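  // For instance (sketch): when the minimum and maximum SVE vector lengths
  // are both 256 bits (e.g. clang's -msve-vector-bits=256), a fixed v8i32
  // operand can be governed by "ptrue pN.s, all", while a fixed v4i32 operand
  // still uses the length-specific "ptrue pN.s, vl4".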
20918 
20919   MVT MaskVT;
20920   switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
20921   default:
20922     llvm_unreachable("unexpected element type for SVE predicate");
20923   case MVT::i8:
20924     MaskVT = MVT::nxv16i1;
20925     break;
20926   case MVT::i16:
20927   case MVT::f16:
20928     MaskVT = MVT::nxv8i1;
20929     break;
20930   case MVT::i32:
20931   case MVT::f32:
20932     MaskVT = MVT::nxv4i1;
20933     break;
20934   case MVT::i64:
20935   case MVT::f64:
20936     MaskVT = MVT::nxv2i1;
20937     break;
20938   }
20939 
20940   return getPTrue(DAG, DL, MaskVT, *PgPattern);
20941 }
20942 
20943 static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
20944                                              EVT VT) {
20945   assert(VT.isScalableVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
20946          "Expected legal scalable vector!");
20947   auto PredTy = VT.changeVectorElementType(MVT::i1);
20948   return getPTrue(DAG, DL, PredTy, AArch64SVEPredPattern::all);
20949 }
20950 
20951 static SDValue getPredicateForVector(SelectionDAG &DAG, SDLoc &DL, EVT VT) {
20952   if (VT.isFixedLengthVector())
20953     return getPredicateForFixedLengthVector(DAG, DL, VT);
20954 
20955   return getPredicateForScalableVector(DAG, DL, VT);
20956 }
20957 
20958 // Grow V to consume an entire SVE register.
20959 static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {
20960   assert(VT.isScalableVector() &&
20961          "Expected to convert into a scalable vector!");
20962   assert(V.getValueType().isFixedLengthVector() &&
20963          "Expected a fixed length vector operand!");
20964   SDLoc DL(V);
20965   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
20966   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
20967 }
20968 
20969 // Shrink V so it's just big enough to maintain a VT's worth of data.
20970 static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {
20971   assert(VT.isFixedLengthVector() &&
20972          "Expected to convert into a fixed length vector!");
20973   assert(V.getValueType().isScalableVector() &&
20974          "Expected a scalable vector operand!");
20975   SDLoc DL(V);
20976   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
20977   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
20978 }
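// Taken together, the two helpers above form the round trip used throughout
// the fixed-length lowering code, roughly:
//   scalable = INSERT_SUBVECTOR undef, fixed, 0    (convertToScalableVector)
//   ... operate on 'scalable' with SVE nodes ...
//   fixed    = EXTRACT_SUBVECTOR scalable, 0       (convertFromScalableVector)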
20979 
20980 // Convert all fixed length vector loads larger than NEON to masked_loads.
20981 SDValue AArch64TargetLowering::LowerFixedLengthVectorLoadToSVE(
20982     SDValue Op, SelectionDAG &DAG) const {
20983   auto Load = cast<LoadSDNode>(Op);
20984 
20985   SDLoc DL(Op);
20986   EVT VT = Op.getValueType();
20987   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
20988   EVT LoadVT = ContainerVT;
20989   EVT MemVT = Load->getMemoryVT();
20990 
20991   auto Pg = getPredicateForFixedLengthVector(DAG, DL, VT);
20992 
20993   if (VT.isFloatingPoint() && Load->getExtensionType() == ISD::EXTLOAD) {
20994     LoadVT = ContainerVT.changeTypeToInteger();
20995     MemVT = MemVT.changeTypeToInteger();
20996   }
20997 
20998   SDValue NewLoad = DAG.getMaskedLoad(
20999       LoadVT, DL, Load->getChain(), Load->getBasePtr(), Load->getOffset(), Pg,
21000       DAG.getUNDEF(LoadVT), MemVT, Load->getMemOperand(),
21001       Load->getAddressingMode(), Load->getExtensionType());
21002 
21003   SDValue Result = NewLoad;
21004   if (VT.isFloatingPoint() && Load->getExtensionType() == ISD::EXTLOAD) {
21005     EVT ExtendVT = ContainerVT.changeVectorElementType(
21006         Load->getMemoryVT().getVectorElementType());
21007 
21008     Result = getSVESafeBitCast(ExtendVT, Result, DAG);
21009     Result = DAG.getNode(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU, DL, ContainerVT,
21010                          Pg, Result, DAG.getUNDEF(ContainerVT));
21011   }
21012 
21013   Result = convertFromScalableVector(DAG, VT, Result);
21014   SDValue MergedValues[2] = {Result, NewLoad.getValue(1)};
21015   return DAG.getMergeValues(MergedValues, DL);
21016 }
21017 
21018 static SDValue convertFixedMaskToScalableVector(SDValue Mask,
21019                                                 SelectionDAG &DAG) {
21020   SDLoc DL(Mask);
21021   EVT InVT = Mask.getValueType();
21022   EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21023 
21024   auto Pg = getPredicateForFixedLengthVector(DAG, DL, InVT);
21025 
21026   if (ISD::isBuildVectorAllOnes(Mask.getNode()))
21027     return Pg;
21028 
21029   auto Op1 = convertToScalableVector(DAG, ContainerVT, Mask);
21030   auto Op2 = DAG.getConstant(0, DL, ContainerVT);
21031 
21032   return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, Pg.getValueType(),
21033                      {Pg, Op1, Op2, DAG.getCondCode(ISD::SETNE)});
21034 }
21035 
// Convert fixed length vector masked loads larger than NEON to SVE masked
// loads.
21037 SDValue AArch64TargetLowering::LowerFixedLengthVectorMLoadToSVE(
21038     SDValue Op, SelectionDAG &DAG) const {
21039   auto Load = cast<MaskedLoadSDNode>(Op);
21040 
21041   SDLoc DL(Op);
21042   EVT VT = Op.getValueType();
21043   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21044 
21045   SDValue Mask = convertFixedMaskToScalableVector(Load->getMask(), DAG);
21046 
21047   SDValue PassThru;
21048   bool IsPassThruZeroOrUndef = false;
21049 
21050   if (Load->getPassThru()->isUndef()) {
21051     PassThru = DAG.getUNDEF(ContainerVT);
21052     IsPassThruZeroOrUndef = true;
21053   } else {
21054     if (ContainerVT.isInteger())
21055       PassThru = DAG.getConstant(0, DL, ContainerVT);
21056     else
21057       PassThru = DAG.getConstantFP(0, DL, ContainerVT);
21058     if (isZerosVector(Load->getPassThru().getNode()))
21059       IsPassThruZeroOrUndef = true;
21060   }
21061 
21062   SDValue NewLoad = DAG.getMaskedLoad(
21063       ContainerVT, DL, Load->getChain(), Load->getBasePtr(), Load->getOffset(),
21064       Mask, PassThru, Load->getMemoryVT(), Load->getMemOperand(),
21065       Load->getAddressingMode(), Load->getExtensionType());
21066 
21067   SDValue Result = NewLoad;
21068   if (!IsPassThruZeroOrUndef) {
21069     SDValue OldPassThru =
21070         convertToScalableVector(DAG, ContainerVT, Load->getPassThru());
21071     Result = DAG.getSelect(DL, ContainerVT, Mask, Result, OldPassThru);
21072   }
21073 
21074   Result = convertFromScalableVector(DAG, VT, Result);
21075   SDValue MergedValues[2] = {Result, NewLoad.getValue(1)};
21076   return DAG.getMergeValues(MergedValues, DL);
21077 }
21078 
21079 // Convert all fixed length vector stores larger than NEON to masked_stores.
21080 SDValue AArch64TargetLowering::LowerFixedLengthVectorStoreToSVE(
21081     SDValue Op, SelectionDAG &DAG) const {
21082   auto Store = cast<StoreSDNode>(Op);
21083 
21084   SDLoc DL(Op);
21085   EVT VT = Store->getValue().getValueType();
21086   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21087   EVT MemVT = Store->getMemoryVT();
21088 
21089   auto Pg = getPredicateForFixedLengthVector(DAG, DL, VT);
21090   auto NewValue = convertToScalableVector(DAG, ContainerVT, Store->getValue());
21091 
21092   if (VT.isFloatingPoint() && Store->isTruncatingStore()) {
21093     EVT TruncVT = ContainerVT.changeVectorElementType(
21094         Store->getMemoryVT().getVectorElementType());
21095     MemVT = MemVT.changeTypeToInteger();
21096     NewValue = DAG.getNode(AArch64ISD::FP_ROUND_MERGE_PASSTHRU, DL, TruncVT, Pg,
21097                            NewValue, DAG.getTargetConstant(0, DL, MVT::i64),
21098                            DAG.getUNDEF(TruncVT));
21099     NewValue =
21100         getSVESafeBitCast(ContainerVT.changeTypeToInteger(), NewValue, DAG);
21101   }
21102 
21103   return DAG.getMaskedStore(Store->getChain(), DL, NewValue,
21104                             Store->getBasePtr(), Store->getOffset(), Pg, MemVT,
21105                             Store->getMemOperand(), Store->getAddressingMode(),
21106                             Store->isTruncatingStore());
21107 }
21108 
21109 SDValue AArch64TargetLowering::LowerFixedLengthVectorMStoreToSVE(
21110     SDValue Op, SelectionDAG &DAG) const {
21111   auto *Store = cast<MaskedStoreSDNode>(Op);
21112 
21113   SDLoc DL(Op);
21114   EVT VT = Store->getValue().getValueType();
21115   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21116 
21117   auto NewValue = convertToScalableVector(DAG, ContainerVT, Store->getValue());
21118   SDValue Mask = convertFixedMaskToScalableVector(Store->getMask(), DAG);
21119 
21120   return DAG.getMaskedStore(
21121       Store->getChain(), DL, NewValue, Store->getBasePtr(), Store->getOffset(),
21122       Mask, Store->getMemoryVT(), Store->getMemOperand(),
21123       Store->getAddressingMode(), Store->isTruncatingStore());
21124 }
21125 
21126 SDValue AArch64TargetLowering::LowerFixedLengthVectorIntDivideToSVE(
21127     SDValue Op, SelectionDAG &DAG) const {
21128   SDLoc dl(Op);
21129   EVT VT = Op.getValueType();
21130   EVT EltVT = VT.getVectorElementType();
21131 
21132   bool Signed = Op.getOpcode() == ISD::SDIV;
21133   unsigned PredOpcode = Signed ? AArch64ISD::SDIV_PRED : AArch64ISD::UDIV_PRED;
21134 
21135   bool Negated;
21136   uint64_t SplatVal;
21137   if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated)) {
21138     EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21139     SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
21140     SDValue Op2 = DAG.getTargetConstant(Log2_64(SplatVal), dl, MVT::i32);
21141 
21142     SDValue Pg = getPredicateForFixedLengthVector(DAG, dl, VT);
    SDValue Res =
        DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, dl, ContainerVT, Pg, Op1, Op2);
    if (Negated)
      Res = DAG.getNode(ISD::SUB, dl, ContainerVT,
                        DAG.getConstant(0, dl, ContainerVT), Res);
21146 
21147     return convertFromScalableVector(DAG, VT, Res);
21148   }
21149 
21150   // Scalable vector i32/i64 DIV is supported.
21151   if (EltVT == MVT::i32 || EltVT == MVT::i64)
21152     return LowerToPredicatedOp(Op, DAG, PredOpcode);
21153 
21154   // Scalable vector i8/i16 DIV is not supported. Promote it to i32.
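  // In outline (sketch): the operands are split into lo/hi halves and widened
  // via [S|U]UNPKLO/HI, each half is divided at the wider element type
  // (promoting again if necessary until i32 is reached), and the results are
  // re-packed with UZP1 before converting back to the original fixed type.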
21155   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21156   EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
21157   EVT FixedWidenedVT = HalfVT.widenIntegerVectorElementType(*DAG.getContext());
21158   EVT ScalableWidenedVT = getContainerForFixedLengthVector(DAG, FixedWidenedVT);
21159 
21160   // If this is not a full vector, extend, div, and truncate it.
21161   EVT WidenedVT = VT.widenIntegerVectorElementType(*DAG.getContext());
21162   if (DAG.getTargetLoweringInfo().isTypeLegal(WidenedVT)) {
21163     unsigned ExtendOpcode = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
21164     SDValue Op0 = DAG.getNode(ExtendOpcode, dl, WidenedVT, Op.getOperand(0));
21165     SDValue Op1 = DAG.getNode(ExtendOpcode, dl, WidenedVT, Op.getOperand(1));
21166     SDValue Div = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0, Op1);
21167     return DAG.getNode(ISD::TRUNCATE, dl, VT, Div);
21168   }
21169 
21170   // Convert the operands to scalable vectors.
21171   SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
21172   SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(1));
21173 
21174   // Extend the scalable operands.
21175   unsigned UnpkLo = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
21176   unsigned UnpkHi = Signed ? AArch64ISD::SUNPKHI : AArch64ISD::UUNPKHI;
21177   SDValue Op0Lo = DAG.getNode(UnpkLo, dl, ScalableWidenedVT, Op0);
21178   SDValue Op1Lo = DAG.getNode(UnpkLo, dl, ScalableWidenedVT, Op1);
21179   SDValue Op0Hi = DAG.getNode(UnpkHi, dl, ScalableWidenedVT, Op0);
21180   SDValue Op1Hi = DAG.getNode(UnpkHi, dl, ScalableWidenedVT, Op1);
21181 
21182   // Convert back to fixed vectors so the DIV can be further lowered.
21183   Op0Lo = convertFromScalableVector(DAG, FixedWidenedVT, Op0Lo);
21184   Op1Lo = convertFromScalableVector(DAG, FixedWidenedVT, Op1Lo);
21185   Op0Hi = convertFromScalableVector(DAG, FixedWidenedVT, Op0Hi);
21186   Op1Hi = convertFromScalableVector(DAG, FixedWidenedVT, Op1Hi);
21187   SDValue ResultLo = DAG.getNode(Op.getOpcode(), dl, FixedWidenedVT,
21188                                  Op0Lo, Op1Lo);
21189   SDValue ResultHi = DAG.getNode(Op.getOpcode(), dl, FixedWidenedVT,
21190                                  Op0Hi, Op1Hi);
21191 
21192   // Convert again to scalable vectors to truncate.
21193   ResultLo = convertToScalableVector(DAG, ScalableWidenedVT, ResultLo);
21194   ResultHi = convertToScalableVector(DAG, ScalableWidenedVT, ResultHi);
21195   SDValue ScalableResult = DAG.getNode(AArch64ISD::UZP1, dl, ContainerVT,
21196                                        ResultLo, ResultHi);
21197 
21198   return convertFromScalableVector(DAG, VT, ScalableResult);
21199 }
21200 
21201 SDValue AArch64TargetLowering::LowerFixedLengthVectorIntExtendToSVE(
21202     SDValue Op, SelectionDAG &DAG) const {
21203   EVT VT = Op.getValueType();
21204   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21205 
21206   SDLoc DL(Op);
21207   SDValue Val = Op.getOperand(0);
21208   EVT ContainerVT = getContainerForFixedLengthVector(DAG, Val.getValueType());
21209   Val = convertToScalableVector(DAG, ContainerVT, Val);
21210 
21211   bool Signed = Op.getOpcode() == ISD::SIGN_EXTEND;
21212   unsigned ExtendOpc = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
21213 
21214   // Repeatedly unpack Val until the result is of the desired element type.
21215   switch (ContainerVT.getSimpleVT().SimpleTy) {
21216   default:
21217     llvm_unreachable("unimplemented container type");
21218   case MVT::nxv16i8:
21219     Val = DAG.getNode(ExtendOpc, DL, MVT::nxv8i16, Val);
21220     if (VT.getVectorElementType() == MVT::i16)
21221       break;
21222     LLVM_FALLTHROUGH;
21223   case MVT::nxv8i16:
21224     Val = DAG.getNode(ExtendOpc, DL, MVT::nxv4i32, Val);
21225     if (VT.getVectorElementType() == MVT::i32)
21226       break;
21227     LLVM_FALLTHROUGH;
21228   case MVT::nxv4i32:
21229     Val = DAG.getNode(ExtendOpc, DL, MVT::nxv2i64, Val);
21230     assert(VT.getVectorElementType() == MVT::i64 && "Unexpected element type!");
21231     break;
21232   }
21233 
21234   return convertFromScalableVector(DAG, VT, Val);
21235 }
21236 
21237 SDValue AArch64TargetLowering::LowerFixedLengthVectorTruncateToSVE(
21238     SDValue Op, SelectionDAG &DAG) const {
21239   EVT VT = Op.getValueType();
21240   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21241 
21242   SDLoc DL(Op);
21243   SDValue Val = Op.getOperand(0);
21244   EVT ContainerVT = getContainerForFixedLengthVector(DAG, Val.getValueType());
21245   Val = convertToScalableVector(DAG, ContainerVT, Val);
21246 
21247   // Repeatedly truncate Val until the result is of the desired element type.
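  // For example (sketch), the first step below (i64 elements truncated to
  // i32) bitcasts to nxv4i32 and applies UZP1 to the value with itself,
  // gathering the even-indexed (i.e. low) 32-bit halves of each 64-bit lane
  // into the low part of the register, which is the only part the
  // fixed-length result reads back.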
21248   switch (ContainerVT.getSimpleVT().SimpleTy) {
21249   default:
21250     llvm_unreachable("unimplemented container type");
21251   case MVT::nxv2i64:
21252     Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv4i32, Val);
21253     Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv4i32, Val, Val);
21254     if (VT.getVectorElementType() == MVT::i32)
21255       break;
21256     LLVM_FALLTHROUGH;
21257   case MVT::nxv4i32:
21258     Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv8i16, Val);
21259     Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv8i16, Val, Val);
21260     if (VT.getVectorElementType() == MVT::i16)
21261       break;
21262     LLVM_FALLTHROUGH;
21263   case MVT::nxv8i16:
21264     Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv16i8, Val);
21265     Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv16i8, Val, Val);
21266     assert(VT.getVectorElementType() == MVT::i8 && "Unexpected element type!");
21267     break;
21268   }
21269 
21270   return convertFromScalableVector(DAG, VT, Val);
21271 }
21272 
21273 SDValue AArch64TargetLowering::LowerFixedLengthExtractVectorElt(
21274     SDValue Op, SelectionDAG &DAG) const {
21275   EVT VT = Op.getValueType();
21276   EVT InVT = Op.getOperand(0).getValueType();
21277   assert(InVT.isFixedLengthVector() && "Expected fixed length vector type!");
21278 
21279   SDLoc DL(Op);
21280   EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21281   SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(0));
21282 
21283   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op0, Op.getOperand(1));
21284 }
21285 
21286 SDValue AArch64TargetLowering::LowerFixedLengthInsertVectorElt(
21287     SDValue Op, SelectionDAG &DAG) const {
21288   EVT VT = Op.getValueType();
21289   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21290 
21291   SDLoc DL(Op);
21292   EVT InVT = Op.getOperand(0).getValueType();
21293   EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21294   SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(0));
21295 
21296   auto ScalableRes = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT, Op0,
21297                                  Op.getOperand(1), Op.getOperand(2));
21298 
21299   return convertFromScalableVector(DAG, VT, ScalableRes);
21300 }
21301 
21302 // Convert vector operation 'Op' to an equivalent predicated operation whereby
21303 // the original operation's type is used to construct a suitable predicate.
21304 // NOTE: The results for inactive lanes are undefined.
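// For example (sketch, with an illustrative choice of NewOp): reaching here
// for a fixed v4i32 operation with NewOp == AArch64ISD::MUL_PRED would yield
//   MUL_PRED nxv4i32 ptrue(vl4), (insert_subvector undef, lhs, 0),
//                                (insert_subvector undef, rhs, 0)
// with the result extracted back out as v4i32.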
21305 SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
21306                                                    SelectionDAG &DAG,
21307                                                    unsigned NewOp) const {
21308   EVT VT = Op.getValueType();
21309   SDLoc DL(Op);
21310   auto Pg = getPredicateForVector(DAG, DL, VT);
21311 
21312   if (VT.isFixedLengthVector()) {
21313     assert(isTypeLegal(VT) && "Expected only legal fixed-width types");
21314     EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21315 
21316     // Create list of operands by converting existing ones to scalable types.
21317     SmallVector<SDValue, 4> Operands = {Pg};
21318     for (const SDValue &V : Op->op_values()) {
21319       if (isa<CondCodeSDNode>(V)) {
21320         Operands.push_back(V);
21321         continue;
21322       }
21323 
21324       if (const VTSDNode *VTNode = dyn_cast<VTSDNode>(V)) {
21325         EVT VTArg = VTNode->getVT().getVectorElementType();
21326         EVT NewVTArg = ContainerVT.changeVectorElementType(VTArg);
21327         Operands.push_back(DAG.getValueType(NewVTArg));
21328         continue;
21329       }
21330 
21331       assert(isTypeLegal(V.getValueType()) &&
21332              "Expected only legal fixed-width types");
21333       Operands.push_back(convertToScalableVector(DAG, ContainerVT, V));
21334     }
21335 
21336     if (isMergePassthruOpcode(NewOp))
21337       Operands.push_back(DAG.getUNDEF(ContainerVT));
21338 
21339     auto ScalableRes = DAG.getNode(NewOp, DL, ContainerVT, Operands);
21340     return convertFromScalableVector(DAG, VT, ScalableRes);
21341   }
21342 
21343   assert(VT.isScalableVector() && "Only expect to lower scalable vector op!");
21344 
21345   SmallVector<SDValue, 4> Operands = {Pg};
21346   for (const SDValue &V : Op->op_values()) {
21347     assert((!V.getValueType().isVector() ||
21348             V.getValueType().isScalableVector()) &&
21349            "Only scalable vectors are supported!");
21350     Operands.push_back(V);
21351   }
21352 
21353   if (isMergePassthruOpcode(NewOp))
21354     Operands.push_back(DAG.getUNDEF(VT));
21355 
21356   return DAG.getNode(NewOp, DL, VT, Operands, Op->getFlags());
21357 }
21358 
21359 // If a fixed length vector operation has no side effects when applied to
21360 // undefined elements, we can safely use scalable vectors to perform the same
21361 // operation without needing to worry about predication.
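// For instance (sketch): a lane-wise operation that is harmless on undefined
// inputs, such as a bitwise AND of two fixed v16i8 values, can simply be
// performed on nxv16i8 containers; whatever happens in the undefined upper
// lanes is irrelevant because only the low v16i8 portion is extracted back.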
21362 SDValue AArch64TargetLowering::LowerToScalableOp(SDValue Op,
21363                                                  SelectionDAG &DAG) const {
21364   EVT VT = Op.getValueType();
21365   assert(useSVEForFixedLengthVectorVT(VT) &&
21366          "Only expected to lower fixed length vector operation!");
21367   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21368 
21369   // Create list of operands by converting existing ones to scalable types.
21370   SmallVector<SDValue, 4> Ops;
21371   for (const SDValue &V : Op->op_values()) {
21372     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
21373 
21374     // Pass through non-vector operands.
21375     if (!V.getValueType().isVector()) {
21376       Ops.push_back(V);
21377       continue;
21378     }
21379 
21380     // "cast" fixed length vector to a scalable vector.
21381     assert(useSVEForFixedLengthVectorVT(V.getValueType()) &&
21382            "Only fixed length vectors are supported!");
21383     Ops.push_back(convertToScalableVector(DAG, ContainerVT, V));
21384   }
21385 
21386   auto ScalableRes = DAG.getNode(Op.getOpcode(), SDLoc(Op), ContainerVT, Ops);
21387   return convertFromScalableVector(DAG, VT, ScalableRes);
21388 }
21389 
21390 SDValue AArch64TargetLowering::LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp,
21391     SelectionDAG &DAG) const {
21392   SDLoc DL(ScalarOp);
21393   SDValue AccOp = ScalarOp.getOperand(0);
21394   SDValue VecOp = ScalarOp.getOperand(1);
21395   EVT SrcVT = VecOp.getValueType();
21396   EVT ResVT = SrcVT.getVectorElementType();
21397 
21398   EVT ContainerVT = SrcVT;
21399   if (SrcVT.isFixedLengthVector()) {
21400     ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
21401     VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
21402   }
21403 
21404   SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
21405   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
21406 
21407   // Convert operands to Scalable.
21408   AccOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT,
21409                       DAG.getUNDEF(ContainerVT), AccOp, Zero);
21410 
21411   // Perform reduction.
21412   SDValue Rdx = DAG.getNode(AArch64ISD::FADDA_PRED, DL, ContainerVT,
21413                             Pg, AccOp, VecOp);
21414 
21415   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Rdx, Zero);
21416 }
21417 
21418 SDValue AArch64TargetLowering::LowerPredReductionToSVE(SDValue ReduceOp,
21419                                                        SelectionDAG &DAG) const {
21420   SDLoc DL(ReduceOp);
21421   SDValue Op = ReduceOp.getOperand(0);
21422   EVT OpVT = Op.getValueType();
21423   EVT VT = ReduceOp.getValueType();
21424 
21425   if (!OpVT.isScalableVector() || OpVT.getVectorElementType() != MVT::i1)
21426     return SDValue();
21427 
21428   SDValue Pg = getPredicateForVector(DAG, DL, OpVT);
21429 
21430   switch (ReduceOp.getOpcode()) {
21431   default:
21432     return SDValue();
21433   case ISD::VECREDUCE_OR:
21434     if (isAllActivePredicate(DAG, Pg) && OpVT == MVT::nxv16i1)
21435       // The predicate can be 'Op' because
21436       // vecreduce_or(Op & <all true>) <=> vecreduce_or(Op).
21437       return getPTest(DAG, VT, Op, Op, AArch64CC::ANY_ACTIVE);
21438     else
21439       return getPTest(DAG, VT, Pg, Op, AArch64CC::ANY_ACTIVE);
21440   case ISD::VECREDUCE_AND: {
21441     Op = DAG.getNode(ISD::XOR, DL, OpVT, Op, Pg);
21442     return getPTest(DAG, VT, Pg, Op, AArch64CC::NONE_ACTIVE);
21443   }
21444   case ISD::VECREDUCE_XOR: {
21445     SDValue ID =
21446         DAG.getTargetConstant(Intrinsic::aarch64_sve_cntp, DL, MVT::i64);
21447     if (OpVT == MVT::nxv1i1) {
21448       // Emulate a CNTP on .Q using .D and a different governing predicate.
21449       Pg = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv2i1, Pg);
21450       Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv2i1, Op);
21451     }
21452     SDValue Cntp =
21453         DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64, ID, Pg, Op);
21454     return DAG.getAnyExtOrTrunc(Cntp, DL, VT);
21455   }
21456   }
21457 
21458   return SDValue();
21459 }
21460 
21461 SDValue AArch64TargetLowering::LowerReductionToSVE(unsigned Opcode,
21462                                                    SDValue ScalarOp,
21463                                                    SelectionDAG &DAG) const {
21464   SDLoc DL(ScalarOp);
21465   SDValue VecOp = ScalarOp.getOperand(0);
21466   EVT SrcVT = VecOp.getValueType();
21467 
21468   if (useSVEForFixedLengthVectorVT(
21469           SrcVT,
21470           /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors())) {
21471     EVT ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
21472     VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
21473   }
21474 
21475   // UADDV always returns an i64 result.
21476   EVT ResVT = (Opcode == AArch64ISD::UADDV_PRED) ? MVT::i64 :
21477                                                    SrcVT.getVectorElementType();
21478   EVT RdxVT = SrcVT;
21479   if (SrcVT.isFixedLengthVector() || Opcode == AArch64ISD::UADDV_PRED)
21480     RdxVT = getPackedSVEVectorVT(ResVT);
21481 
21482   SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
21483   SDValue Rdx = DAG.getNode(Opcode, DL, RdxVT, Pg, VecOp);
21484   SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT,
21485                             Rdx, DAG.getConstant(0, DL, MVT::i64));
21486 
  // The VEC_REDUCE nodes expect an element-sized result.
21488   if (ResVT != ScalarOp.getValueType())
21489     Res = DAG.getAnyExtOrTrunc(Res, DL, ScalarOp.getValueType());
21490 
21491   return Res;
21492 }
21493 
21494 SDValue
21495 AArch64TargetLowering::LowerFixedLengthVectorSelectToSVE(SDValue Op,
21496     SelectionDAG &DAG) const {
21497   EVT VT = Op.getValueType();
21498   SDLoc DL(Op);
21499 
21500   EVT InVT = Op.getOperand(1).getValueType();
21501   EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21502   SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(1));
21503   SDValue Op2 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(2));
21504 
  // Convert the mask to a predicate (NOTE: We don't need to worry about
  // inactive lanes since VSELECT is safe when given undefined elements).
21507   EVT MaskVT = Op.getOperand(0).getValueType();
21508   EVT MaskContainerVT = getContainerForFixedLengthVector(DAG, MaskVT);
21509   auto Mask = convertToScalableVector(DAG, MaskContainerVT, Op.getOperand(0));
21510   Mask = DAG.getNode(ISD::TRUNCATE, DL,
21511                      MaskContainerVT.changeVectorElementType(MVT::i1), Mask);
21512 
  auto ScalableRes =
      DAG.getNode(ISD::VSELECT, DL, ContainerVT, Mask, Op1, Op2);
21515 
21516   return convertFromScalableVector(DAG, VT, ScalableRes);
21517 }
21518 
21519 SDValue AArch64TargetLowering::LowerFixedLengthVectorSetccToSVE(
21520     SDValue Op, SelectionDAG &DAG) const {
21521   SDLoc DL(Op);
21522   EVT InVT = Op.getOperand(0).getValueType();
21523   EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21524 
21525   assert(useSVEForFixedLengthVectorVT(InVT) &&
21526          "Only expected to lower fixed length vector operation!");
21527   assert(Op.getValueType() == InVT.changeTypeToInteger() &&
21528          "Expected integer result of the same bit length as the inputs!");
21529 
21530   auto Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
21531   auto Op2 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(1));
21532   auto Pg = getPredicateForFixedLengthVector(DAG, DL, InVT);
21533 
21534   EVT CmpVT = Pg.getValueType();
21535   auto Cmp = DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, CmpVT,
21536                          {Pg, Op1, Op2, Op.getOperand(2)});
21537 
21538   EVT PromoteVT = ContainerVT.changeTypeToInteger();
21539   auto Promote = DAG.getBoolExtOrTrunc(Cmp, DL, PromoteVT, InVT);
21540   return convertFromScalableVector(DAG, Op.getValueType(), Promote);
21541 }
21542 
21543 SDValue
21544 AArch64TargetLowering::LowerFixedLengthBitcastToSVE(SDValue Op,
21545                                                     SelectionDAG &DAG) const {
21546   SDLoc DL(Op);
21547   auto SrcOp = Op.getOperand(0);
21548   EVT VT = Op.getValueType();
21549   EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
21550   EVT ContainerSrcVT =
21551       getContainerForFixedLengthVector(DAG, SrcOp.getValueType());
21552 
21553   SrcOp = convertToScalableVector(DAG, ContainerSrcVT, SrcOp);
21554   Op = DAG.getNode(ISD::BITCAST, DL, ContainerDstVT, SrcOp);
21555   return convertFromScalableVector(DAG, VT, Op);
21556 }
21557 
21558 SDValue AArch64TargetLowering::LowerFixedLengthConcatVectorsToSVE(
21559     SDValue Op, SelectionDAG &DAG) const {
21560   SDLoc DL(Op);
21561   unsigned NumOperands = Op->getNumOperands();
21562 
21563   assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
21564          "Unexpected number of operands in CONCAT_VECTORS");
21565 
21566   auto SrcOp1 = Op.getOperand(0);
21567   auto SrcOp2 = Op.getOperand(1);
21568   EVT VT = Op.getValueType();
21569   EVT SrcVT = SrcOp1.getValueType();
21570 
21571   if (NumOperands > 2) {
21572     SmallVector<SDValue, 4> Ops;
21573     EVT PairVT = SrcVT.getDoubleNumVectorElementsVT(*DAG.getContext());
21574     for (unsigned I = 0; I < NumOperands; I += 2)
21575       Ops.push_back(DAG.getNode(ISD::CONCAT_VECTORS, DL, PairVT,
21576                                 Op->getOperand(I), Op->getOperand(I + 1)));
21577 
21578     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Ops);
21579   }
21580 
21581   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21582 
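  // A two-operand concatenation becomes a predicated SPLICE: the lanes of
  // the first operand selected by Pg followed by the leading lanes of the
  // second operand.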
21583   SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, SrcVT);
21584   SrcOp1 = convertToScalableVector(DAG, ContainerVT, SrcOp1);
21585   SrcOp2 = convertToScalableVector(DAG, ContainerVT, SrcOp2);
21586 
21587   Op = DAG.getNode(AArch64ISD::SPLICE, DL, ContainerVT, Pg, SrcOp1, SrcOp2);
21588 
21589   return convertFromScalableVector(DAG, VT, Op);
21590 }
21591 
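// Lower a fixed-length FP_EXTEND via the predicated SVE convert by first
// placing the narrow source elements into the wider destination lanes and
// then extending them in place.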
21592 SDValue
21593 AArch64TargetLowering::LowerFixedLengthFPExtendToSVE(SDValue Op,
21594                                                      SelectionDAG &DAG) const {
21595   EVT VT = Op.getValueType();
21596   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21597 
21598   SDLoc DL(Op);
21599   SDValue Val = Op.getOperand(0);
21600   SDValue Pg = getPredicateForVector(DAG, DL, VT);
21601   EVT SrcVT = Val.getValueType();
21602   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21603   EVT ExtendVT = ContainerVT.changeVectorElementType(
21604       SrcVT.getVectorElementType());
21605 
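  // Any-extend the source as integers so its payload sits in the low half of
  // each wider lane; after the safe bitcast below this matches the element
  // layout the unpacked ExtendVT expects for the predicated extend.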
21606   Val = DAG.getNode(ISD::BITCAST, DL, SrcVT.changeTypeToInteger(), Val);
21607   Val = DAG.getNode(ISD::ANY_EXTEND, DL, VT.changeTypeToInteger(), Val);
21608 
21609   Val = convertToScalableVector(DAG, ContainerVT.changeTypeToInteger(), Val);
21610   Val = getSVESafeBitCast(ExtendVT, Val, DAG);
21611   Val = DAG.getNode(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU, DL, ContainerVT,
21612                     Pg, Val, DAG.getUNDEF(ContainerVT));
21613 
21614   return convertFromScalableVector(DAG, VT, Val);
21615 }
21616 
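// Lower a fixed-length FP_ROUND via the predicated SVE convert, then
// reinterpret the wide source lanes as integers and truncate to extract the
// narrowed elements.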
21617 SDValue
21618 AArch64TargetLowering::LowerFixedLengthFPRoundToSVE(SDValue Op,
21619                                                     SelectionDAG &DAG) const {
21620   EVT VT = Op.getValueType();
21621   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21622 
21623   SDLoc DL(Op);
21624   SDValue Val = Op.getOperand(0);
21625   EVT SrcVT = Val.getValueType();
21626   EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
21627   EVT RoundVT = ContainerSrcVT.changeVectorElementType(
21628       VT.getVectorElementType());
21629   SDValue Pg = getPredicateForVector(DAG, DL, RoundVT);
21630 
21631   Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21632   Val = DAG.getNode(AArch64ISD::FP_ROUND_MERGE_PASSTHRU, DL, RoundVT, Pg, Val,
21633                     Op.getOperand(1), DAG.getUNDEF(RoundVT));
21634   Val = getSVESafeBitCast(ContainerSrcVT.changeTypeToInteger(), Val, DAG);
21635   Val = convertFromScalableVector(DAG, SrcVT.changeTypeToInteger(), Val);
21636 
21637   Val = DAG.getNode(ISD::TRUNCATE, DL, VT.changeTypeToInteger(), Val);
21638   return DAG.getNode(ISD::BITCAST, DL, VT, Val);
21639 }
21640 
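// Lower a fixed-length SINT_TO_FP/UINT_TO_FP via the predicated SVE
// converts, extending the integer source to the destination's element width
// when the destination elements are at least as wide, or converting within
// the source's wide lanes and truncating the result otherwise.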
21641 SDValue
21642 AArch64TargetLowering::LowerFixedLengthIntToFPToSVE(SDValue Op,
21643                                                     SelectionDAG &DAG) const {
21644   EVT VT = Op.getValueType();
21645   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21646 
21647   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP;
21648   unsigned Opcode = IsSigned ? AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU
21649                              : AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU;
21650 
21651   SDLoc DL(Op);
21652   SDValue Val = Op.getOperand(0);
21653   EVT SrcVT = Val.getValueType();
21654   EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
21655   EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
21656 
21657   if (ContainerSrcVT.getVectorElementType().getSizeInBits() <=
21658       ContainerDstVT.getVectorElementType().getSizeInBits()) {
21659     SDValue Pg = getPredicateForVector(DAG, DL, VT);
21660 
21661     Val = DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
21662                       VT.changeTypeToInteger(), Val);
21663 
    Val = convertToScalableVector(DAG, ContainerDstVT.changeTypeToInteger(),
                                  Val);
    Val = getSVESafeBitCast(ContainerDstVT.changeTypeToInteger(), Val, DAG);
    // Safe to use a wider than specified operand because the extend above
    // preserves the numeric value (zero-extended for unsigned, sign-extended
    // for signed).
21668     Val = DAG.getNode(Opcode, DL, ContainerDstVT, Pg, Val,
21669                       DAG.getUNDEF(ContainerDstVT));
21670     return convertFromScalableVector(DAG, VT, Val);
21671   } else {
21672     EVT CvtVT = ContainerSrcVT.changeVectorElementType(
21673         ContainerDstVT.getVectorElementType());
21674     SDValue Pg = getPredicateForVector(DAG, DL, CvtVT);
21675 
21676     Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21677     Val = DAG.getNode(Opcode, DL, CvtVT, Pg, Val, DAG.getUNDEF(CvtVT));
21678     Val = getSVESafeBitCast(ContainerSrcVT, Val, DAG);
21679     Val = convertFromScalableVector(DAG, SrcVT, Val);
21680 
21681     Val = DAG.getNode(ISD::TRUNCATE, DL, VT.changeTypeToInteger(), Val);
21682     return DAG.getNode(ISD::BITCAST, DL, VT, Val);
21683   }
21684 }
21685 
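// Lower a fixed-length FP_TO_SINT/FP_TO_UINT via the predicated
// FCVTZS/FCVTZU, either any-extending the source into the destination's
// wider lanes or converting at the source width and truncating the result,
// depending on the relative element sizes.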
21686 SDValue
21687 AArch64TargetLowering::LowerFixedLengthFPToIntToSVE(SDValue Op,
21688                                                     SelectionDAG &DAG) const {
21689   EVT VT = Op.getValueType();
21690   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21691 
21692   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
21693   unsigned Opcode = IsSigned ? AArch64ISD::FCVTZS_MERGE_PASSTHRU
21694                              : AArch64ISD::FCVTZU_MERGE_PASSTHRU;
21695 
21696   SDLoc DL(Op);
21697   SDValue Val = Op.getOperand(0);
21698   EVT SrcVT = Val.getValueType();
21699   EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
21700   EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
21701 
21702   if (ContainerSrcVT.getVectorElementType().getSizeInBits() <=
21703       ContainerDstVT.getVectorElementType().getSizeInBits()) {
    EVT CvtVT = ContainerDstVT.changeVectorElementType(
        ContainerSrcVT.getVectorElementType());
21706     SDValue Pg = getPredicateForVector(DAG, DL, VT);
21707 
21708     Val = DAG.getNode(ISD::BITCAST, DL, SrcVT.changeTypeToInteger(), Val);
21709     Val = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Val);
21710 
    Val = convertToScalableVector(DAG, ContainerDstVT, Val);
21712     Val = getSVESafeBitCast(CvtVT, Val, DAG);
21713     Val = DAG.getNode(Opcode, DL, ContainerDstVT, Pg, Val,
21714                       DAG.getUNDEF(ContainerDstVT));
21715     return convertFromScalableVector(DAG, VT, Val);
21716   } else {
21717     EVT CvtVT = ContainerSrcVT.changeTypeToInteger();
21718     SDValue Pg = getPredicateForVector(DAG, DL, CvtVT);
21719 
    // Safe to use a wider than specified result type since an fp_to_int
    // whose result doesn't fit into the destination is undefined.
21722     Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21723     Val = DAG.getNode(Opcode, DL, CvtVT, Pg, Val, DAG.getUNDEF(CvtVT));
21724     Val = convertFromScalableVector(DAG, SrcVT.changeTypeToInteger(), Val);
21725 
21726     return DAG.getNode(ISD::TRUNCATE, DL, VT, Val);
21727   }
21728 }
21729 
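// Lower a fixed-length ISD::VECTOR_SHUFFLE by matching its mask against the
// SVE permutes (INSR, REV*, ZIP, TRN, UZP and VECTOR_REVERSE) that remain
// correct once the operands are widened to scalable containers.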
21730 SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE(
21731     SDValue Op, SelectionDAG &DAG) const {
21732   EVT VT = Op.getValueType();
21733   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21734 
21735   auto *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
21736   auto ShuffleMask = SVN->getMask();
21737 
21738   SDLoc DL(Op);
21739   SDValue Op1 = Op.getOperand(0);
21740   SDValue Op2 = Op.getOperand(1);
21741 
21742   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21743   Op1 = convertToScalableVector(DAG, ContainerVT, Op1);
21744   Op2 = convertToScalableVector(DAG, ContainerVT, Op2);
21745 
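  // An EXT mask whose offset is the last element index takes the final
  // element of the first operand followed by the leading elements of the
  // second. This maps onto INSR, which shifts the vector up by one lane and
  // inserts a scalar into lane 0.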
21746   bool ReverseEXT = false;
21747   unsigned Imm;
21748   if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm) &&
21749       Imm == VT.getVectorNumElements() - 1) {
21750     if (ReverseEXT)
21751       std::swap(Op1, Op2);
21752 
21753     EVT ScalarTy = VT.getVectorElementType();
21754     if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
21755       ScalarTy = MVT::i32;
21756     SDValue Scalar = DAG.getNode(
21757         ISD::EXTRACT_VECTOR_ELT, DL, ScalarTy, Op1,
21758         DAG.getConstant(VT.getVectorNumElements() - 1, DL, MVT::i64));
21759     Op = DAG.getNode(AArch64ISD::INSR, DL, ContainerVT, Op2, Scalar);
21760     return convertFromScalableVector(DAG, VT, Op);
21761   }
21762 
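  // REV-style masks reverse the elements within each LaneSize-bit block.
  // Bitcast to a vector of LaneSize-bit lanes and use the predicated reverse
  // whose granularity matches the original element size.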
21763   for (unsigned LaneSize : {64U, 32U, 16U}) {
21764     if (isREVMask(ShuffleMask, VT, LaneSize)) {
21765       EVT NewVT =
21766           getPackedSVEVectorVT(EVT::getIntegerVT(*DAG.getContext(), LaneSize));
21767       unsigned RevOp;
21768       unsigned EltSz = VT.getScalarSizeInBits();
21769       if (EltSz == 8)
21770         RevOp = AArch64ISD::BSWAP_MERGE_PASSTHRU;
21771       else if (EltSz == 16)
21772         RevOp = AArch64ISD::REVH_MERGE_PASSTHRU;
21773       else
21774         RevOp = AArch64ISD::REVW_MERGE_PASSTHRU;
21775 
21776       Op = DAG.getNode(ISD::BITCAST, DL, NewVT, Op1);
21777       Op = LowerToPredicatedOp(Op, DAG, RevOp);
21778       Op = DAG.getNode(ISD::BITCAST, DL, ContainerVT, Op);
21779       return convertFromScalableVector(DAG, VT, Op);
21780     }
21781   }
21782 
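  // ZIP1 and TRN1/TRN2 only reference elements relative to the start of each
  // operand, so these mappings are safe for any SVE register size.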
21783   unsigned WhichResult;
21784   if (isZIPMask(ShuffleMask, VT, WhichResult) && WhichResult == 0)
21785     return convertFromScalableVector(
21786         DAG, VT, DAG.getNode(AArch64ISD::ZIP1, DL, ContainerVT, Op1, Op2));
21787 
21788   if (isTRNMask(ShuffleMask, VT, WhichResult)) {
21789     unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
21790     return convertFromScalableVector(
21791         DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op2));
21792   }
21793 
21794   if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult) && WhichResult == 0)
21795     return convertFromScalableVector(
21796         DAG, VT, DAG.getNode(AArch64ISD::ZIP1, DL, ContainerVT, Op1, Op1));
21797 
21798   if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
21799     unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
21800     return convertFromScalableVector(
21801         DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op1));
21802   }
21803 
  // Functions like isZIPMask return true when an ISD::VECTOR_SHUFFLE's mask
  // represents the same logical operation as performed by a ZIP instruction.
  // In isolation these functions do not mean the ISD::VECTOR_SHUFFLE is
  // exactly equivalent to an AArch64 instruction. There's the extra component
  // of ISD::VECTOR_SHUFFLE's value type to consider. Prior to SVE these
  // functions only operated on 64/128-bit vector types that have a direct
  // mapping to a target register, and so an exact mapping is implied.
  // However, when using SVE for fixed-length vectors, most legal vector types
  // are actually sub-vectors of a larger SVE register. When mapping
  // ISD::VECTOR_SHUFFLE to an SVE instruction, care must be taken to consider
  // how the mask's indices translate. Specifically, when the mapping requires
  // an exact meaning for a specific vector index (e.g. index X is the last
  // vector element in the register) then such mappings are often only safe
  // when the exact SVE register size is known. The main exception to this is
  // when indices are logically relative to the first element of either
  // ISD::VECTOR_SHUFFLE operand because these relative indices don't change
  // when converting from fixed-length to scalable vector types (i.e. the
  // start of a fixed-length vector is always the start of a scalable vector).
21822   unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits();
21823   unsigned MaxSVESize = Subtarget->getMaxSVEVectorSizeInBits();
21824   if (MinSVESize == MaxSVESize && MaxSVESize == VT.getSizeInBits()) {
21825     if (ShuffleVectorInst::isReverseMask(ShuffleMask) && Op2.isUndef()) {
21826       Op = DAG.getNode(ISD::VECTOR_REVERSE, DL, ContainerVT, Op1);
21827       return convertFromScalableVector(DAG, VT, Op);
21828     }
21829 
21830     if (isZIPMask(ShuffleMask, VT, WhichResult) && WhichResult != 0)
21831       return convertFromScalableVector(
21832           DAG, VT, DAG.getNode(AArch64ISD::ZIP2, DL, ContainerVT, Op1, Op2));
21833 
21834     if (isUZPMask(ShuffleMask, VT, WhichResult)) {
21835       unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
21836       return convertFromScalableVector(
21837           DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op2));
21838     }
21839 
21840     if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult) && WhichResult != 0)
21841       return convertFromScalableVector(
21842           DAG, VT, DAG.getNode(AArch64ISD::ZIP2, DL, ContainerVT, Op1, Op1));
21843 
21844     if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
21845       unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
21846       return convertFromScalableVector(
21847           DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op1));
21848     }
21849   }
21850 
21851   return SDValue();
21852 }
21853 
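// Bitcast between legal scalable vector types. When either type is unpacked
// the cast goes via the corresponding packed type so that elements keep
// their expected positions within the SVE register.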
21854 SDValue AArch64TargetLowering::getSVESafeBitCast(EVT VT, SDValue Op,
21855                                                  SelectionDAG &DAG) const {
21856   SDLoc DL(Op);
21857   EVT InVT = Op.getValueType();
21858 
21859   assert(VT.isScalableVector() && isTypeLegal(VT) &&
21860          InVT.isScalableVector() && isTypeLegal(InVT) &&
21861          "Only expect to cast between legal scalable vector types!");
21862   assert(VT.getVectorElementType() != MVT::i1 &&
21863          InVT.getVectorElementType() != MVT::i1 &&
21864          "For predicate bitcasts, use getSVEPredicateBitCast");
21865 
21866   if (InVT == VT)
21867     return Op;
21868 
21869   EVT PackedVT = getPackedSVEVectorVT(VT.getVectorElementType());
21870   EVT PackedInVT = getPackedSVEVectorVT(InVT.getVectorElementType());
21871 
21872   // Safe bitcasting between unpacked vector types of different element counts
21873   // is currently unsupported because the following is missing the necessary
21874   // work to ensure the result's elements live where they're supposed to within
21875   // an SVE register.
21876   //                01234567
21877   // e.g. nxv2i32 = XX??XX??
21878   //      nxv4f16 = X?X?X?X?
21879   assert((VT.getVectorElementCount() == InVT.getVectorElementCount() ||
21880           VT == PackedVT || InVT == PackedInVT) &&
21881          "Unexpected bitcast!");
21882 
21883   // Pack input if required.
21884   if (InVT != PackedInVT)
21885     Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, PackedInVT, Op);
21886 
21887   Op = DAG.getNode(ISD::BITCAST, DL, PackedVT, Op);
21888 
21889   // Unpack result if required.
21890   if (VT != PackedVT)
21891     Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Op);
21892 
21893   return Op;
21894 }
21895 
21896 bool AArch64TargetLowering::isAllActivePredicate(SelectionDAG &DAG,
21897                                                  SDValue N) const {
21898   return ::isAllActivePredicate(DAG, N);
21899 }
21900 
21901 EVT AArch64TargetLowering::getPromotedVTForPredicate(EVT VT) const {
21902   return ::getPromotedVTForPredicate(VT);
21903 }
21904 
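// Allow SimplifyDemandedBits to look through AArch64-specific nodes. This
// currently only handles (VSHL (VLSHR Val C) C), which clears the low C bits
// and can be folded to Val when none of those bits are demanded.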
21905 bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
21906     SDValue Op, const APInt &OriginalDemandedBits,
21907     const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
21908     unsigned Depth) const {
21909 
21910   unsigned Opc = Op.getOpcode();
21911   switch (Opc) {
21912   case AArch64ISD::VSHL: {
21913     // Match (VSHL (VLSHR Val X) X)
21914     SDValue ShiftL = Op;
21915     SDValue ShiftR = Op->getOperand(0);
21916     if (ShiftR->getOpcode() != AArch64ISD::VLSHR)
21917       return false;
21918 
21919     if (!ShiftL.hasOneUse() || !ShiftR.hasOneUse())
21920       return false;
21921 
21922     unsigned ShiftLBits = ShiftL->getConstantOperandVal(1);
21923     unsigned ShiftRBits = ShiftR->getConstantOperandVal(1);
21924 
    // Other cases can be handled as well, but this is not implemented yet.
21927     if (ShiftRBits != ShiftLBits)
21928       return false;
21929 
21930     unsigned ScalarSize = Op.getScalarValueSizeInBits();
21931     assert(ScalarSize > ShiftLBits && "Invalid shift imm");
21932 
21933     APInt ZeroBits = APInt::getLowBitsSet(ScalarSize, ShiftLBits);
21934     APInt UnusedBits = ~OriginalDemandedBits;
21935 
21936     if ((ZeroBits & UnusedBits) != ZeroBits)
21937       return false;
21938 
21939     // All bits that are zeroed by (VSHL (VLSHR Val X) X) are not
21940     // used - simplify to just Val.
21941     return TLO.CombineTo(Op, ShiftR->getOperand(0));
21942   }
21943   }
21944 
21945   return TargetLowering::SimplifyDemandedBitsForTargetNode(
21946       Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
21947 }
21948 
21949 bool AArch64TargetLowering::isTargetCanonicalConstantNode(SDValue Op) const {
21950   return Op.getOpcode() == AArch64ISD::DUP ||
21951          (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
21952           Op.getOperand(0).getOpcode() == AArch64ISD::DUP) ||
21953          TargetLowering::isTargetCanonicalConstantNode(Op);
21954 }
21955 
21956 bool AArch64TargetLowering::isConstantUnsignedBitfieldExtractLegal(
21957     unsigned Opc, LLT Ty1, LLT Ty2) const {
21958   return Ty1 == Ty2 && (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64));
21959 }
21960