//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation  ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "AArch64ISelLowering.h"
#include "AArch64CallingConvention.h"
#include "AArch64ExpandImm.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64PerfectShuffle.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "aarch64-lower"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumShiftInserts, "Number of vector shift inserts");
STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");

// FIXME: The necessary dtprel relocations don't seem to be supported
// well in the GNU bfd and gold linkers at the moment. Therefore, by
// default, for now, fall back to GeneralDynamic code generation.
cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration(
    "aarch64-elf-ldtls-generation", cl::Hidden,
    cl::desc("Allow AArch64 Local Dynamic TLS code generation"),
    cl::init(false));

static cl::opt<bool>
EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden,
                         cl::desc("Enable AArch64 logical imm instruction "
                                  "optimization"),
                         cl::init(true));

// Temporary option added for the purpose of testing functionality added
// to DAGCombiner.cpp in D92230. It is expected that this can be removed
// in the future, once both implementations are based on MGATHER rather
// than the GLD1 nodes added for the SVE gather load intrinsics.
static cl::opt<bool>
EnableCombineMGatherIntrinsics("aarch64-enable-mgather-combine", cl::Hidden,
                               cl::desc("Combine extends of AArch64 masked "
                                        "gather intrinsics"),
                               cl::init(true));

/// Value type used for condition codes.
static const MVT MVT_CC = MVT::i32;

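// Returns the packed SVE vector type with the given element type, e.g.
// i32 -> nxv4i32 and f64 -> nxv2f64.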
static inline EVT getPackedSVEVectorVT(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for vector");
  case MVT::i8:
    return MVT::nxv16i8;
  case MVT::i16:
    return MVT::nxv8i16;
  case MVT::i32:
    return MVT::nxv4i32;
  case MVT::i64:
    return MVT::nxv2i64;
  case MVT::f16:
    return MVT::nxv8f16;
  case MVT::f32:
    return MVT::nxv4f32;
  case MVT::f64:
    return MVT::nxv2f64;
  case MVT::bf16:
    return MVT::nxv8bf16;
  }
}

// NOTE: Currently there's only a need to return integer vector types. If this
// changes then just add an extra "type" parameter.
static inline EVT getPackedSVEVectorVT(ElementCount EC) {
  switch (EC.getKnownMinValue()) {
  default:
    llvm_unreachable("unexpected element count for vector");
  case 16:
    return MVT::nxv16i8;
  case 8:
    return MVT::nxv8i16;
  case 4:
    return MVT::nxv4i32;
  case 2:
    return MVT::nxv2i64;
  }
}

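// Returns the packed integer vector type that a scalable i1 predicate with the
// given element count is promoted to, e.g. nxv4i1 -> nxv4i32.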
static inline EVT getPromotedVTForPredicate(EVT VT) {
  assert(VT.isScalableVector() && (VT.getVectorElementType() == MVT::i1) &&
         "Expected scalable predicate vector type!");
  switch (VT.getVectorMinNumElements()) {
  default:
    llvm_unreachable("unexpected element count for vector");
  case 2:
    return MVT::nxv2i64;
  case 4:
    return MVT::nxv4i32;
  case 8:
    return MVT::nxv8i16;
  case 16:
    return MVT::nxv16i8;
  }
}

/// Returns true if VT's elements occupy the lowest bit positions of its
/// associated register class without any intervening space.
///
/// For example, nxv2f16, nxv4f16 and nxv8f16 are legal types that belong to the
/// same register class, but only nxv8f16 can be treated as a packed vector.
static inline bool isPackedVectorType(EVT VT, SelectionDAG &DAG) {
  assert(VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
         "Expected legal vector type!");
  return VT.isFixedLengthVector() ||
         VT.getSizeInBits().getKnownMinSize() == AArch64::SVEBitsPerBlock;
}

// Returns true for ####_MERGE_PASSTHRU opcodes, whose operands have a leading
// predicate and end with a passthru value matching the result type.
static bool isMergePassthruOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64ISD::BITREVERSE_MERGE_PASSTHRU:
  case AArch64ISD::BSWAP_MERGE_PASSTHRU:
  case AArch64ISD::REVH_MERGE_PASSTHRU:
  case AArch64ISD::REVW_MERGE_PASSTHRU:
  case AArch64ISD::REVD_MERGE_PASSTHRU:
  case AArch64ISD::CTLZ_MERGE_PASSTHRU:
  case AArch64ISD::CTPOP_MERGE_PASSTHRU:
  case AArch64ISD::DUP_MERGE_PASSTHRU:
  case AArch64ISD::ABS_MERGE_PASSTHRU:
  case AArch64ISD::NEG_MERGE_PASSTHRU:
  case AArch64ISD::FNEG_MERGE_PASSTHRU:
  case AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU:
  case AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU:
  case AArch64ISD::FCEIL_MERGE_PASSTHRU:
  case AArch64ISD::FFLOOR_MERGE_PASSTHRU:
  case AArch64ISD::FNEARBYINT_MERGE_PASSTHRU:
  case AArch64ISD::FRINT_MERGE_PASSTHRU:
  case AArch64ISD::FROUND_MERGE_PASSTHRU:
  case AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU:
  case AArch64ISD::FTRUNC_MERGE_PASSTHRU:
  case AArch64ISD::FP_ROUND_MERGE_PASSTHRU:
  case AArch64ISD::FP_EXTEND_MERGE_PASSTHRU:
  case AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU:
  case AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU:
  case AArch64ISD::FCVTZU_MERGE_PASSTHRU:
  case AArch64ISD::FCVTZS_MERGE_PASSTHRU:
  case AArch64ISD::FSQRT_MERGE_PASSTHRU:
  case AArch64ISD::FRECPX_MERGE_PASSTHRU:
  case AArch64ISD::FABS_MERGE_PASSTHRU:
    return true;
  }
}

// Returns true if inactive lanes are known to be zeroed by construction.
static bool isZeroingInactiveLanes(SDValue Op) {
  switch (Op.getOpcode()) {
  default:
    // We guarantee i1 splat_vectors to zero the other lanes by
    // implementing it with ptrue and possibly a punpklo for nxv1i1.
    if (ISD::isConstantSplatVectorAllOnes(Op.getNode()))
      return true;
    return false;
  case AArch64ISD::PTRUE:
  case AArch64ISD::SETCC_MERGE_ZERO:
    return true;
  case ISD::INTRINSIC_WO_CHAIN:
    switch (Op.getConstantOperandVal(0)) {
    default:
      return false;
    case Intrinsic::aarch64_sve_ptrue:
    case Intrinsic::aarch64_sve_pnext:
    case Intrinsic::aarch64_sve_cmpeq_wide:
    case Intrinsic::aarch64_sve_cmpne_wide:
    case Intrinsic::aarch64_sve_cmpge_wide:
    case Intrinsic::aarch64_sve_cmpgt_wide:
    case Intrinsic::aarch64_sve_cmplt_wide:
    case Intrinsic::aarch64_sve_cmple_wide:
    case Intrinsic::aarch64_sve_cmphs_wide:
    case Intrinsic::aarch64_sve_cmphi_wide:
    case Intrinsic::aarch64_sve_cmplo_wide:
    case Intrinsic::aarch64_sve_cmpls_wide:
      return true;
    }
  }
}

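// The constructor registers the legal register classes for the enabled
// features (GPR, FPR/NEON, SVE/SME) and then declares, per value type, which
// operations are Legal, Custom, Expand, Promote or LibCall, along with the
// target DAG combines and various lowering limits used elsewhere in this file.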
AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
                                             const AArch64Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // AArch64 doesn't have comparisons which set GPRs or setcc instructions, so
  // we have to make something up. Arbitrarily, choose ZeroOrOne.
  setBooleanContents(ZeroOrOneBooleanContent);
  // When comparing vectors the result sets the different elements in the
  // vector to all-one or all-zero.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &AArch64::GPR32allRegClass);
  addRegisterClass(MVT::i64, &AArch64::GPR64allRegClass);

  if (Subtarget->hasLS64()) {
    addRegisterClass(MVT::i64x8, &AArch64::GPR64x8ClassRegClass);
    setOperationAction(ISD::LOAD, MVT::i64x8, Custom);
    setOperationAction(ISD::STORE, MVT::i64x8, Custom);
  }

  if (Subtarget->hasFPARMv8()) {
    addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::bf16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
  }

  if (Subtarget->hasNEON()) {
    addRegisterClass(MVT::v16i8, &AArch64::FPR8RegClass);
    addRegisterClass(MVT::v8i16, &AArch64::FPR16RegClass);
    // Someone set us up the NEON.
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);
    addDRTypeForNEON(MVT::v1f64);
    addDRTypeForNEON(MVT::v4f16);
    if (Subtarget->hasBF16())
      addDRTypeForNEON(MVT::v4bf16);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);
    addQRTypeForNEON(MVT::v8f16);
    if (Subtarget->hasBF16())
      addQRTypeForNEON(MVT::v8bf16);
  }

  if (Subtarget->hasSVE() || Subtarget->hasSME()) {
    // Add legal SVE predicate types.
    addRegisterClass(MVT::nxv1i1, &AArch64::PPRRegClass);
    addRegisterClass(MVT::nxv2i1, &AArch64::PPRRegClass);
    addRegisterClass(MVT::nxv4i1, &AArch64::PPRRegClass);
    addRegisterClass(MVT::nxv8i1, &AArch64::PPRRegClass);
    addRegisterClass(MVT::nxv16i1, &AArch64::PPRRegClass);

    // Add legal SVE data types.
    addRegisterClass(MVT::nxv16i8, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv8i16, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv4i32, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv2i64, &AArch64::ZPRRegClass);

    addRegisterClass(MVT::nxv2f16, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv4f16, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv8f16, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv2f32, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv4f32, &AArch64::ZPRRegClass);
    addRegisterClass(MVT::nxv2f64, &AArch64::ZPRRegClass);

    if (Subtarget->hasBF16()) {
      addRegisterClass(MVT::nxv2bf16, &AArch64::ZPRRegClass);
      addRegisterClass(MVT::nxv4bf16, &AArch64::ZPRRegClass);
      addRegisterClass(MVT::nxv8bf16, &AArch64::ZPRRegClass);
    }

    if (Subtarget->useSVEForFixedLengthVectors()) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
        if (useSVEForFixedLengthVectorVT(VT))
          addRegisterClass(VT, &AArch64::ZPRRegClass);

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
        if (useSVEForFixedLengthVectorVT(VT))
          addRegisterClass(VT, &AArch64::ZPRRegClass);
    }
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget->getRegisterInfo());

  // Provide all sorts of operation actions.
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::f16, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom);
  setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom);
  setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom);
  setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom);
  setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom);
  setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f16, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f16, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);

  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Custom lowering hooks are needed for XOR
  // to fold it into CSINC/CSINV.
  setOperationAction(ISD::XOR, MVT::i32, Custom);
  setOperationAction(ISD::XOR, MVT::i64, Custom);

  // Virtually no operations on f128 are legal, but LLVM can't expand them when
  // there's a valid register class, so we need custom operations in most cases.
  setOperationAction(ISD::FABS, MVT::f128, Expand);
  setOperationAction(ISD::FADD, MVT::f128, LibCall);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FDIV, MVT::f128, LibCall);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FMUL, MVT::f128, LibCall);
  setOperationAction(ISD::FNEG, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FRINT, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSQRT, MVT::f128, Expand);
  setOperationAction(ISD::FSUB, MVT::f128, LibCall);
  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Custom);
  setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom);
  setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);
  setOperationAction(ISD::SELECT, MVT::f128, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
  // FIXME: f128 FMINIMUM and FMAXIMUM (including STRICT versions) currently
  // aren't handled.

  // Lowering for many of the conversions is actually specified by the non-f128
  // type. The LowerXXX function will be trivial when f128 isn't involved.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);

  setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);

  // Variable arguments.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Variable-sized objects.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (Subtarget->isTargetWindows())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);

  // Constant pool entries
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);

  // BlockAddress
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);

  // AArch64 lacks both left-rotate and popcount instructions.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
  }

  // AArch64 doesn't have i32 MULH{S|U}.
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);

  // AArch64 doesn't have {U|S}MUL_LOHI.
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i64, Custom);
  setOperationAction(ISD::CTPOP, MVT::i128, Custom);

  setOperationAction(ISD::ABS, MVT::i32, Custom);
  setOperationAction(ISD::ABS, MVT::i64, Custom);

  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Custom lower Add/Sub/Mul with overflow.
  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::SADDO, MVT::i64, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i64, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i64, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i64, Custom);
  setOperationAction(ISD::SMULO, MVT::i32, Custom);
  setOperationAction(ISD::SMULO, MVT::i64, Custom);
  setOperationAction(ISD::UMULO, MVT::i32, Custom);
  setOperationAction(ISD::UMULO, MVT::i64, Custom);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Custom);
  setOperationAction(ISD::ADDCARRY, MVT::i64, Custom);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Custom);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Custom);
  setOperationAction(ISD::SADDO_CARRY, MVT::i32, Custom);
  setOperationAction(ISD::SADDO_CARRY, MVT::i64, Custom);
  setOperationAction(ISD::SSUBO_CARRY, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO_CARRY, MVT::i64, Custom);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  if (Subtarget->hasFullFP16())
    setOperationAction(ISD::FCOPYSIGN, MVT::f16, Custom);
  else
    setOperationAction(ISD::FCOPYSIGN, MVT::f16, Promote);

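  // Scalar f16 libm-style operations (and their strict variants) are promoted
  // to f32; the corresponding v4f16/v8f16 forms are expanded.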
  for (auto Op : {ISD::FREM,        ISD::FPOW,         ISD::FPOWI,
                  ISD::FCOS,        ISD::FSIN,         ISD::FSINCOS,
                  ISD::FEXP,        ISD::FEXP2,        ISD::FLOG,
                  ISD::FLOG2,       ISD::FLOG10,       ISD::STRICT_FREM,
                  ISD::STRICT_FPOW, ISD::STRICT_FPOWI, ISD::STRICT_FCOS,
                  ISD::STRICT_FSIN, ISD::STRICT_FEXP,  ISD::STRICT_FEXP2,
                  ISD::STRICT_FLOG, ISD::STRICT_FLOG2, ISD::STRICT_FLOG10}) {
    setOperationAction(Op, MVT::f16, Promote);
    setOperationAction(Op, MVT::v4f16, Expand);
    setOperationAction(Op, MVT::v8f16, Expand);
  }

  if (!Subtarget->hasFullFP16()) {
    for (auto Op :
         {ISD::SELECT,         ISD::SELECT_CC,      ISD::SETCC,
          ISD::BR_CC,          ISD::FADD,           ISD::FSUB,
          ISD::FMUL,           ISD::FDIV,           ISD::FMA,
          ISD::FNEG,           ISD::FABS,           ISD::FCEIL,
          ISD::FSQRT,          ISD::FFLOOR,         ISD::FNEARBYINT,
          ISD::FRINT,          ISD::FROUND,         ISD::FROUNDEVEN,
          ISD::FTRUNC,         ISD::FMINNUM,        ISD::FMAXNUM,
          ISD::FMINIMUM,       ISD::FMAXIMUM,       ISD::STRICT_FADD,
          ISD::STRICT_FSUB,    ISD::STRICT_FMUL,    ISD::STRICT_FDIV,
          ISD::STRICT_FMA,     ISD::STRICT_FCEIL,   ISD::STRICT_FFLOOR,
          ISD::STRICT_FSQRT,   ISD::STRICT_FRINT,   ISD::STRICT_FNEARBYINT,
          ISD::STRICT_FROUND,  ISD::STRICT_FTRUNC,  ISD::STRICT_FROUNDEVEN,
          ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM, ISD::STRICT_FMINIMUM,
          ISD::STRICT_FMAXIMUM})
      setOperationAction(Op, MVT::f16, Promote);

    // Round-to-integer operations need custom lowering for fp16, as Promote
    // doesn't work because the result type is integer.
    for (auto Op : {ISD::STRICT_LROUND, ISD::STRICT_LLROUND, ISD::STRICT_LRINT,
                    ISD::STRICT_LLRINT})
      setOperationAction(Op, MVT::f16, Custom);

    // Promote v4f16 to v4f32 when that is known to be safe.
    setOperationAction(ISD::FADD,        MVT::v4f16, Promote);
    setOperationAction(ISD::FSUB,        MVT::v4f16, Promote);
    setOperationAction(ISD::FMUL,        MVT::v4f16, Promote);
    setOperationAction(ISD::FDIV,        MVT::v4f16, Promote);
    AddPromotedToType(ISD::FADD,         MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FSUB,         MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FMUL,         MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FDIV,         MVT::v4f16, MVT::v4f32);

    setOperationAction(ISD::FABS,        MVT::v4f16, Expand);
    setOperationAction(ISD::FNEG,        MVT::v4f16, Expand);
    setOperationAction(ISD::FROUND,      MVT::v4f16, Expand);
    setOperationAction(ISD::FROUNDEVEN,  MVT::v4f16, Expand);
    setOperationAction(ISD::FMA,         MVT::v4f16, Expand);
    setOperationAction(ISD::SETCC,       MVT::v4f16, Expand);
    setOperationAction(ISD::BR_CC,       MVT::v4f16, Expand);
    setOperationAction(ISD::SELECT,      MVT::v4f16, Expand);
    setOperationAction(ISD::SELECT_CC,   MVT::v4f16, Expand);
    setOperationAction(ISD::FTRUNC,      MVT::v4f16, Expand);
    setOperationAction(ISD::FCOPYSIGN,   MVT::v4f16, Expand);
    setOperationAction(ISD::FFLOOR,      MVT::v4f16, Expand);
    setOperationAction(ISD::FCEIL,       MVT::v4f16, Expand);
    setOperationAction(ISD::FRINT,       MVT::v4f16, Expand);
    setOperationAction(ISD::FNEARBYINT,  MVT::v4f16, Expand);
    setOperationAction(ISD::FSQRT,       MVT::v4f16, Expand);

    setOperationAction(ISD::FABS,        MVT::v8f16, Expand);
    setOperationAction(ISD::FADD,        MVT::v8f16, Expand);
    setOperationAction(ISD::FCEIL,       MVT::v8f16, Expand);
    setOperationAction(ISD::FCOPYSIGN,   MVT::v8f16, Expand);
    setOperationAction(ISD::FDIV,        MVT::v8f16, Expand);
    setOperationAction(ISD::FFLOOR,      MVT::v8f16, Expand);
    setOperationAction(ISD::FMA,         MVT::v8f16, Expand);
    setOperationAction(ISD::FMUL,        MVT::v8f16, Expand);
    setOperationAction(ISD::FNEARBYINT,  MVT::v8f16, Expand);
    setOperationAction(ISD::FNEG,        MVT::v8f16, Expand);
    setOperationAction(ISD::FROUND,      MVT::v8f16, Expand);
    setOperationAction(ISD::FROUNDEVEN,  MVT::v8f16, Expand);
    setOperationAction(ISD::FRINT,       MVT::v8f16, Expand);
    setOperationAction(ISD::FSQRT,       MVT::v8f16, Expand);
    setOperationAction(ISD::FSUB,        MVT::v8f16, Expand);
    setOperationAction(ISD::FTRUNC,      MVT::v8f16, Expand);
    setOperationAction(ISD::SETCC,       MVT::v8f16, Expand);
    setOperationAction(ISD::BR_CC,       MVT::v8f16, Expand);
    setOperationAction(ISD::SELECT,      MVT::v8f16, Expand);
    setOperationAction(ISD::SELECT_CC,   MVT::v8f16, Expand);
    setOperationAction(ISD::FP_EXTEND,   MVT::v8f16, Expand);
  }

  // AArch64 has implementations of a lot of rounding-like FP operations.
  for (auto Op :
       {ISD::FFLOOR,          ISD::FNEARBYINT,      ISD::FCEIL,
        ISD::FRINT,           ISD::FTRUNC,          ISD::FROUND,
        ISD::FROUNDEVEN,      ISD::FMINNUM,         ISD::FMAXNUM,
        ISD::FMINIMUM,        ISD::FMAXIMUM,        ISD::LROUND,
        ISD::LLROUND,         ISD::LRINT,           ISD::LLRINT,
        ISD::STRICT_FFLOOR,   ISD::STRICT_FCEIL,    ISD::STRICT_FNEARBYINT,
        ISD::STRICT_FRINT,    ISD::STRICT_FTRUNC,   ISD::STRICT_FROUNDEVEN,
        ISD::STRICT_FROUND,   ISD::STRICT_FMINNUM,  ISD::STRICT_FMAXNUM,
        ISD::STRICT_FMINIMUM, ISD::STRICT_FMAXIMUM, ISD::STRICT_LROUND,
        ISD::STRICT_LLROUND,  ISD::STRICT_LRINT,    ISD::STRICT_LLRINT}) {
    for (MVT Ty : {MVT::f32, MVT::f64})
      setOperationAction(Op, Ty, Legal);
    if (Subtarget->hasFullFP16())
      setOperationAction(Op, MVT::f16, Legal);
  }

  // Basic strict FP operations are legal
  for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
                  ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT}) {
    for (MVT Ty : {MVT::f32, MVT::f64})
      setOperationAction(Op, Ty, Legal);
    if (Subtarget->hasFullFP16())
      setOperationAction(Op, MVT::f16, Legal);
  }

  // Strict conversion to a larger type is legal
  for (auto VT : {MVT::f32, MVT::f64})
    setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);

  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);

  // Generate outline atomics library calls only if LSE was not specified for
  // the subtarget.
  if (Subtarget->outlineAtomics() && !Subtarget->hasLSE()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, LibCall);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i64, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i8, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i16, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, LibCall);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, LibCall);
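// These macros register the runtime library call names of the outline atomic
// helpers (e.g. __aarch64_cas4_acq) for each access size and memory-ordering
// variant.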
#define LCALLNAMES(A, B, N)                                                    \
  setLibcallName(A##N##_RELAX, #B #N "_relax");                                \
  setLibcallName(A##N##_ACQ, #B #N "_acq");                                    \
  setLibcallName(A##N##_REL, #B #N "_rel");                                    \
  setLibcallName(A##N##_ACQ_REL, #B #N "_acq_rel");
#define LCALLNAME4(A, B)                                                       \
  LCALLNAMES(A, B, 1)                                                          \
  LCALLNAMES(A, B, 2) LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8)
#define LCALLNAME5(A, B)                                                       \
  LCALLNAMES(A, B, 1)                                                          \
  LCALLNAMES(A, B, 2)                                                          \
  LCALLNAMES(A, B, 4) LCALLNAMES(A, B, 8) LCALLNAMES(A, B, 16)
    LCALLNAME5(RTLIB::OUTLINE_ATOMIC_CAS, __aarch64_cas)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_SWP, __aarch64_swp)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDADD, __aarch64_ldadd)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDSET, __aarch64_ldset)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDCLR, __aarch64_ldclr)
    LCALLNAME4(RTLIB::OUTLINE_ATOMIC_LDEOR, __aarch64_ldeor)
#undef LCALLNAMES
#undef LCALLNAME4
#undef LCALLNAME5
  }

  // 128-bit loads and stores can be done without expanding
  setOperationAction(ISD::LOAD, MVT::i128, Custom);
  setOperationAction(ISD::STORE, MVT::i128, Custom);

  // Aligned 128-bit loads and stores are single-copy atomic according to the
  // v8.4a spec.
  if (Subtarget->hasLSE2()) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);
  }

  // 256 bit non-temporal stores can be lowered to STNP. Do this as part of the
  // custom lowering, as there are no un-paired non-temporal stores and
  // legalization will break up 256 bit inputs.
  setOperationAction(ISD::STORE, MVT::v32i8, Custom);
  setOperationAction(ISD::STORE, MVT::v16i16, Custom);
  setOperationAction(ISD::STORE, MVT::v16f16, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8f32, Custom);
  setOperationAction(ISD::STORE, MVT::v4f64, Custom);
  setOperationAction(ISD::STORE, MVT::v4i64, Custom);

  // Lower READCYCLECOUNTER using an mrs from PMCCNTR_EL0.
  // This requires the Performance Monitors extension.
  if (Subtarget->hasPerfMon())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);

  if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
      getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
    // Issue __sincos_stret if available.
    setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
    setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
  } else {
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  }

  if (Subtarget->getTargetTriple().isOSMSVCRT()) {
    // MSVCRT doesn't have powi; fall back to pow
    setLibcallName(RTLIB::POWI_F32, nullptr);
    setLibcallName(RTLIB::POWI_F64, nullptr);
  }

  // Make floating-point constants legal for the large code model, so they don't
  // become loads from the constant pool.
  if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) {
    setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
    setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  }

  // AArch64 does not have floating-point extending loads, i1 sign-extending
  // loads, floating-point truncating stores, or v2i32->v2i16 truncating stores.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);
  }
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f128, MVT::f80, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);

  setOperationAction(ISD::BITCAST, MVT::i16, Custom);
  setOperationAction(ISD::BITCAST, MVT::f16, Custom);
  setOperationAction(ISD::BITCAST, MVT::bf16, Custom);

  // Indexed loads and stores are supported.
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    setIndexedLoadAction(im, MVT::i8, Legal);
    setIndexedLoadAction(im, MVT::i16, Legal);
    setIndexedLoadAction(im, MVT::i32, Legal);
    setIndexedLoadAction(im, MVT::i64, Legal);
    setIndexedLoadAction(im, MVT::f64, Legal);
    setIndexedLoadAction(im, MVT::f32, Legal);
    setIndexedLoadAction(im, MVT::f16, Legal);
    setIndexedLoadAction(im, MVT::bf16, Legal);
    setIndexedStoreAction(im, MVT::i8, Legal);
    setIndexedStoreAction(im, MVT::i16, Legal);
    setIndexedStoreAction(im, MVT::i32, Legal);
    setIndexedStoreAction(im, MVT::i64, Legal);
    setIndexedStoreAction(im, MVT::f64, Legal);
    setIndexedStoreAction(im, MVT::f32, Legal);
    setIndexedStoreAction(im, MVT::f16, Legal);
    setIndexedStoreAction(im, MVT::bf16, Legal);
  }

  // Trap.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  setOperationAction(ISD::UBSANTRAP, MVT::Other, Legal);

  // We combine OR nodes for bitfield operations.
  setTargetDAGCombine(ISD::OR);
  // Try to create BICs for vector ANDs.
  setTargetDAGCombine(ISD::AND);

  // Vector add and sub nodes may conceal a high-half opportunity.
  // Also, try to fold ADD into CSINC/CSINV.
  setTargetDAGCombine({ISD::ADD, ISD::ABS, ISD::SUB, ISD::XOR, ISD::SINT_TO_FP,
                       ISD::UINT_TO_FP});

  setTargetDAGCombine({ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FP_TO_SINT_SAT,
                       ISD::FP_TO_UINT_SAT, ISD::FDIV});

  // Try to combine setcc with csel.
  setTargetDAGCombine(ISD::SETCC);

  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);

  setTargetDAGCombine({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND,
                       ISD::VECTOR_SPLICE, ISD::SIGN_EXTEND_INREG,
                       ISD::CONCAT_VECTORS, ISD::EXTRACT_SUBVECTOR,
                       ISD::INSERT_SUBVECTOR, ISD::STORE});
  if (Subtarget->supportsAddressTopByteIgnored())
    setTargetDAGCombine(ISD::LOAD);

  setTargetDAGCombine(ISD::MUL);

  setTargetDAGCombine({ISD::SELECT, ISD::VSELECT});

  setTargetDAGCombine({ISD::INTRINSIC_VOID, ISD::INTRINSIC_W_CHAIN,
                       ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
                       ISD::VECREDUCE_ADD, ISD::STEP_VECTOR});

  setTargetDAGCombine({ISD::MGATHER, ISD::MSCATTER});

  setTargetDAGCombine(ISD::FP_EXTEND);

  setTargetDAGCombine(ISD::GlobalAddress);

  // In case of strict alignment, avoid an excessive number of byte-wide stores.
  MaxStoresPerMemsetOptSize = 8;
  MaxStoresPerMemset =
      Subtarget->requiresStrictAlign() ? MaxStoresPerMemsetOptSize : 32;

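  // Inline expansion limits for memcpy/memmove/memcmp; under strict alignment
  // the memcpy and memcmp limits fall back to their OptSize values.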
  MaxGluedStoresPerMemcpy = 4;
  MaxStoresPerMemcpyOptSize = 4;
  MaxStoresPerMemcpy =
      Subtarget->requiresStrictAlign() ? MaxStoresPerMemcpyOptSize : 16;

  MaxStoresPerMemmoveOptSize = 4;
  MaxStoresPerMemmove = 4;

  MaxLoadsPerMemcmpOptSize = 4;
  MaxLoadsPerMemcmp =
      Subtarget->requiresStrictAlign() ? MaxLoadsPerMemcmpOptSize : 8;

  setStackPointerRegisterToSaveRestore(AArch64::SP);

  setSchedulingPreference(Sched::Hybrid);

  EnableExtLdPromotion = true;

  // Set required alignment.
  setMinFunctionAlignment(Align(4));
  // Set preferred alignments.
  setPrefLoopAlignment(Align(1ULL << STI.getPrefLoopLogAlignment()));
  setMaxBytesForAlignment(STI.getMaxBytesForLoopAlignment());
  setPrefFunctionAlignment(Align(1ULL << STI.getPrefFunctionLogAlignment()));

  // Only change the limit for entries in a jump table if specified by
  // the subtarget, but not at the command line.
  unsigned MaxJT = STI.getMaximumJumpTableSize();
  if (MaxJT && getMaximumJumpTableSize() == UINT_MAX)
    setMaximumJumpTableSize(MaxJT);

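  // AArch64 has bit-field extract instructions (SBFX/UBFX), which is what
  // setHasExtractBitsInsn advertises to the generic DAG combiner.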
  setHasExtractBitsInsn(true);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (Subtarget->hasNEON()) {
    // FIXME: v1f64 shouldn't be legal if we can avoid it, because it leads to
    // silliness like this:
    for (auto Op :
         {ISD::SELECT,         ISD::SELECT_CC,      ISD::SETCC,
          ISD::BR_CC,          ISD::FADD,           ISD::FSUB,
          ISD::FMUL,           ISD::FDIV,           ISD::FMA,
          ISD::FNEG,           ISD::FABS,           ISD::FCEIL,
          ISD::FSQRT,          ISD::FFLOOR,         ISD::FNEARBYINT,
          ISD::FRINT,          ISD::FROUND,         ISD::FROUNDEVEN,
          ISD::FTRUNC,         ISD::FMINNUM,        ISD::FMAXNUM,
          ISD::FMINIMUM,       ISD::FMAXIMUM,       ISD::STRICT_FADD,
          ISD::STRICT_FSUB,    ISD::STRICT_FMUL,    ISD::STRICT_FDIV,
          ISD::STRICT_FMA,     ISD::STRICT_FCEIL,   ISD::STRICT_FFLOOR,
          ISD::STRICT_FSQRT,   ISD::STRICT_FRINT,   ISD::STRICT_FNEARBYINT,
          ISD::STRICT_FROUND,  ISD::STRICT_FTRUNC,  ISD::STRICT_FROUNDEVEN,
          ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM, ISD::STRICT_FMINIMUM,
          ISD::STRICT_FMAXIMUM})
      setOperationAction(Op, MVT::v1f64, Expand);

    for (auto Op :
         {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, ISD::UINT_TO_FP,
          ISD::FP_ROUND, ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT, ISD::MUL,
          ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT,
          ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP, ISD::STRICT_FP_ROUND})
      setOperationAction(Op, MVT::v1i64, Expand);

    // AArch64 doesn't have direct vector -> f32 conversion instructions for
    // elements smaller than i32, so promote the input to i32 first.
    setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i8, MVT::v4i32);
    setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i8, MVT::v4i32);

    // Similarly, there is no direct i32 -> f64 vector conversion instruction,
    // nor a direct i32 -> f16 one. Set these to Custom, so the conversion
    // happens in two steps: v4i32 -> v4f32 -> v4f16.
    for (auto Op : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
                    ISD::STRICT_UINT_TO_FP})
      for (auto VT : {MVT::v2i32, MVT::v2i64, MVT::v4i32})
        setOperationAction(Op, VT, Custom);

    if (Subtarget->hasFullFP16()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::v8i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v16i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
    } else {
      // When AArch64 doesn't have full fp16 support, promote the input
      // to i32 first.
      setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i8, MVT::v8i32);
      setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i8, MVT::v8i32);
      setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v16i8, MVT::v16i32);
      setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v16i8, MVT::v16i32);
      setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i16, MVT::v4i32);
      setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i16, MVT::v4i32);
      setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i16, MVT::v8i32);
      setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i16, MVT::v8i32);
    }

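    // Vector CLZ is not available for 64-bit elements, and vector RBIT only
    // operates on bytes, so wider-element BITREVERSE is custom lowered.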
    setOperationAction(ISD::CTLZ,       MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ,       MVT::v2i64, Expand);
    setOperationAction(ISD::BITREVERSE, MVT::v8i8, Legal);
    setOperationAction(ISD::BITREVERSE, MVT::v16i8, Legal);
    setOperationAction(ISD::BITREVERSE, MVT::v2i32, Custom);
    setOperationAction(ISD::BITREVERSE, MVT::v4i32, Custom);
    setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom);
    setOperationAction(ISD::BITREVERSE, MVT::v2i64, Custom);
    for (auto VT : {MVT::v1i64, MVT::v2i64}) {
      setOperationAction(ISD::UMAX, VT, Custom);
      setOperationAction(ISD::SMAX, VT, Custom);
      setOperationAction(ISD::UMIN, VT, Custom);
      setOperationAction(ISD::SMIN, VT, Custom);
    }

    // AArch64 doesn't have MUL.2d:
    setOperationAction(ISD::MUL, MVT::v2i64, Expand);
    // Custom handling for some quad-vector types to detect MULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);

    // Saturates
    for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32,
                    MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
    }

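    // Halving adds, rounding halving adds and absolute differences map
    // directly onto NEON instructions for 8/16/32-bit element vectors.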
    for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16,
                   MVT::v4i32}) {
      setOperationAction(ISD::AVGFLOORS, VT, Legal);
      setOperationAction(ISD::AVGFLOORU, VT, Legal);
      setOperationAction(ISD::AVGCEILS, VT, Legal);
      setOperationAction(ISD::AVGCEILU, VT, Legal);
      setOperationAction(ISD::ABDS, VT, Legal);
      setOperationAction(ISD::ABDU, VT, Legal);
    }

    // Vector reductions
    for (MVT VT : { MVT::v4f16, MVT::v2f32,
                    MVT::v8f16, MVT::v4f32, MVT::v2f64 }) {
      if (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16()) {
        setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
        setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);

        setOperationAction(ISD::VECREDUCE_FADD, VT, Legal);
      }
    }
    for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32,
                    MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
    }
    setOperationAction(ISD::VECREDUCE_ADD, MVT::v2i64, Custom);

    setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Legal);
    setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
    // Likewise, narrowing and extending vector loads/stores aren't handled
    // directly.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

      if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32) {
        setOperationAction(ISD::MULHS, VT, Legal);
        setOperationAction(ISD::MULHU, VT, Legal);
      } else {
        setOperationAction(ISD::MULHS, VT, Expand);
        setOperationAction(ISD::MULHU, VT, Expand);
      }
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);

      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // AArch64 has implementations of a lot of rounding-like FP operations.
    for (auto Op :
         {ISD::FFLOOR, ISD::FNEARBYINT, ISD::FCEIL, ISD::FRINT, ISD::FTRUNC,
          ISD::FROUND, ISD::FROUNDEVEN, ISD::STRICT_FFLOOR,
          ISD::STRICT_FNEARBYINT, ISD::STRICT_FCEIL, ISD::STRICT_FRINT,
          ISD::STRICT_FTRUNC, ISD::STRICT_FROUND, ISD::STRICT_FROUNDEVEN}) {
      for (MVT Ty : {MVT::v2f32, MVT::v4f32, MVT::v2f64})
        setOperationAction(Op, Ty, Legal);
      if (Subtarget->hasFullFP16())
        for (MVT Ty : {MVT::v4f16, MVT::v8f16})
          setOperationAction(Op, Ty, Legal);
    }

    setTruncStoreAction(MVT::v4i16, MVT::v4i8, Custom);

    setLoadExtAction(ISD::EXTLOAD,  MVT::v4i16, MVT::v4i8, Custom);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Custom);
    setLoadExtAction(ISD::EXTLOAD,  MVT::v4i32, MVT::v4i8, Custom);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Custom);

    // ADDP custom lowering
    for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
      setOperationAction(ISD::ADD, VT, Custom);
    // FADDP custom lowering
    for (MVT VT : { MVT::v16f16, MVT::v8f32, MVT::v4f64 })
      setOperationAction(ISD::FADD, VT, Custom);
  }

1114   if (Subtarget->hasSME()) {
1115     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1116   }
1117 
1118   // FIXME: Move lowering for more nodes here if those are common between
1119   // SVE and SME.
1120   if (Subtarget->hasSVE() || Subtarget->hasSME()) {
1121     for (auto VT :
1122          {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1, MVT::nxv1i1}) {
1123       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
1124       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1125     }
1126   }
1127 
1128   if (Subtarget->hasSVE()) {
1129     for (auto VT : {MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64}) {
1130       setOperationAction(ISD::BITREVERSE, VT, Custom);
1131       setOperationAction(ISD::BSWAP, VT, Custom);
1132       setOperationAction(ISD::CTLZ, VT, Custom);
1133       setOperationAction(ISD::CTPOP, VT, Custom);
1134       setOperationAction(ISD::CTTZ, VT, Custom);
1135       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1136       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
1137       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
1138       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
1139       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
1140       setOperationAction(ISD::MGATHER, VT, Custom);
1141       setOperationAction(ISD::MSCATTER, VT, Custom);
1142       setOperationAction(ISD::MLOAD, VT, Custom);
1143       setOperationAction(ISD::MUL, VT, Custom);
1144       setOperationAction(ISD::MULHS, VT, Custom);
1145       setOperationAction(ISD::MULHU, VT, Custom);
1146       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
1147       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
1148       setOperationAction(ISD::SELECT, VT, Custom);
1149       setOperationAction(ISD::SETCC, VT, Custom);
1150       setOperationAction(ISD::SDIV, VT, Custom);
1151       setOperationAction(ISD::UDIV, VT, Custom);
1152       setOperationAction(ISD::SMIN, VT, Custom);
1153       setOperationAction(ISD::UMIN, VT, Custom);
1154       setOperationAction(ISD::SMAX, VT, Custom);
1155       setOperationAction(ISD::UMAX, VT, Custom);
1156       setOperationAction(ISD::SHL, VT, Custom);
1157       setOperationAction(ISD::SRL, VT, Custom);
1158       setOperationAction(ISD::SRA, VT, Custom);
1159       setOperationAction(ISD::ABS, VT, Custom);
1160       setOperationAction(ISD::ABDS, VT, Custom);
1161       setOperationAction(ISD::ABDU, VT, Custom);
1162       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
1163       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
1164       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
1165       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
1166       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
1167       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
1168       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
1169       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
1170 
1171       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
1172       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
1173       setOperationAction(ISD::SELECT_CC, VT, Expand);
1174       setOperationAction(ISD::ROTL, VT, Expand);
1175       setOperationAction(ISD::ROTR, VT, Expand);
1176 
1177       setOperationAction(ISD::SADDSAT, VT, Legal);
1178       setOperationAction(ISD::UADDSAT, VT, Legal);
1179       setOperationAction(ISD::SSUBSAT, VT, Legal);
1180       setOperationAction(ISD::USUBSAT, VT, Legal);
1181       setOperationAction(ISD::UREM, VT, Expand);
1182       setOperationAction(ISD::SREM, VT, Expand);
1183       setOperationAction(ISD::SDIVREM, VT, Expand);
1184       setOperationAction(ISD::UDIVREM, VT, Expand);
1185     }
1186 
1187     // Illegal unpacked integer vector types.
1188     for (auto VT : {MVT::nxv8i8, MVT::nxv4i16, MVT::nxv2i32}) {
1189       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1190       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1191     }
1192 
1193     // Legalize unpacked bitcasts to REINTERPRET_CAST.
1194     for (auto VT : {MVT::nxv2i16, MVT::nxv4i16, MVT::nxv2i32, MVT::nxv2bf16,
1195                     MVT::nxv4bf16, MVT::nxv2f16, MVT::nxv4f16, MVT::nxv2f32})
1196       setOperationAction(ISD::BITCAST, VT, Custom);
1197 
1198     for (auto VT :
1199          { MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64, MVT::nxv4i8,
1200            MVT::nxv4i16, MVT::nxv4i32, MVT::nxv8i8, MVT::nxv8i16 })
1201       setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Legal);
1202 
1203     for (auto VT :
1204          {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1, MVT::nxv1i1}) {
1205       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1206       setOperationAction(ISD::SELECT, VT, Custom);
1207       setOperationAction(ISD::SETCC, VT, Custom);
1208       setOperationAction(ISD::TRUNCATE, VT, Custom);
1209       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
1210       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
1211       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
1212 
1213       setOperationAction(ISD::SELECT_CC, VT, Expand);
1214       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1215       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1216 
1217       // There are no legal MVT::nxv16f## based types.
1218       if (VT != MVT::nxv16i1) {
1219         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
1220         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
1221       }
1222     }
1223 
1224     // NEON doesn't support masked loads/stores/gathers/scatters, but SVE does
1225     for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v1f64,
1226                     MVT::v2f64, MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
1227                     MVT::v2i32, MVT::v4i32, MVT::v1i64, MVT::v2i64}) {
1228       setOperationAction(ISD::MLOAD, VT, Custom);
1229       setOperationAction(ISD::MSTORE, VT, Custom);
1230       setOperationAction(ISD::MGATHER, VT, Custom);
1231       setOperationAction(ISD::MSCATTER, VT, Custom);
1232     }
1233 
    // First, mark all scalable-vector extending loads and truncating stores as
    // Expand; this covers both integer and floating-point scalable vectors.
1236     for (MVT VT : MVT::scalable_vector_valuetypes()) {
1237       for (MVT InnerVT : MVT::scalable_vector_valuetypes()) {
1238         setTruncStoreAction(VT, InnerVT, Expand);
1239         setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
1240         setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
1241         setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
1242       }
1243     }
1244 
1245     // Then, selectively enable those which we directly support.
1246     setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i8, Legal);
1247     setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i16, Legal);
1248     setTruncStoreAction(MVT::nxv2i64, MVT::nxv2i32, Legal);
1249     setTruncStoreAction(MVT::nxv4i32, MVT::nxv4i8, Legal);
1250     setTruncStoreAction(MVT::nxv4i32, MVT::nxv4i16, Legal);
1251     setTruncStoreAction(MVT::nxv8i16, MVT::nxv8i8, Legal);
1252     for (auto Op : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) {
1253       setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i8, Legal);
1254       setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i16, Legal);
1255       setLoadExtAction(Op, MVT::nxv2i64, MVT::nxv2i32, Legal);
1256       setLoadExtAction(Op, MVT::nxv4i32, MVT::nxv4i8, Legal);
1257       setLoadExtAction(Op, MVT::nxv4i32, MVT::nxv4i16, Legal);
1258       setLoadExtAction(Op, MVT::nxv8i16, MVT::nxv8i8, Legal);
1259     }
1260 
    // SVE supports truncating stores of 64-bit and 128-bit vectors.
1262     setTruncStoreAction(MVT::v2i64, MVT::v2i8, Custom);
1263     setTruncStoreAction(MVT::v2i64, MVT::v2i16, Custom);
1264     setTruncStoreAction(MVT::v2i64, MVT::v2i32, Custom);
1265     setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
1266     setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
1267 
1268     for (auto VT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32,
1269                     MVT::nxv4f32, MVT::nxv2f64}) {
1270       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1271       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1272       setOperationAction(ISD::MGATHER, VT, Custom);
1273       setOperationAction(ISD::MSCATTER, VT, Custom);
1274       setOperationAction(ISD::MLOAD, VT, Custom);
1275       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
1276       setOperationAction(ISD::SELECT, VT, Custom);
1277       setOperationAction(ISD::FADD, VT, Custom);
1278       setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1279       setOperationAction(ISD::FDIV, VT, Custom);
1280       setOperationAction(ISD::FMA, VT, Custom);
1281       setOperationAction(ISD::FMAXIMUM, VT, Custom);
1282       setOperationAction(ISD::FMAXNUM, VT, Custom);
1283       setOperationAction(ISD::FMINIMUM, VT, Custom);
1284       setOperationAction(ISD::FMINNUM, VT, Custom);
1285       setOperationAction(ISD::FMUL, VT, Custom);
1286       setOperationAction(ISD::FNEG, VT, Custom);
1287       setOperationAction(ISD::FSUB, VT, Custom);
1288       setOperationAction(ISD::FCEIL, VT, Custom);
1289       setOperationAction(ISD::FFLOOR, VT, Custom);
1290       setOperationAction(ISD::FNEARBYINT, VT, Custom);
1291       setOperationAction(ISD::FRINT, VT, Custom);
1292       setOperationAction(ISD::FROUND, VT, Custom);
1293       setOperationAction(ISD::FROUNDEVEN, VT, Custom);
1294       setOperationAction(ISD::FTRUNC, VT, Custom);
1295       setOperationAction(ISD::FSQRT, VT, Custom);
1296       setOperationAction(ISD::FABS, VT, Custom);
1297       setOperationAction(ISD::FP_EXTEND, VT, Custom);
1298       setOperationAction(ISD::FP_ROUND, VT, Custom);
1299       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1300       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
1301       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
1302       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1303       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
1304 
1305       setOperationAction(ISD::SELECT_CC, VT, Expand);
1306       setOperationAction(ISD::FREM, VT, Expand);
1307       setOperationAction(ISD::FPOW, VT, Expand);
1308       setOperationAction(ISD::FPOWI, VT, Expand);
1309       setOperationAction(ISD::FCOS, VT, Expand);
1310       setOperationAction(ISD::FSIN, VT, Expand);
1311       setOperationAction(ISD::FSINCOS, VT, Expand);
1312       setOperationAction(ISD::FEXP, VT, Expand);
1313       setOperationAction(ISD::FEXP2, VT, Expand);
1314       setOperationAction(ISD::FLOG, VT, Expand);
1315       setOperationAction(ISD::FLOG2, VT, Expand);
1316       setOperationAction(ISD::FLOG10, VT, Expand);
1317 
1318       setCondCodeAction(ISD::SETO, VT, Expand);
1319       setCondCodeAction(ISD::SETOLT, VT, Expand);
1320       setCondCodeAction(ISD::SETLT, VT, Expand);
1321       setCondCodeAction(ISD::SETOLE, VT, Expand);
1322       setCondCodeAction(ISD::SETLE, VT, Expand);
1323       setCondCodeAction(ISD::SETULT, VT, Expand);
1324       setCondCodeAction(ISD::SETULE, VT, Expand);
1325       setCondCodeAction(ISD::SETUGE, VT, Expand);
1326       setCondCodeAction(ISD::SETUGT, VT, Expand);
1327       setCondCodeAction(ISD::SETUEQ, VT, Expand);
1328       setCondCodeAction(ISD::SETONE, VT, Expand);
1329     }
1330 
1331     for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16}) {
1332       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1333       setOperationAction(ISD::MGATHER, VT, Custom);
1334       setOperationAction(ISD::MSCATTER, VT, Custom);
1335       setOperationAction(ISD::MLOAD, VT, Custom);
1336       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1337       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
1338     }
1339 
1340     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
1341     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
1342 
1343     // NEON doesn't support integer divides, but SVE does
1344     for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32,
1345                     MVT::v4i32, MVT::v1i64, MVT::v2i64}) {
1346       setOperationAction(ISD::SDIV, VT, Custom);
1347       setOperationAction(ISD::UDIV, VT, Custom);
1348     }
1349 
1350     // NEON doesn't support 64-bit vector integer muls, but SVE does.
1351     setOperationAction(ISD::MUL, MVT::v1i64, Custom);
1352     setOperationAction(ISD::MUL, MVT::v2i64, Custom);
1353 
1354     // NOTE: Currently this has to happen after computeRegisterProperties rather
1355     // than the preferred option of combining it with the addRegisterClass call.
1356     if (Subtarget->useSVEForFixedLengthVectors()) {
1357       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
1358         if (useSVEForFixedLengthVectorVT(VT))
1359           addTypeForFixedLengthSVE(VT);
1360       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
1361         if (useSVEForFixedLengthVectorVT(VT))
1362           addTypeForFixedLengthSVE(VT);
1363 
      // 64-bit results can come from an input that is wider than NEON supports.
1365       for (auto VT : {MVT::v8i8, MVT::v4i16})
1366         setOperationAction(ISD::TRUNCATE, VT, Custom);
1367       setOperationAction(ISD::FP_ROUND, MVT::v4f16, Custom);
1368 
      // 128-bit results imply an input that is wider than NEON supports.
1370       for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
1371         setOperationAction(ISD::TRUNCATE, VT, Custom);
1372       for (auto VT : {MVT::v8f16, MVT::v4f32})
1373         setOperationAction(ISD::FP_ROUND, VT, Custom);
1374 
1375       // These operations are not supported on NEON but SVE can do them.
1376       setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom);
1377       setOperationAction(ISD::CTLZ, MVT::v1i64, Custom);
1378       setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
1379       setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);
1380       setOperationAction(ISD::MULHS, MVT::v1i64, Custom);
1381       setOperationAction(ISD::MULHS, MVT::v2i64, Custom);
1382       setOperationAction(ISD::MULHU, MVT::v1i64, Custom);
1383       setOperationAction(ISD::MULHU, MVT::v2i64, Custom);
1384       setOperationAction(ISD::SMAX, MVT::v1i64, Custom);
1385       setOperationAction(ISD::SMAX, MVT::v2i64, Custom);
1386       setOperationAction(ISD::SMIN, MVT::v1i64, Custom);
1387       setOperationAction(ISD::SMIN, MVT::v2i64, Custom);
1388       setOperationAction(ISD::UMAX, MVT::v1i64, Custom);
1389       setOperationAction(ISD::UMAX, MVT::v2i64, Custom);
1390       setOperationAction(ISD::UMIN, MVT::v1i64, Custom);
1391       setOperationAction(ISD::UMIN, MVT::v2i64, Custom);
1392       setOperationAction(ISD::VECREDUCE_SMAX, MVT::v2i64, Custom);
1393       setOperationAction(ISD::VECREDUCE_SMIN, MVT::v2i64, Custom);
1394       setOperationAction(ISD::VECREDUCE_UMAX, MVT::v2i64, Custom);
1395       setOperationAction(ISD::VECREDUCE_UMIN, MVT::v2i64, Custom);
1396 
1397       // Int operations with no NEON support.
1398       for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
1399                       MVT::v2i32, MVT::v4i32, MVT::v2i64}) {
1400         setOperationAction(ISD::BITREVERSE, VT, Custom);
1401         setOperationAction(ISD::CTTZ, VT, Custom);
1402         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
1403         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
1404         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
1405       }
1406 
1407       // FP operations with no NEON support.
1408       for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32,
1409                       MVT::v1f64, MVT::v2f64})
1410         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1411 
1412       // Use SVE for vectors with more than 2 elements.
1413       for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v4f32})
1414         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1415     }
1416 
1417     setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv2i1, MVT::nxv2i64);
1418     setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv4i1, MVT::nxv4i32);
1419     setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv8i1, MVT::nxv8i16);
1420     setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv16i1, MVT::nxv16i8);
1421 
1422     setOperationAction(ISD::VSCALE, MVT::i32, Custom);
1423   }
1424 
1425   if (Subtarget->hasMOPS() && Subtarget->hasMTE()) {
1426     // Only required for llvm.aarch64.mops.memset.tag
1427     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
1428   }
1429 
1430   PredictableSelectIsExpensive = Subtarget->predictableSelectIsExpensive();
1431 
1432   IsStrictFPEnabled = true;
1433 }
1434 
1435 void AArch64TargetLowering::addTypeForNEON(MVT VT) {
1436   assert(VT.isVector() && "VT should be a vector type");
1437 
1438   if (VT.isFloatingPoint()) {
1439     MVT PromoteTo = EVT(VT).changeVectorElementTypeToInteger().getSimpleVT();
1440     setOperationPromotedToType(ISD::LOAD, VT, PromoteTo);
1441     setOperationPromotedToType(ISD::STORE, VT, PromoteTo);
1442   }
1443 
1444   // Mark vector float intrinsics as expand.
1445   if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64) {
1446     setOperationAction(ISD::FSIN, VT, Expand);
1447     setOperationAction(ISD::FCOS, VT, Expand);
1448     setOperationAction(ISD::FPOW, VT, Expand);
1449     setOperationAction(ISD::FLOG, VT, Expand);
1450     setOperationAction(ISD::FLOG2, VT, Expand);
1451     setOperationAction(ISD::FLOG10, VT, Expand);
1452     setOperationAction(ISD::FEXP, VT, Expand);
1453     setOperationAction(ISD::FEXP2, VT, Expand);
1454   }
1455 
1456   // But we do support custom-lowering for FCOPYSIGN.
1457   if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
1458       ((VT == MVT::v4f16 || VT == MVT::v8f16) && Subtarget->hasFullFP16()))
1459     setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1460 
1461   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1462   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1463   setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1464   setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1465   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1466   setOperationAction(ISD::SRA, VT, Custom);
1467   setOperationAction(ISD::SRL, VT, Custom);
1468   setOperationAction(ISD::SHL, VT, Custom);
1469   setOperationAction(ISD::OR, VT, Custom);
1470   setOperationAction(ISD::SETCC, VT, Custom);
1471   setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
1472 
1473   setOperationAction(ISD::SELECT, VT, Expand);
1474   setOperationAction(ISD::SELECT_CC, VT, Expand);
1475   setOperationAction(ISD::VSELECT, VT, Expand);
1476   for (MVT InnerVT : MVT::all_valuetypes())
1477     setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
1478 
  // CNT supports only B (i8) element sizes; for wider elements CTPOP is
  // custom-lowered using CNT followed by UADDLP to widen the result.
1480   if (VT != MVT::v8i8 && VT != MVT::v16i8)
1481     setOperationAction(ISD::CTPOP, VT, Custom);
1482 
1483   setOperationAction(ISD::UDIV, VT, Expand);
1484   setOperationAction(ISD::SDIV, VT, Expand);
1485   setOperationAction(ISD::UREM, VT, Expand);
1486   setOperationAction(ISD::SREM, VT, Expand);
1487   setOperationAction(ISD::FREM, VT, Expand);
1488 
1489   for (unsigned Opcode :
1490        {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FP_TO_SINT_SAT,
1491         ISD::FP_TO_UINT_SAT, ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT})
1492     setOperationAction(Opcode, VT, Custom);
1493 
1494   if (!VT.isFloatingPoint())
1495     setOperationAction(ISD::ABS, VT, Legal);
1496 
1497   // [SU][MIN|MAX] are available for all NEON types apart from i64.
1498   if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64)
1499     for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
1500       setOperationAction(Opcode, VT, Legal);
1501 
1502   // F[MIN|MAX][NUM|NAN] and simple strict operations are available for all FP
1503   // NEON types.
1504   if (VT.isFloatingPoint() &&
1505       VT.getVectorElementType() != MVT::bf16 &&
1506       (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16()))
1507     for (unsigned Opcode :
1508          {ISD::FMINIMUM, ISD::FMAXIMUM, ISD::FMINNUM, ISD::FMAXNUM,
1509           ISD::STRICT_FMINIMUM, ISD::STRICT_FMAXIMUM, ISD::STRICT_FMINNUM,
1510           ISD::STRICT_FMAXNUM, ISD::STRICT_FADD, ISD::STRICT_FSUB,
1511           ISD::STRICT_FMUL, ISD::STRICT_FDIV, ISD::STRICT_FMA,
1512           ISD::STRICT_FSQRT})
1513       setOperationAction(Opcode, VT, Legal);
1514 
1515   // Strict fp extend and trunc are legal
1516   if (VT.isFloatingPoint() && VT.getScalarSizeInBits() != 16)
1517     setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
1518   if (VT.isFloatingPoint() && VT.getScalarSizeInBits() != 64)
1519     setOperationAction(ISD::STRICT_FP_ROUND, VT, Legal);
1520 
  // FIXME: We could potentially make use of the vector comparison instructions
  // for STRICT_FSETCC and STRICT_FSETCCS, but there are a number of
  // complications:
1524   //  * FCMPEQ/NE are quiet comparisons, the rest are signalling comparisons,
1525   //    so we would need to expand when the condition code doesn't match the
1526   //    kind of comparison.
1527   //  * Some kinds of comparison require more than one FCMXY instruction so
1528   //    would need to be expanded instead.
1529   //  * The lowering of the non-strict versions involves target-specific ISD
1530   //    nodes so we would likely need to add strict versions of all of them and
1531   //    handle them appropriately.
1532   setOperationAction(ISD::STRICT_FSETCC, VT, Expand);
1533   setOperationAction(ISD::STRICT_FSETCCS, VT, Expand);
1534 
1535   if (Subtarget->isLittleEndian()) {
1536     for (unsigned im = (unsigned)ISD::PRE_INC;
1537          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
1538       setIndexedLoadAction(im, VT, Legal);
1539       setIndexedStoreAction(im, VT, Legal);
1540     }
1541   }
1542 }
1543 
1544 bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
1545                                                           EVT OpVT) const {
1546   // Only SVE has a 1:1 mapping from intrinsic -> instruction (whilelo).
1547   if (!Subtarget->hasSVE())
1548     return true;
1549 
1550   // We can only support legal predicate result types. We can use the SVE
1551   // whilelo instruction for generating fixed-width predicates too.
1552   if (ResVT != MVT::nxv2i1 && ResVT != MVT::nxv4i1 && ResVT != MVT::nxv8i1 &&
1553       ResVT != MVT::nxv16i1 && ResVT != MVT::v2i1 && ResVT != MVT::v4i1 &&
1554       ResVT != MVT::v8i1 && ResVT != MVT::v16i1)
1555     return true;
1556 
1557   // The whilelo instruction only works with i32 or i64 scalar inputs.
1558   if (OpVT != MVT::i32 && OpVT != MVT::i64)
1559     return true;
1560 
1561   return false;
1562 }
1563 
1564 void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
1565   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
1566 
1567   // By default everything must be expanded.
1568   for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
1569     setOperationAction(Op, VT, Expand);
1570 
1571   // We use EXTRACT_SUBVECTOR to "cast" a scalable vector to a fixed length one.
1572   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1573 
1574   if (VT.isFloatingPoint()) {
1575     setCondCodeAction(ISD::SETO, VT, Expand);
1576     setCondCodeAction(ISD::SETOLT, VT, Expand);
1577     setCondCodeAction(ISD::SETLT, VT, Expand);
1578     setCondCodeAction(ISD::SETOLE, VT, Expand);
1579     setCondCodeAction(ISD::SETLE, VT, Expand);
1580     setCondCodeAction(ISD::SETULT, VT, Expand);
1581     setCondCodeAction(ISD::SETULE, VT, Expand);
1582     setCondCodeAction(ISD::SETUGE, VT, Expand);
1583     setCondCodeAction(ISD::SETUGT, VT, Expand);
1584     setCondCodeAction(ISD::SETUEQ, VT, Expand);
1585     setCondCodeAction(ISD::SETONE, VT, Expand);
1586   }
1587 
1588   // Mark integer truncating stores/extending loads as having custom lowering
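  // (e.g. for MVT::v4i32 this covers truncating stores to v4i8/v4i16 and the
  // corresponding extending loads).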
1589   if (VT.isInteger()) {
1590     MVT InnerVT = VT.changeVectorElementType(MVT::i8);
1591     while (InnerVT != VT) {
1592       setTruncStoreAction(VT, InnerVT, Custom);
1593       setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Custom);
1594       setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Custom);
1595       InnerVT = InnerVT.changeVectorElementType(
1596           MVT::getIntegerVT(2 * InnerVT.getScalarSizeInBits()));
1597     }
1598   }
1599 
1600   // Mark floating-point truncating stores/extending loads as having custom
1601   // lowering
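  // (e.g. for MVT::v4f32 this covers the v4f16 truncating store and extending
  // load).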
1602   if (VT.isFloatingPoint()) {
1603     MVT InnerVT = VT.changeVectorElementType(MVT::f16);
1604     while (InnerVT != VT) {
1605       setTruncStoreAction(VT, InnerVT, Custom);
1606       setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Custom);
1607       InnerVT = InnerVT.changeVectorElementType(
1608           MVT::getFloatingPointVT(2 * InnerVT.getScalarSizeInBits()));
1609     }
1610   }
1611 
1612   // Lower fixed length vector operations to scalable equivalents.
1613   setOperationAction(ISD::ABS, VT, Custom);
1614   setOperationAction(ISD::ADD, VT, Custom);
1615   setOperationAction(ISD::AND, VT, Custom);
1616   setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1617   setOperationAction(ISD::BITCAST, VT, Custom);
1618   setOperationAction(ISD::BITREVERSE, VT, Custom);
1619   setOperationAction(ISD::BSWAP, VT, Custom);
1620   setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1621   setOperationAction(ISD::CTLZ, VT, Custom);
1622   setOperationAction(ISD::CTPOP, VT, Custom);
1623   setOperationAction(ISD::CTTZ, VT, Custom);
1624   setOperationAction(ISD::FABS, VT, Custom);
1625   setOperationAction(ISD::FADD, VT, Custom);
1626   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1627   setOperationAction(ISD::FCEIL, VT, Custom);
1628   setOperationAction(ISD::FDIV, VT, Custom);
1629   setOperationAction(ISD::FFLOOR, VT, Custom);
1630   setOperationAction(ISD::FMA, VT, Custom);
1631   setOperationAction(ISD::FMAXIMUM, VT, Custom);
1632   setOperationAction(ISD::FMAXNUM, VT, Custom);
1633   setOperationAction(ISD::FMINIMUM, VT, Custom);
1634   setOperationAction(ISD::FMINNUM, VT, Custom);
1635   setOperationAction(ISD::FMUL, VT, Custom);
1636   setOperationAction(ISD::FNEARBYINT, VT, Custom);
1637   setOperationAction(ISD::FNEG, VT, Custom);
1638   setOperationAction(ISD::FP_EXTEND, VT, Custom);
1639   setOperationAction(ISD::FP_ROUND, VT, Custom);
1640   setOperationAction(ISD::FP_TO_SINT, VT, Custom);
1641   setOperationAction(ISD::FP_TO_UINT, VT, Custom);
1642   setOperationAction(ISD::FRINT, VT, Custom);
1643   setOperationAction(ISD::FROUND, VT, Custom);
1644   setOperationAction(ISD::FROUNDEVEN, VT, Custom);
1645   setOperationAction(ISD::FSQRT, VT, Custom);
1646   setOperationAction(ISD::FSUB, VT, Custom);
1647   setOperationAction(ISD::FTRUNC, VT, Custom);
1648   setOperationAction(ISD::LOAD, VT, Custom);
1649   setOperationAction(ISD::MGATHER, VT, Custom);
1650   setOperationAction(ISD::MLOAD, VT, Custom);
1651   setOperationAction(ISD::MSCATTER, VT, Custom);
1652   setOperationAction(ISD::MSTORE, VT, Custom);
1653   setOperationAction(ISD::MUL, VT, Custom);
1654   setOperationAction(ISD::MULHS, VT, Custom);
1655   setOperationAction(ISD::MULHU, VT, Custom);
1656   setOperationAction(ISD::OR, VT, Custom);
1657   setOperationAction(ISD::SDIV, VT, Custom);
1658   setOperationAction(ISD::SELECT, VT, Custom);
1659   setOperationAction(ISD::SETCC, VT, Custom);
1660   setOperationAction(ISD::SHL, VT, Custom);
1661   setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1662   setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1663   setOperationAction(ISD::SINT_TO_FP, VT, Custom);
1664   setOperationAction(ISD::SMAX, VT, Custom);
1665   setOperationAction(ISD::SMIN, VT, Custom);
1666   setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
1667   setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
1668   setOperationAction(ISD::SRA, VT, Custom);
1669   setOperationAction(ISD::SRL, VT, Custom);
1670   setOperationAction(ISD::STORE, VT, Custom);
1671   setOperationAction(ISD::SUB, VT, Custom);
1672   setOperationAction(ISD::TRUNCATE, VT, Custom);
1673   setOperationAction(ISD::UDIV, VT, Custom);
1674   setOperationAction(ISD::UINT_TO_FP, VT, Custom);
1675   setOperationAction(ISD::UMAX, VT, Custom);
1676   setOperationAction(ISD::UMIN, VT, Custom);
1677   setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
1678   setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
1679   setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1680   setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1681   setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
1682   setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
1683   setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
1684   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1685   setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
1686   setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
1687   setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
1688   setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
1689   setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
1690   setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1691   setOperationAction(ISD::VSELECT, VT, Custom);
1692   setOperationAction(ISD::XOR, VT, Custom);
1693   setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1694 }
1695 
1696 void AArch64TargetLowering::addDRTypeForNEON(MVT VT) {
1697   addRegisterClass(VT, &AArch64::FPR64RegClass);
1698   addTypeForNEON(VT);
1699 }
1700 
1701 void AArch64TargetLowering::addQRTypeForNEON(MVT VT) {
1702   addRegisterClass(VT, &AArch64::FPR128RegClass);
1703   addTypeForNEON(VT);
1704 }
1705 
1706 EVT AArch64TargetLowering::getSetCCResultType(const DataLayout &,
1707                                               LLVMContext &C, EVT VT) const {
1708   if (!VT.isVector())
1709     return MVT::i32;
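  // Scalable-vector comparisons produce SVE predicates, i.e. vectors of i1.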
1710   if (VT.isScalableVector())
1711     return EVT::getVectorVT(C, MVT::i1, VT.getVectorElementCount());
1712   return VT.changeVectorElementTypeToInteger();
1713 }
1714 
1715 static bool optimizeLogicalImm(SDValue Op, unsigned Size, uint64_t Imm,
1716                                const APInt &Demanded,
1717                                TargetLowering::TargetLoweringOpt &TLO,
1718                                unsigned NewOpc) {
1719   uint64_t OldImm = Imm, NewImm, Enc;
1720   uint64_t Mask = ((uint64_t)(-1LL) >> (64 - Size)), OrigMask = Mask;
1721 
1722   // Return if the immediate is already all zeros, all ones, a bimm32 or a
1723   // bimm64.
1724   if (Imm == 0 || Imm == Mask ||
1725       AArch64_AM::isLogicalImmediate(Imm & Mask, Size))
1726     return false;
1727 
1728   unsigned EltSize = Size;
1729   uint64_t DemandedBits = Demanded.getZExtValue();
1730 
1731   // Clear bits that are not demanded.
1732   Imm &= DemandedBits;
1733 
1734   while (true) {
    // The goal here is to set the non-demanded bits in a way that minimizes
    // the number of transitions between 0 and 1. To achieve this, we set each
    // non-demanded bit to the value of the preceding demanded bit.
1738     // For example, if we have an immediate 0bx10xx0x1 ('x' indicates a
1739     // non-demanded bit), we copy bit0 (1) to the least significant 'x',
1740     // bit2 (0) to 'xx', and bit6 (1) to the most significant 'x'.
1741     // The final result is 0b11000011.
1742     uint64_t NonDemandedBits = ~DemandedBits;
1743     uint64_t InvertedImm = ~Imm & DemandedBits;
1744     uint64_t RotatedImm =
1745         ((InvertedImm << 1) | (InvertedImm >> (EltSize - 1) & 1)) &
1746         NonDemandedBits;
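    // RotatedImm seeds a 1 at the bottom of each run of non-demanded bits
    // whose preceding demanded bit is 0. Adding it to NonDemandedBits lets
    // the carry clear those runs, so only runs preceded by a demanded 1
    // survive in Sum; Carry handles the wrap-around at the element boundary.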
1747     uint64_t Sum = RotatedImm + NonDemandedBits;
1748     bool Carry = NonDemandedBits & ~Sum & (1ULL << (EltSize - 1));
1749     uint64_t Ones = (Sum + Carry) & NonDemandedBits;
1750     NewImm = (Imm | Ones) & Mask;
1751 
1752     // If NewImm or its bitwise NOT is a shifted mask, it is a bitmask immediate
1753     // or all-ones or all-zeros, in which case we can stop searching. Otherwise,
1754     // we halve the element size and continue the search.
1755     if (isShiftedMask_64(NewImm) || isShiftedMask_64(~(NewImm | ~Mask)))
1756       break;
1757 
1758     // We cannot shrink the element size any further if it is 2-bits.
1759     if (EltSize == 2)
1760       return false;
1761 
1762     EltSize /= 2;
1763     Mask >>= EltSize;
1764     uint64_t Hi = Imm >> EltSize, DemandedBitsHi = DemandedBits >> EltSize;
1765 
    // Return if there is a mismatch in any of the demanded bits of Imm and Hi.
1767     if (((Imm ^ Hi) & (DemandedBits & DemandedBitsHi) & Mask) != 0)
1768       return false;
1769 
1770     // Merge the upper and lower halves of Imm and DemandedBits.
1771     Imm |= Hi;
1772     DemandedBits |= DemandedBitsHi;
1773   }
1774 
1775   ++NumOptimizedImms;
1776 
1777   // Replicate the element across the register width.
1778   while (EltSize < Size) {
1779     NewImm |= NewImm << EltSize;
1780     EltSize *= 2;
1781   }
1782 
1783   (void)OldImm;
1784   assert(((OldImm ^ NewImm) & Demanded.getZExtValue()) == 0 &&
1785          "demanded bits should never be altered");
1786   assert(OldImm != NewImm && "the new imm shouldn't be equal to the old imm");
1787 
1788   // Create the new constant immediate node.
1789   EVT VT = Op.getValueType();
1790   SDLoc DL(Op);
1791   SDValue New;
1792 
1793   // If the new constant immediate is all-zeros or all-ones, let the target
1794   // independent DAG combine optimize this node.
1795   if (NewImm == 0 || NewImm == OrigMask) {
1796     New = TLO.DAG.getNode(Op.getOpcode(), DL, VT, Op.getOperand(0),
1797                           TLO.DAG.getConstant(NewImm, DL, VT));
1798   // Otherwise, create a machine node so that target independent DAG combine
1799   // doesn't undo this optimization.
1800   } else {
1801     Enc = AArch64_AM::encodeLogicalImmediate(NewImm, Size);
1802     SDValue EncConst = TLO.DAG.getTargetConstant(Enc, DL, VT);
1803     New = SDValue(
1804         TLO.DAG.getMachineNode(NewOpc, DL, VT, Op.getOperand(0), EncConst), 0);
1805   }
1806 
1807   return TLO.CombineTo(Op, New);
1808 }
1809 
1810 bool AArch64TargetLowering::targetShrinkDemandedConstant(
1811     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
1812     TargetLoweringOpt &TLO) const {
1813   // Delay this optimization to as late as possible.
1814   if (!TLO.LegalOps)
1815     return false;
1816 
1817   if (!EnableOptimizeLogicalImm)
1818     return false;
1819 
1820   EVT VT = Op.getValueType();
1821   if (VT.isVector())
1822     return false;
1823 
1824   unsigned Size = VT.getSizeInBits();
1825   assert((Size == 32 || Size == 64) &&
1826          "i32 or i64 is expected after legalization.");
1827 
1828   // Exit early if we demand all bits.
1829   if (DemandedBits.countPopulation() == Size)
1830     return false;
1831 
1832   unsigned NewOpc;
1833   switch (Op.getOpcode()) {
1834   default:
1835     return false;
1836   case ISD::AND:
1837     NewOpc = Size == 32 ? AArch64::ANDWri : AArch64::ANDXri;
1838     break;
1839   case ISD::OR:
1840     NewOpc = Size == 32 ? AArch64::ORRWri : AArch64::ORRXri;
1841     break;
1842   case ISD::XOR:
1843     NewOpc = Size == 32 ? AArch64::EORWri : AArch64::EORXri;
1844     break;
1845   }
1846   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
1847   if (!C)
1848     return false;
1849   uint64_t Imm = C->getZExtValue();
1850   return optimizeLogicalImm(Op, Size, Imm, DemandedBits, TLO, NewOpc);
1851 }
1852 
/// computeKnownBitsForTargetNode - Determine which of the bits specified in
/// Mask are known to be either zero or one and return them in Known.
1855 void AArch64TargetLowering::computeKnownBitsForTargetNode(
1856     const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
1857     const SelectionDAG &DAG, unsigned Depth) const {
1858   switch (Op.getOpcode()) {
1859   default:
1860     break;
1861   case AArch64ISD::DUP: {
1862     SDValue SrcOp = Op.getOperand(0);
1863     Known = DAG.computeKnownBits(SrcOp, Depth + 1);
1864     if (SrcOp.getValueSizeInBits() != Op.getScalarValueSizeInBits()) {
1865       assert(SrcOp.getValueSizeInBits() > Op.getScalarValueSizeInBits() &&
1866              "Expected DUP implicit truncation");
1867       Known = Known.trunc(Op.getScalarValueSizeInBits());
1868     }
1869     break;
1870   }
1871   case AArch64ISD::CSEL: {
1872     KnownBits Known2;
1873     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1874     Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
1875     Known = KnownBits::commonBits(Known, Known2);
1876     break;
1877   }
1878   case AArch64ISD::BICi: {
1879     // Compute the bit cleared value.
1880     uint64_t Mask =
1881         ~(Op->getConstantOperandVal(1) << Op->getConstantOperandVal(2));
1882     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1883     Known &= KnownBits::makeConstant(APInt(Known.getBitWidth(), Mask));
1884     break;
1885   }
1886   case AArch64ISD::VLSHR: {
1887     KnownBits Known2;
1888     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1889     Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
1890     Known = KnownBits::lshr(Known, Known2);
1891     break;
1892   }
1893   case AArch64ISD::VASHR: {
1894     KnownBits Known2;
1895     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1896     Known2 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1);
1897     Known = KnownBits::ashr(Known, Known2);
1898     break;
1899   }
1900   case AArch64ISD::LOADgot:
1901   case AArch64ISD::ADDlow: {
1902     if (!Subtarget->isTargetILP32())
1903       break;
1904     // In ILP32 mode all valid pointers are in the low 4GB of the address-space.
1905     Known.Zero = APInt::getHighBitsSet(64, 32);
1906     break;
1907   }
1908   case AArch64ISD::ASSERT_ZEXT_BOOL: {
1909     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
1910     Known.Zero |= APInt(Known.getBitWidth(), 0xFE);
1911     break;
1912   }
1913   case ISD::INTRINSIC_W_CHAIN: {
1914     ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
1915     Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
1916     switch (IntID) {
1917     default: return;
1918     case Intrinsic::aarch64_ldaxr:
1919     case Intrinsic::aarch64_ldxr: {
1920       unsigned BitWidth = Known.getBitWidth();
1921       EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
1922       unsigned MemBits = VT.getScalarSizeInBits();
1923       Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
1924       return;
1925     }
1926     }
1927     break;
1928   }
1929   case ISD::INTRINSIC_WO_CHAIN:
1930   case ISD::INTRINSIC_VOID: {
1931     unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1932     switch (IntNo) {
1933     default:
1934       break;
1935     case Intrinsic::aarch64_neon_umaxv:
1936     case Intrinsic::aarch64_neon_uminv: {
      // Figure out the datatype of the vector operand. The UMINV instruction
      // will zero extend the result, so we can mark as known zero all the
      // bits larger than the element datatype. 32-bit or larger elements do
      // not need this, as those are legal types and will be handled by isel
      // directly.
1941       MVT VT = Op.getOperand(1).getValueType().getSimpleVT();
1942       unsigned BitWidth = Known.getBitWidth();
1943       if (VT == MVT::v8i8 || VT == MVT::v16i8) {
1944         assert(BitWidth >= 8 && "Unexpected width!");
1945         APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 8);
1946         Known.Zero |= Mask;
1947       } else if (VT == MVT::v4i16 || VT == MVT::v8i16) {
1948         assert(BitWidth >= 16 && "Unexpected width!");
1949         APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
1950         Known.Zero |= Mask;
1951       }
1952       break;
    }
1954     }
1955   }
1956   }
1957 }
1958 
1959 MVT AArch64TargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
1960                                                   EVT) const {
1961   return MVT::i64;
1962 }
1963 
1964 bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
1965     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
1966     bool *Fast) const {
1967   if (Subtarget->requiresStrictAlign())
1968     return false;
1969 
1970   if (Fast) {
1971     // Some CPUs are fine with unaligned stores except for 128-bit ones.
1972     *Fast = !Subtarget->isMisaligned128StoreSlow() || VT.getStoreSize() != 16 ||
1973             // See comments in performSTORECombine() for more details about
1974             // these conditions.
1975 
1976             // Code that uses clang vector extensions can mark that it
1977             // wants unaligned accesses to be treated as fast by
1978             // underspecifying alignment to be 1 or 2.
1979             Alignment <= 2 ||
1980 
1981             // Disregard v2i64. Memcpy lowering produces those and splitting
1982             // them regresses performance on micro-benchmarks and olden/bh.
1983             VT == MVT::v2i64;
1984   }
1985   return true;
1986 }
1987 
1988 // Same as above but handling LLTs instead.
1989 bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
1990     LLT Ty, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
1991     bool *Fast) const {
1992   if (Subtarget->requiresStrictAlign())
1993     return false;
1994 
1995   if (Fast) {
1996     // Some CPUs are fine with unaligned stores except for 128-bit ones.
1997     *Fast = !Subtarget->isMisaligned128StoreSlow() ||
1998             Ty.getSizeInBytes() != 16 ||
1999             // See comments in performSTORECombine() for more details about
2000             // these conditions.
2001 
2002             // Code that uses clang vector extensions can mark that it
2003             // wants unaligned accesses to be treated as fast by
2004             // underspecifying alignment to be 1 or 2.
2005             Alignment <= 2 ||
2006 
2007             // Disregard v2i64. Memcpy lowering produces those and splitting
2008             // them regresses performance on micro-benchmarks and olden/bh.
2009             Ty == LLT::fixed_vector(2, 64);
2010   }
2011   return true;
2012 }
2013 
2014 FastISel *
2015 AArch64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
2016                                       const TargetLibraryInfo *libInfo) const {
2017   return AArch64::createFastISel(funcInfo, libInfo);
2018 }
2019 
2020 const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
2021 #define MAKE_CASE(V)                                                           \
2022   case V:                                                                      \
2023     return #V;
2024   switch ((AArch64ISD::NodeType)Opcode) {
2025   case AArch64ISD::FIRST_NUMBER:
2026     break;
2027     MAKE_CASE(AArch64ISD::CALL)
2028     MAKE_CASE(AArch64ISD::ADRP)
2029     MAKE_CASE(AArch64ISD::ADR)
2030     MAKE_CASE(AArch64ISD::ADDlow)
2031     MAKE_CASE(AArch64ISD::LOADgot)
2032     MAKE_CASE(AArch64ISD::RET_FLAG)
2033     MAKE_CASE(AArch64ISD::BRCOND)
2034     MAKE_CASE(AArch64ISD::CSEL)
2035     MAKE_CASE(AArch64ISD::CSINV)
2036     MAKE_CASE(AArch64ISD::CSNEG)
2037     MAKE_CASE(AArch64ISD::CSINC)
2038     MAKE_CASE(AArch64ISD::THREAD_POINTER)
2039     MAKE_CASE(AArch64ISD::TLSDESC_CALLSEQ)
2040     MAKE_CASE(AArch64ISD::ABDS_PRED)
2041     MAKE_CASE(AArch64ISD::ABDU_PRED)
2042     MAKE_CASE(AArch64ISD::MUL_PRED)
2043     MAKE_CASE(AArch64ISD::MULHS_PRED)
2044     MAKE_CASE(AArch64ISD::MULHU_PRED)
2045     MAKE_CASE(AArch64ISD::SDIV_PRED)
2046     MAKE_CASE(AArch64ISD::SHL_PRED)
2047     MAKE_CASE(AArch64ISD::SMAX_PRED)
2048     MAKE_CASE(AArch64ISD::SMIN_PRED)
2049     MAKE_CASE(AArch64ISD::SRA_PRED)
2050     MAKE_CASE(AArch64ISD::SRL_PRED)
2051     MAKE_CASE(AArch64ISD::UDIV_PRED)
2052     MAKE_CASE(AArch64ISD::UMAX_PRED)
2053     MAKE_CASE(AArch64ISD::UMIN_PRED)
2054     MAKE_CASE(AArch64ISD::SRAD_MERGE_OP1)
2055     MAKE_CASE(AArch64ISD::FNEG_MERGE_PASSTHRU)
2056     MAKE_CASE(AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU)
2057     MAKE_CASE(AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU)
2058     MAKE_CASE(AArch64ISD::FCEIL_MERGE_PASSTHRU)
2059     MAKE_CASE(AArch64ISD::FFLOOR_MERGE_PASSTHRU)
2060     MAKE_CASE(AArch64ISD::FNEARBYINT_MERGE_PASSTHRU)
2061     MAKE_CASE(AArch64ISD::FRINT_MERGE_PASSTHRU)
2062     MAKE_CASE(AArch64ISD::FROUND_MERGE_PASSTHRU)
2063     MAKE_CASE(AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU)
2064     MAKE_CASE(AArch64ISD::FTRUNC_MERGE_PASSTHRU)
2065     MAKE_CASE(AArch64ISD::FP_ROUND_MERGE_PASSTHRU)
2066     MAKE_CASE(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU)
2067     MAKE_CASE(AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU)
2068     MAKE_CASE(AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU)
2069     MAKE_CASE(AArch64ISD::FCVTZU_MERGE_PASSTHRU)
2070     MAKE_CASE(AArch64ISD::FCVTZS_MERGE_PASSTHRU)
2071     MAKE_CASE(AArch64ISD::FSQRT_MERGE_PASSTHRU)
2072     MAKE_CASE(AArch64ISD::FRECPX_MERGE_PASSTHRU)
2073     MAKE_CASE(AArch64ISD::FABS_MERGE_PASSTHRU)
2074     MAKE_CASE(AArch64ISD::ABS_MERGE_PASSTHRU)
2075     MAKE_CASE(AArch64ISD::NEG_MERGE_PASSTHRU)
2076     MAKE_CASE(AArch64ISD::SETCC_MERGE_ZERO)
2077     MAKE_CASE(AArch64ISD::ADC)
2078     MAKE_CASE(AArch64ISD::SBC)
2079     MAKE_CASE(AArch64ISD::ADDS)
2080     MAKE_CASE(AArch64ISD::SUBS)
2081     MAKE_CASE(AArch64ISD::ADCS)
2082     MAKE_CASE(AArch64ISD::SBCS)
2083     MAKE_CASE(AArch64ISD::ANDS)
2084     MAKE_CASE(AArch64ISD::CCMP)
2085     MAKE_CASE(AArch64ISD::CCMN)
2086     MAKE_CASE(AArch64ISD::FCCMP)
2087     MAKE_CASE(AArch64ISD::FCMP)
2088     MAKE_CASE(AArch64ISD::STRICT_FCMP)
2089     MAKE_CASE(AArch64ISD::STRICT_FCMPE)
2090     MAKE_CASE(AArch64ISD::DUP)
2091     MAKE_CASE(AArch64ISD::DUPLANE8)
2092     MAKE_CASE(AArch64ISD::DUPLANE16)
2093     MAKE_CASE(AArch64ISD::DUPLANE32)
2094     MAKE_CASE(AArch64ISD::DUPLANE64)
2095     MAKE_CASE(AArch64ISD::DUPLANE128)
2096     MAKE_CASE(AArch64ISD::MOVI)
2097     MAKE_CASE(AArch64ISD::MOVIshift)
2098     MAKE_CASE(AArch64ISD::MOVIedit)
2099     MAKE_CASE(AArch64ISD::MOVImsl)
2100     MAKE_CASE(AArch64ISD::FMOV)
2101     MAKE_CASE(AArch64ISD::MVNIshift)
2102     MAKE_CASE(AArch64ISD::MVNImsl)
2103     MAKE_CASE(AArch64ISD::BICi)
2104     MAKE_CASE(AArch64ISD::ORRi)
2105     MAKE_CASE(AArch64ISD::BSP)
2106     MAKE_CASE(AArch64ISD::EXTR)
2107     MAKE_CASE(AArch64ISD::ZIP1)
2108     MAKE_CASE(AArch64ISD::ZIP2)
2109     MAKE_CASE(AArch64ISD::UZP1)
2110     MAKE_CASE(AArch64ISD::UZP2)
2111     MAKE_CASE(AArch64ISD::TRN1)
2112     MAKE_CASE(AArch64ISD::TRN2)
2113     MAKE_CASE(AArch64ISD::REV16)
2114     MAKE_CASE(AArch64ISD::REV32)
2115     MAKE_CASE(AArch64ISD::REV64)
2116     MAKE_CASE(AArch64ISD::EXT)
2117     MAKE_CASE(AArch64ISD::SPLICE)
2118     MAKE_CASE(AArch64ISD::VSHL)
2119     MAKE_CASE(AArch64ISD::VLSHR)
2120     MAKE_CASE(AArch64ISD::VASHR)
2121     MAKE_CASE(AArch64ISD::VSLI)
2122     MAKE_CASE(AArch64ISD::VSRI)
2123     MAKE_CASE(AArch64ISD::CMEQ)
2124     MAKE_CASE(AArch64ISD::CMGE)
2125     MAKE_CASE(AArch64ISD::CMGT)
2126     MAKE_CASE(AArch64ISD::CMHI)
2127     MAKE_CASE(AArch64ISD::CMHS)
2128     MAKE_CASE(AArch64ISD::FCMEQ)
2129     MAKE_CASE(AArch64ISD::FCMGE)
2130     MAKE_CASE(AArch64ISD::FCMGT)
2131     MAKE_CASE(AArch64ISD::CMEQz)
2132     MAKE_CASE(AArch64ISD::CMGEz)
2133     MAKE_CASE(AArch64ISD::CMGTz)
2134     MAKE_CASE(AArch64ISD::CMLEz)
2135     MAKE_CASE(AArch64ISD::CMLTz)
2136     MAKE_CASE(AArch64ISD::FCMEQz)
2137     MAKE_CASE(AArch64ISD::FCMGEz)
2138     MAKE_CASE(AArch64ISD::FCMGTz)
2139     MAKE_CASE(AArch64ISD::FCMLEz)
2140     MAKE_CASE(AArch64ISD::FCMLTz)
2141     MAKE_CASE(AArch64ISD::SADDV)
2142     MAKE_CASE(AArch64ISD::UADDV)
2143     MAKE_CASE(AArch64ISD::SDOT)
2144     MAKE_CASE(AArch64ISD::UDOT)
2145     MAKE_CASE(AArch64ISD::SMINV)
2146     MAKE_CASE(AArch64ISD::UMINV)
2147     MAKE_CASE(AArch64ISD::SMAXV)
2148     MAKE_CASE(AArch64ISD::UMAXV)
2149     MAKE_CASE(AArch64ISD::SADDV_PRED)
2150     MAKE_CASE(AArch64ISD::UADDV_PRED)
2151     MAKE_CASE(AArch64ISD::SMAXV_PRED)
2152     MAKE_CASE(AArch64ISD::UMAXV_PRED)
2153     MAKE_CASE(AArch64ISD::SMINV_PRED)
2154     MAKE_CASE(AArch64ISD::UMINV_PRED)
2155     MAKE_CASE(AArch64ISD::ORV_PRED)
2156     MAKE_CASE(AArch64ISD::EORV_PRED)
2157     MAKE_CASE(AArch64ISD::ANDV_PRED)
2158     MAKE_CASE(AArch64ISD::CLASTA_N)
2159     MAKE_CASE(AArch64ISD::CLASTB_N)
2160     MAKE_CASE(AArch64ISD::LASTA)
2161     MAKE_CASE(AArch64ISD::LASTB)
2162     MAKE_CASE(AArch64ISD::REINTERPRET_CAST)
2163     MAKE_CASE(AArch64ISD::LS64_BUILD)
2164     MAKE_CASE(AArch64ISD::LS64_EXTRACT)
2165     MAKE_CASE(AArch64ISD::TBL)
2166     MAKE_CASE(AArch64ISD::FADD_PRED)
2167     MAKE_CASE(AArch64ISD::FADDA_PRED)
2168     MAKE_CASE(AArch64ISD::FADDV_PRED)
2169     MAKE_CASE(AArch64ISD::FDIV_PRED)
2170     MAKE_CASE(AArch64ISD::FMA_PRED)
2171     MAKE_CASE(AArch64ISD::FMAX_PRED)
2172     MAKE_CASE(AArch64ISD::FMAXV_PRED)
2173     MAKE_CASE(AArch64ISD::FMAXNM_PRED)
2174     MAKE_CASE(AArch64ISD::FMAXNMV_PRED)
2175     MAKE_CASE(AArch64ISD::FMIN_PRED)
2176     MAKE_CASE(AArch64ISD::FMINV_PRED)
2177     MAKE_CASE(AArch64ISD::FMINNM_PRED)
2178     MAKE_CASE(AArch64ISD::FMINNMV_PRED)
2179     MAKE_CASE(AArch64ISD::FMUL_PRED)
2180     MAKE_CASE(AArch64ISD::FSUB_PRED)
2181     MAKE_CASE(AArch64ISD::RDSVL)
2182     MAKE_CASE(AArch64ISD::BIC)
2183     MAKE_CASE(AArch64ISD::BIT)
2184     MAKE_CASE(AArch64ISD::CBZ)
2185     MAKE_CASE(AArch64ISD::CBNZ)
2186     MAKE_CASE(AArch64ISD::TBZ)
2187     MAKE_CASE(AArch64ISD::TBNZ)
2188     MAKE_CASE(AArch64ISD::TC_RETURN)
2189     MAKE_CASE(AArch64ISD::PREFETCH)
2190     MAKE_CASE(AArch64ISD::SITOF)
2191     MAKE_CASE(AArch64ISD::UITOF)
2192     MAKE_CASE(AArch64ISD::NVCAST)
2193     MAKE_CASE(AArch64ISD::MRS)
2194     MAKE_CASE(AArch64ISD::SQSHL_I)
2195     MAKE_CASE(AArch64ISD::UQSHL_I)
2196     MAKE_CASE(AArch64ISD::SRSHR_I)
2197     MAKE_CASE(AArch64ISD::URSHR_I)
2198     MAKE_CASE(AArch64ISD::SQSHLU_I)
2199     MAKE_CASE(AArch64ISD::WrapperLarge)
2200     MAKE_CASE(AArch64ISD::LD2post)
2201     MAKE_CASE(AArch64ISD::LD3post)
2202     MAKE_CASE(AArch64ISD::LD4post)
2203     MAKE_CASE(AArch64ISD::ST2post)
2204     MAKE_CASE(AArch64ISD::ST3post)
2205     MAKE_CASE(AArch64ISD::ST4post)
2206     MAKE_CASE(AArch64ISD::LD1x2post)
2207     MAKE_CASE(AArch64ISD::LD1x3post)
2208     MAKE_CASE(AArch64ISD::LD1x4post)
2209     MAKE_CASE(AArch64ISD::ST1x2post)
2210     MAKE_CASE(AArch64ISD::ST1x3post)
2211     MAKE_CASE(AArch64ISD::ST1x4post)
2212     MAKE_CASE(AArch64ISD::LD1DUPpost)
2213     MAKE_CASE(AArch64ISD::LD2DUPpost)
2214     MAKE_CASE(AArch64ISD::LD3DUPpost)
2215     MAKE_CASE(AArch64ISD::LD4DUPpost)
2216     MAKE_CASE(AArch64ISD::LD1LANEpost)
2217     MAKE_CASE(AArch64ISD::LD2LANEpost)
2218     MAKE_CASE(AArch64ISD::LD3LANEpost)
2219     MAKE_CASE(AArch64ISD::LD4LANEpost)
2220     MAKE_CASE(AArch64ISD::ST2LANEpost)
2221     MAKE_CASE(AArch64ISD::ST3LANEpost)
2222     MAKE_CASE(AArch64ISD::ST4LANEpost)
2223     MAKE_CASE(AArch64ISD::SMULL)
2224     MAKE_CASE(AArch64ISD::UMULL)
2225     MAKE_CASE(AArch64ISD::FRECPE)
2226     MAKE_CASE(AArch64ISD::FRECPS)
2227     MAKE_CASE(AArch64ISD::FRSQRTE)
2228     MAKE_CASE(AArch64ISD::FRSQRTS)
2229     MAKE_CASE(AArch64ISD::STG)
2230     MAKE_CASE(AArch64ISD::STZG)
2231     MAKE_CASE(AArch64ISD::ST2G)
2232     MAKE_CASE(AArch64ISD::STZ2G)
2233     MAKE_CASE(AArch64ISD::SUNPKHI)
2234     MAKE_CASE(AArch64ISD::SUNPKLO)
2235     MAKE_CASE(AArch64ISD::UUNPKHI)
2236     MAKE_CASE(AArch64ISD::UUNPKLO)
2237     MAKE_CASE(AArch64ISD::INSR)
2238     MAKE_CASE(AArch64ISD::PTEST)
2239     MAKE_CASE(AArch64ISD::PTRUE)
2240     MAKE_CASE(AArch64ISD::LD1_MERGE_ZERO)
2241     MAKE_CASE(AArch64ISD::LD1S_MERGE_ZERO)
2242     MAKE_CASE(AArch64ISD::LDNF1_MERGE_ZERO)
2243     MAKE_CASE(AArch64ISD::LDNF1S_MERGE_ZERO)
2244     MAKE_CASE(AArch64ISD::LDFF1_MERGE_ZERO)
2245     MAKE_CASE(AArch64ISD::LDFF1S_MERGE_ZERO)
2246     MAKE_CASE(AArch64ISD::LD1RQ_MERGE_ZERO)
2247     MAKE_CASE(AArch64ISD::LD1RO_MERGE_ZERO)
2248     MAKE_CASE(AArch64ISD::SVE_LD2_MERGE_ZERO)
2249     MAKE_CASE(AArch64ISD::SVE_LD3_MERGE_ZERO)
2250     MAKE_CASE(AArch64ISD::SVE_LD4_MERGE_ZERO)
2251     MAKE_CASE(AArch64ISD::GLD1_MERGE_ZERO)
2252     MAKE_CASE(AArch64ISD::GLD1_SCALED_MERGE_ZERO)
2253     MAKE_CASE(AArch64ISD::GLD1_SXTW_MERGE_ZERO)
2254     MAKE_CASE(AArch64ISD::GLD1_UXTW_MERGE_ZERO)
2255     MAKE_CASE(AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO)
2256     MAKE_CASE(AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO)
2257     MAKE_CASE(AArch64ISD::GLD1_IMM_MERGE_ZERO)
2258     MAKE_CASE(AArch64ISD::GLD1S_MERGE_ZERO)
2259     MAKE_CASE(AArch64ISD::GLD1S_SCALED_MERGE_ZERO)
2260     MAKE_CASE(AArch64ISD::GLD1S_SXTW_MERGE_ZERO)
2261     MAKE_CASE(AArch64ISD::GLD1S_UXTW_MERGE_ZERO)
2262     MAKE_CASE(AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO)
2263     MAKE_CASE(AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO)
2264     MAKE_CASE(AArch64ISD::GLD1S_IMM_MERGE_ZERO)
2265     MAKE_CASE(AArch64ISD::GLDFF1_MERGE_ZERO)
2266     MAKE_CASE(AArch64ISD::GLDFF1_SCALED_MERGE_ZERO)
2267     MAKE_CASE(AArch64ISD::GLDFF1_SXTW_MERGE_ZERO)
2268     MAKE_CASE(AArch64ISD::GLDFF1_UXTW_MERGE_ZERO)
2269     MAKE_CASE(AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO)
2270     MAKE_CASE(AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO)
2271     MAKE_CASE(AArch64ISD::GLDFF1_IMM_MERGE_ZERO)
2272     MAKE_CASE(AArch64ISD::GLDFF1S_MERGE_ZERO)
2273     MAKE_CASE(AArch64ISD::GLDFF1S_SCALED_MERGE_ZERO)
2274     MAKE_CASE(AArch64ISD::GLDFF1S_SXTW_MERGE_ZERO)
2275     MAKE_CASE(AArch64ISD::GLDFF1S_UXTW_MERGE_ZERO)
2276     MAKE_CASE(AArch64ISD::GLDFF1S_SXTW_SCALED_MERGE_ZERO)
2277     MAKE_CASE(AArch64ISD::GLDFF1S_UXTW_SCALED_MERGE_ZERO)
2278     MAKE_CASE(AArch64ISD::GLDFF1S_IMM_MERGE_ZERO)
2279     MAKE_CASE(AArch64ISD::GLDNT1_MERGE_ZERO)
2280     MAKE_CASE(AArch64ISD::GLDNT1_INDEX_MERGE_ZERO)
2281     MAKE_CASE(AArch64ISD::GLDNT1S_MERGE_ZERO)
2282     MAKE_CASE(AArch64ISD::ST1_PRED)
2283     MAKE_CASE(AArch64ISD::SST1_PRED)
2284     MAKE_CASE(AArch64ISD::SST1_SCALED_PRED)
2285     MAKE_CASE(AArch64ISD::SST1_SXTW_PRED)
2286     MAKE_CASE(AArch64ISD::SST1_UXTW_PRED)
2287     MAKE_CASE(AArch64ISD::SST1_SXTW_SCALED_PRED)
2288     MAKE_CASE(AArch64ISD::SST1_UXTW_SCALED_PRED)
2289     MAKE_CASE(AArch64ISD::SST1_IMM_PRED)
2290     MAKE_CASE(AArch64ISD::SSTNT1_PRED)
2291     MAKE_CASE(AArch64ISD::SSTNT1_INDEX_PRED)
2292     MAKE_CASE(AArch64ISD::LDP)
2293     MAKE_CASE(AArch64ISD::STP)
2294     MAKE_CASE(AArch64ISD::STNP)
2295     MAKE_CASE(AArch64ISD::BITREVERSE_MERGE_PASSTHRU)
2296     MAKE_CASE(AArch64ISD::BSWAP_MERGE_PASSTHRU)
2297     MAKE_CASE(AArch64ISD::REVH_MERGE_PASSTHRU)
2298     MAKE_CASE(AArch64ISD::REVW_MERGE_PASSTHRU)
2299     MAKE_CASE(AArch64ISD::REVD_MERGE_PASSTHRU)
2300     MAKE_CASE(AArch64ISD::CTLZ_MERGE_PASSTHRU)
2301     MAKE_CASE(AArch64ISD::CTPOP_MERGE_PASSTHRU)
2302     MAKE_CASE(AArch64ISD::DUP_MERGE_PASSTHRU)
2303     MAKE_CASE(AArch64ISD::INDEX_VECTOR)
2304     MAKE_CASE(AArch64ISD::ADDP)
2305     MAKE_CASE(AArch64ISD::SADDLP)
2306     MAKE_CASE(AArch64ISD::UADDLP)
2307     MAKE_CASE(AArch64ISD::CALL_RVMARKER)
2308     MAKE_CASE(AArch64ISD::ASSERT_ZEXT_BOOL)
2309     MAKE_CASE(AArch64ISD::MOPS_MEMSET)
2310     MAKE_CASE(AArch64ISD::MOPS_MEMSET_TAGGING)
2311     MAKE_CASE(AArch64ISD::MOPS_MEMCOPY)
2312     MAKE_CASE(AArch64ISD::MOPS_MEMMOVE)
2313     MAKE_CASE(AArch64ISD::CALL_BTI)
2314   }
2315 #undef MAKE_CASE
2316   return nullptr;
2317 }
2318 
2319 MachineBasicBlock *
2320 AArch64TargetLowering::EmitF128CSEL(MachineInstr &MI,
2321                                     MachineBasicBlock *MBB) const {
2322   // We materialise the F128CSEL pseudo-instruction as some control flow and a
2323   // phi node:
2324 
2325   // OrigBB:
2326   //     [... previous instrs leading to comparison ...]
2327   //     b.ne TrueBB
2328   //     b EndBB
2329   // TrueBB:
2330   //     ; Fallthrough
2331   // EndBB:
2332   //     Dest = PHI [IfTrue, TrueBB], [IfFalse, OrigBB]
2333 
2334   MachineFunction *MF = MBB->getParent();
2335   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2336   const BasicBlock *LLVM_BB = MBB->getBasicBlock();
2337   DebugLoc DL = MI.getDebugLoc();
2338   MachineFunction::iterator It = ++MBB->getIterator();
2339 
2340   Register DestReg = MI.getOperand(0).getReg();
2341   Register IfTrueReg = MI.getOperand(1).getReg();
2342   Register IfFalseReg = MI.getOperand(2).getReg();
2343   unsigned CondCode = MI.getOperand(3).getImm();
2344   bool NZCVKilled = MI.getOperand(4).isKill();
2345 
2346   MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
2347   MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
2348   MF->insert(It, TrueBB);
2349   MF->insert(It, EndBB);
2350 
  // Transfer the rest of the current basic block to EndBB.
2352   EndBB->splice(EndBB->begin(), MBB, std::next(MachineBasicBlock::iterator(MI)),
2353                 MBB->end());
2354   EndBB->transferSuccessorsAndUpdatePHIs(MBB);
2355 
2356   BuildMI(MBB, DL, TII->get(AArch64::Bcc)).addImm(CondCode).addMBB(TrueBB);
2357   BuildMI(MBB, DL, TII->get(AArch64::B)).addMBB(EndBB);
2358   MBB->addSuccessor(TrueBB);
2359   MBB->addSuccessor(EndBB);
2360 
2361   // TrueBB falls through to the end.
2362   TrueBB->addSuccessor(EndBB);
2363 
2364   if (!NZCVKilled) {
2365     TrueBB->addLiveIn(AArch64::NZCV);
2366     EndBB->addLiveIn(AArch64::NZCV);
2367   }
2368 
2369   BuildMI(*EndBB, EndBB->begin(), DL, TII->get(AArch64::PHI), DestReg)
2370       .addReg(IfTrueReg)
2371       .addMBB(TrueBB)
2372       .addReg(IfFalseReg)
2373       .addMBB(MBB);
2374 
2375   MI.eraseFromParent();
2376   return EndBB;
2377 }
2378 
2379 MachineBasicBlock *AArch64TargetLowering::EmitLoweredCatchRet(
2380        MachineInstr &MI, MachineBasicBlock *BB) const {
2381   assert(!isAsynchronousEHPersonality(classifyEHPersonality(
2382              BB->getParent()->getFunction().getPersonalityFn())) &&
2383          "SEH does not use catchret!");
2384   return BB;
2385 }
2386 
2387 MachineBasicBlock *
2388 AArch64TargetLowering::EmitTileLoad(unsigned Opc, unsigned BaseReg,
2389                                     MachineInstr &MI,
2390                                     MachineBasicBlock *BB) const {
2391   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2392   MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
2393 
2394   MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
2395   MIB.add(MI.getOperand(1)); // slice index register
2396   MIB.add(MI.getOperand(2)); // slice index offset
2397   MIB.add(MI.getOperand(3)); // pg
2398   MIB.add(MI.getOperand(4)); // base
2399   MIB.add(MI.getOperand(5)); // offset
2400 
2401   MI.eraseFromParent(); // The pseudo is gone now.
2402   return BB;
2403 }
2404 
2405 MachineBasicBlock *
2406 AArch64TargetLowering::EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const {
2407   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2408   MachineInstrBuilder MIB =
2409       BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(AArch64::LDR_ZA));
2410 
2411   MIB.addReg(AArch64::ZA, RegState::Define);
2412   MIB.add(MI.getOperand(0)); // Vector select register
2413   MIB.add(MI.getOperand(1)); // Vector select offset
2414   MIB.add(MI.getOperand(2)); // Base
2415   MIB.add(MI.getOperand(1)); // Offset, same as vector select offset
2416 
2417   MI.eraseFromParent(); // The pseudo is gone now.
2418   return BB;
2419 }
2420 
2421 MachineBasicBlock *
2422 AArch64TargetLowering::EmitMopa(unsigned Opc, unsigned BaseReg,
2423                                 MachineInstr &MI, MachineBasicBlock *BB) const {
2424   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2425   MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
2426 
2427   MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
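  // The ZA tile is also an input of the accumulating outer-product
  // instruction, so add it a second time as a use.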
2428   MIB.addReg(BaseReg + MI.getOperand(0).getImm());
2429   MIB.add(MI.getOperand(1)); // pn
2430   MIB.add(MI.getOperand(2)); // pm
2431   MIB.add(MI.getOperand(3)); // zn
2432   MIB.add(MI.getOperand(4)); // zm
2433 
2434   MI.eraseFromParent(); // The pseudo is gone now.
2435   return BB;
2436 }
2437 
2438 MachineBasicBlock *
2439 AArch64TargetLowering::EmitInsertVectorToTile(unsigned Opc, unsigned BaseReg,
2440                                               MachineInstr &MI,
2441                                               MachineBasicBlock *BB) const {
2442   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2443   MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
2444 
2445   MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
2446   MIB.addReg(BaseReg + MI.getOperand(0).getImm());
2447   MIB.add(MI.getOperand(1)); // Slice index register
2448   MIB.add(MI.getOperand(2)); // Slice index offset
2449   MIB.add(MI.getOperand(3)); // pg
2450   MIB.add(MI.getOperand(4)); // zn
2451 
2452   MI.eraseFromParent(); // The pseudo is gone now.
2453   return BB;
2454 }
2455 
2456 MachineBasicBlock *
2457 AArch64TargetLowering::EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const {
2458   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2459   MachineInstrBuilder MIB =
2460       BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(AArch64::ZERO_M));
2461   MIB.add(MI.getOperand(0)); // Mask
2462 
2463   unsigned Mask = MI.getOperand(0).getImm();
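  // Each set bit selects one 64-bit ZA tile; e.g. an illustrative mask of
  // 0b00100001 would mark ZAD0 and ZAD5 as implicitly defined below.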
2464   for (unsigned I = 0; I < 8; I++) {
2465     if (Mask & (1 << I))
2466       MIB.addDef(AArch64::ZAD0 + I, RegState::ImplicitDefine);
2467   }
2468 
2469   MI.eraseFromParent(); // The pseudo is gone now.
2470   return BB;
2471 }
2472 
2473 MachineBasicBlock *
2474 AArch64TargetLowering::EmitAddVectorToTile(unsigned Opc, unsigned BaseReg,
2475                                            MachineInstr &MI,
2476                                            MachineBasicBlock *BB) const {
2477   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2478   MachineInstrBuilder MIB = BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Opc));
2479 
2480   MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define);
2481   MIB.addReg(BaseReg + MI.getOperand(0).getImm());
2482   MIB.add(MI.getOperand(1)); // pn
2483   MIB.add(MI.getOperand(2)); // pm
2484   MIB.add(MI.getOperand(3)); // zn
2485 
2486   MI.eraseFromParent(); // The pseudo is gone now.
2487   return BB;
2488 }
2489 
2490 MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
2491     MachineInstr &MI, MachineBasicBlock *BB) const {
2492   switch (MI.getOpcode()) {
2493   default:
2494 #ifndef NDEBUG
2495     MI.dump();
2496 #endif
2497     llvm_unreachable("Unexpected instruction for custom inserter!");
2498 
2499   case AArch64::F128CSEL:
2500     return EmitF128CSEL(MI, BB);
2501 
2502   case TargetOpcode::STATEPOINT:
    // STATEPOINT is a pseudo instruction which has no implicit defs/uses,
    // while the BL call instruction (to which the statepoint is eventually
    // lowered) has an implicit def of LR. This def is early-clobber as it is
    // set at the moment of the call, before any use is read.
    // Add this implicit dead def here as a workaround.
2508     MI.addOperand(*MI.getMF(),
2509                   MachineOperand::CreateReg(
2510                       AArch64::LR, /*isDef*/ true,
2511                       /*isImp*/ true, /*isKill*/ false, /*isDead*/ true,
2512                       /*isUndef*/ false, /*isEarlyClobber*/ true));
2513     LLVM_FALLTHROUGH;
2514   case TargetOpcode::STACKMAP:
2515   case TargetOpcode::PATCHPOINT:
2516     return emitPatchPoint(MI, BB);
2517 
2518   case AArch64::CATCHRET:
2519     return EmitLoweredCatchRet(MI, BB);
2520   case AArch64::LD1_MXIPXX_H_PSEUDO_B:
2521     return EmitTileLoad(AArch64::LD1_MXIPXX_H_B, AArch64::ZAB0, MI, BB);
2522   case AArch64::LD1_MXIPXX_H_PSEUDO_H:
2523     return EmitTileLoad(AArch64::LD1_MXIPXX_H_H, AArch64::ZAH0, MI, BB);
2524   case AArch64::LD1_MXIPXX_H_PSEUDO_S:
2525     return EmitTileLoad(AArch64::LD1_MXIPXX_H_S, AArch64::ZAS0, MI, BB);
2526   case AArch64::LD1_MXIPXX_H_PSEUDO_D:
2527     return EmitTileLoad(AArch64::LD1_MXIPXX_H_D, AArch64::ZAD0, MI, BB);
2528   case AArch64::LD1_MXIPXX_H_PSEUDO_Q:
2529     return EmitTileLoad(AArch64::LD1_MXIPXX_H_Q, AArch64::ZAQ0, MI, BB);
2530   case AArch64::LD1_MXIPXX_V_PSEUDO_B:
2531     return EmitTileLoad(AArch64::LD1_MXIPXX_V_B, AArch64::ZAB0, MI, BB);
2532   case AArch64::LD1_MXIPXX_V_PSEUDO_H:
2533     return EmitTileLoad(AArch64::LD1_MXIPXX_V_H, AArch64::ZAH0, MI, BB);
2534   case AArch64::LD1_MXIPXX_V_PSEUDO_S:
2535     return EmitTileLoad(AArch64::LD1_MXIPXX_V_S, AArch64::ZAS0, MI, BB);
2536   case AArch64::LD1_MXIPXX_V_PSEUDO_D:
2537     return EmitTileLoad(AArch64::LD1_MXIPXX_V_D, AArch64::ZAD0, MI, BB);
2538   case AArch64::LD1_MXIPXX_V_PSEUDO_Q:
2539     return EmitTileLoad(AArch64::LD1_MXIPXX_V_Q, AArch64::ZAQ0, MI, BB);
2540   case AArch64::LDR_ZA_PSEUDO:
2541     return EmitFill(MI, BB);
2542   case AArch64::BFMOPA_MPPZZ_PSEUDO:
2543     return EmitMopa(AArch64::BFMOPA_MPPZZ, AArch64::ZAS0, MI, BB);
2544   case AArch64::BFMOPS_MPPZZ_PSEUDO:
2545     return EmitMopa(AArch64::BFMOPS_MPPZZ, AArch64::ZAS0, MI, BB);
2546   case AArch64::FMOPAL_MPPZZ_PSEUDO:
2547     return EmitMopa(AArch64::FMOPAL_MPPZZ, AArch64::ZAS0, MI, BB);
2548   case AArch64::FMOPSL_MPPZZ_PSEUDO:
2549     return EmitMopa(AArch64::FMOPSL_MPPZZ, AArch64::ZAS0, MI, BB);
2550   case AArch64::FMOPA_MPPZZ_S_PSEUDO:
2551     return EmitMopa(AArch64::FMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2552   case AArch64::FMOPS_MPPZZ_S_PSEUDO:
2553     return EmitMopa(AArch64::FMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2554   case AArch64::FMOPA_MPPZZ_D_PSEUDO:
2555     return EmitMopa(AArch64::FMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2556   case AArch64::FMOPS_MPPZZ_D_PSEUDO:
2557     return EmitMopa(AArch64::FMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2558   case AArch64::SMOPA_MPPZZ_S_PSEUDO:
2559     return EmitMopa(AArch64::SMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2560   case AArch64::SMOPS_MPPZZ_S_PSEUDO:
2561     return EmitMopa(AArch64::SMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2562   case AArch64::UMOPA_MPPZZ_S_PSEUDO:
2563     return EmitMopa(AArch64::UMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2564   case AArch64::UMOPS_MPPZZ_S_PSEUDO:
2565     return EmitMopa(AArch64::UMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2566   case AArch64::SUMOPA_MPPZZ_S_PSEUDO:
2567     return EmitMopa(AArch64::SUMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2568   case AArch64::SUMOPS_MPPZZ_S_PSEUDO:
2569     return EmitMopa(AArch64::SUMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2570   case AArch64::USMOPA_MPPZZ_S_PSEUDO:
2571     return EmitMopa(AArch64::USMOPA_MPPZZ_S, AArch64::ZAS0, MI, BB);
2572   case AArch64::USMOPS_MPPZZ_S_PSEUDO:
2573     return EmitMopa(AArch64::USMOPS_MPPZZ_S, AArch64::ZAS0, MI, BB);
2574   case AArch64::SMOPA_MPPZZ_D_PSEUDO:
2575     return EmitMopa(AArch64::SMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2576   case AArch64::SMOPS_MPPZZ_D_PSEUDO:
2577     return EmitMopa(AArch64::SMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2578   case AArch64::UMOPA_MPPZZ_D_PSEUDO:
2579     return EmitMopa(AArch64::UMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2580   case AArch64::UMOPS_MPPZZ_D_PSEUDO:
2581     return EmitMopa(AArch64::UMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2582   case AArch64::SUMOPA_MPPZZ_D_PSEUDO:
2583     return EmitMopa(AArch64::SUMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2584   case AArch64::SUMOPS_MPPZZ_D_PSEUDO:
2585     return EmitMopa(AArch64::SUMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2586   case AArch64::USMOPA_MPPZZ_D_PSEUDO:
2587     return EmitMopa(AArch64::USMOPA_MPPZZ_D, AArch64::ZAD0, MI, BB);
2588   case AArch64::USMOPS_MPPZZ_D_PSEUDO:
2589     return EmitMopa(AArch64::USMOPS_MPPZZ_D, AArch64::ZAD0, MI, BB);
2590   case AArch64::INSERT_MXIPZ_H_PSEUDO_B:
2591     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_B, AArch64::ZAB0, MI,
2592                                   BB);
2593   case AArch64::INSERT_MXIPZ_H_PSEUDO_H:
2594     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_H, AArch64::ZAH0, MI,
2595                                   BB);
2596   case AArch64::INSERT_MXIPZ_H_PSEUDO_S:
2597     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_S, AArch64::ZAS0, MI,
2598                                   BB);
2599   case AArch64::INSERT_MXIPZ_H_PSEUDO_D:
2600     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_D, AArch64::ZAD0, MI,
2601                                   BB);
2602   case AArch64::INSERT_MXIPZ_H_PSEUDO_Q:
2603     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_H_Q, AArch64::ZAQ0, MI,
2604                                   BB);
2605   case AArch64::INSERT_MXIPZ_V_PSEUDO_B:
2606     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_B, AArch64::ZAB0, MI,
2607                                   BB);
2608   case AArch64::INSERT_MXIPZ_V_PSEUDO_H:
2609     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_H, AArch64::ZAH0, MI,
2610                                   BB);
2611   case AArch64::INSERT_MXIPZ_V_PSEUDO_S:
2612     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_S, AArch64::ZAS0, MI,
2613                                   BB);
2614   case AArch64::INSERT_MXIPZ_V_PSEUDO_D:
2615     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_D, AArch64::ZAD0, MI,
2616                                   BB);
2617   case AArch64::INSERT_MXIPZ_V_PSEUDO_Q:
2618     return EmitInsertVectorToTile(AArch64::INSERT_MXIPZ_V_Q, AArch64::ZAQ0, MI,
2619                                   BB);
2620   case AArch64::ZERO_M_PSEUDO:
2621     return EmitZero(MI, BB);
2622   case AArch64::ADDHA_MPPZ_PSEUDO_S:
2623     return EmitAddVectorToTile(AArch64::ADDHA_MPPZ_S, AArch64::ZAS0, MI, BB);
2624   case AArch64::ADDVA_MPPZ_PSEUDO_S:
2625     return EmitAddVectorToTile(AArch64::ADDVA_MPPZ_S, AArch64::ZAS0, MI, BB);
2626   case AArch64::ADDHA_MPPZ_PSEUDO_D:
2627     return EmitAddVectorToTile(AArch64::ADDHA_MPPZ_D, AArch64::ZAD0, MI, BB);
2628   case AArch64::ADDVA_MPPZ_PSEUDO_D:
2629     return EmitAddVectorToTile(AArch64::ADDVA_MPPZ_D, AArch64::ZAD0, MI, BB);
2630   }
2631 }
2632 
2633 //===----------------------------------------------------------------------===//
2634 // AArch64 Lowering private implementation.
2635 //===----------------------------------------------------------------------===//
2636 
2637 //===----------------------------------------------------------------------===//
2638 // Lowering Code
2639 //===----------------------------------------------------------------------===//
2640 
// Forward declarations of SVE fixed-length lowering helpers.
2642 static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT);
2643 static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V);
2644 static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V);
2645 static SDValue convertFixedMaskToScalableVector(SDValue Mask,
2646                                                 SelectionDAG &DAG);
2647 static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
2648                                              EVT VT);
2649 
2650 /// isZerosVector - Check whether SDNode N is a zero-filled vector.
2651 static bool isZerosVector(const SDNode *N) {
2652   // Look through a bit convert.
2653   while (N->getOpcode() == ISD::BITCAST)
2654     N = N->getOperand(0).getNode();
2655 
2656   if (ISD::isConstantSplatVectorAllZeros(N))
2657     return true;
2658 
2659   if (N->getOpcode() != AArch64ISD::DUP)
2660     return false;
2661 
2662   auto Opnd0 = N->getOperand(0);
2663   auto *CINT = dyn_cast<ConstantSDNode>(Opnd0);
2664   auto *CFP = dyn_cast<ConstantFPSDNode>(Opnd0);
2665   return (CINT && CINT->isZero()) || (CFP && CFP->isZero());
2666 }
2667 
2668 /// changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64
2669 /// CC
2670 static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC) {
2671   switch (CC) {
2672   default:
2673     llvm_unreachable("Unknown condition code!");
2674   case ISD::SETNE:
2675     return AArch64CC::NE;
2676   case ISD::SETEQ:
2677     return AArch64CC::EQ;
2678   case ISD::SETGT:
2679     return AArch64CC::GT;
2680   case ISD::SETGE:
2681     return AArch64CC::GE;
2682   case ISD::SETLT:
2683     return AArch64CC::LT;
2684   case ISD::SETLE:
2685     return AArch64CC::LE;
2686   case ISD::SETUGT:
2687     return AArch64CC::HI;
2688   case ISD::SETUGE:
2689     return AArch64CC::HS;
2690   case ISD::SETULT:
2691     return AArch64CC::LO;
2692   case ISD::SETULE:
2693     return AArch64CC::LS;
2694   }
2695 }
2696 
2697 /// changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC.
2698 static void changeFPCCToAArch64CC(ISD::CondCode CC,
2699                                   AArch64CC::CondCode &CondCode,
2700                                   AArch64CC::CondCode &CondCode2) {
2701   CondCode2 = AArch64CC::AL;
2702   switch (CC) {
2703   default:
2704     llvm_unreachable("Unknown FP condition!");
2705   case ISD::SETEQ:
2706   case ISD::SETOEQ:
2707     CondCode = AArch64CC::EQ;
2708     break;
2709   case ISD::SETGT:
2710   case ISD::SETOGT:
2711     CondCode = AArch64CC::GT;
2712     break;
2713   case ISD::SETGE:
2714   case ISD::SETOGE:
2715     CondCode = AArch64CC::GE;
2716     break;
2717   case ISD::SETOLT:
2718     CondCode = AArch64CC::MI;
2719     break;
2720   case ISD::SETOLE:
2721     CondCode = AArch64CC::LS;
2722     break;
2723   case ISD::SETONE:
2724     CondCode = AArch64CC::MI;
2725     CondCode2 = AArch64CC::GT;
2726     break;
2727   case ISD::SETO:
2728     CondCode = AArch64CC::VC;
2729     break;
2730   case ISD::SETUO:
2731     CondCode = AArch64CC::VS;
2732     break;
2733   case ISD::SETUEQ:
2734     CondCode = AArch64CC::EQ;
2735     CondCode2 = AArch64CC::VS;
2736     break;
2737   case ISD::SETUGT:
2738     CondCode = AArch64CC::HI;
2739     break;
2740   case ISD::SETUGE:
2741     CondCode = AArch64CC::PL;
2742     break;
2743   case ISD::SETLT:
2744   case ISD::SETULT:
2745     CondCode = AArch64CC::LT;
2746     break;
2747   case ISD::SETLE:
2748   case ISD::SETULE:
2749     CondCode = AArch64CC::LE;
2750     break;
2751   case ISD::SETNE:
2752   case ISD::SETUNE:
2753     CondCode = AArch64CC::NE;
2754     break;
2755   }
2756 }
2757 
2758 /// Convert a DAG fp condition code to an AArch64 CC.
2759 /// This differs from changeFPCCToAArch64CC in that it returns cond codes that
2760 /// should be AND'ed instead of OR'ed.
2761 static void changeFPCCToANDAArch64CC(ISD::CondCode CC,
2762                                      AArch64CC::CondCode &CondCode,
2763                                      AArch64CC::CondCode &CondCode2) {
2764   CondCode2 = AArch64CC::AL;
2765   switch (CC) {
2766   default:
2767     changeFPCCToAArch64CC(CC, CondCode, CondCode2);
2768     assert(CondCode2 == AArch64CC::AL);
2769     break;
2770   case ISD::SETONE:
2771     // (a one b)
2772     // == ((a olt b) || (a ogt b))
2773     // == ((a ord b) && (a une b))
2774     CondCode = AArch64CC::VC;
2775     CondCode2 = AArch64CC::NE;
2776     break;
2777   case ISD::SETUEQ:
2778     // (a ueq b)
2779     // == ((a uno b) || (a oeq b))
2780     // == ((a ule b) && (a uge b))
2781     CondCode = AArch64CC::PL;
2782     CondCode2 = AArch64CC::LE;
2783     break;
2784   }
2785 }
2786 
2787 /// changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64
2788 /// CC usable with the vector instructions. Fewer operations are available
2789 /// without a real NZCV register, so we have to use less efficient combinations
2790 /// to get the same effect.
2791 static void changeVectorFPCCToAArch64CC(ISD::CondCode CC,
2792                                         AArch64CC::CondCode &CondCode,
2793                                         AArch64CC::CondCode &CondCode2,
2794                                         bool &Invert) {
2795   Invert = false;
2796   switch (CC) {
2797   default:
2798     // Mostly the scalar mappings work fine.
2799     changeFPCCToAArch64CC(CC, CondCode, CondCode2);
2800     break;
2801   case ISD::SETUO:
2802     Invert = true;
2803     LLVM_FALLTHROUGH;
2804   case ISD::SETO:
2805     CondCode = AArch64CC::MI;
2806     CondCode2 = AArch64CC::GE;
2807     break;
2808   case ISD::SETUEQ:
2809   case ISD::SETULT:
2810   case ISD::SETULE:
2811   case ISD::SETUGT:
2812   case ISD::SETUGE:
2813     // All of the compare-mask comparisons are ordered, but we can switch
2814     // between the two by a double inversion. E.g. ULE == !OGT.
2815     Invert = true;
2816     changeFPCCToAArch64CC(getSetCCInverse(CC, /* FP inverse */ MVT::f32),
2817                           CondCode, CondCode2);
2818     break;
2819   }
2820 }
2821 
2822 static bool isLegalArithImmed(uint64_t C) {
2823   // Matches AArch64DAGToDAGISel::SelectArithImmed().
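  // Legal arithmetic immediates are 12-bit values, optionally shifted left by
  // 12 bits; e.g. 0xFFF and 0xFFF000 are legal here, while 0x1001 is not.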
2824   bool IsLegal = (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
2825   LLVM_DEBUG(dbgs() << "Is imm " << C
2826                     << " legal: " << (IsLegal ? "yes\n" : "no\n"));
2827   return IsLegal;
2828 }
2829 
// Can a (CMP op1, (sub 0, op2)) be turned into a CMN instruction on
// the grounds that "op1 - (-op2) == op1 + op2"? Not always: the C and V flags
// can be set differently by this operation. It comes down to whether
// "SInt(~op2)+1 == SInt(~op2+1)" (and the same for UInt). If they are, then
// everything is fine. If not, then the optimization is wrong. Thus general
// comparisons are only valid if op2 != 0.
2836 //
2837 // So, finally, the only LLVM-native comparisons that don't mention C and V
2838 // are SETEQ and SETNE. They're the only ones we can safely use CMN for in
2839 // the absence of information about op2.
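//
// For example, with SETEQ, (CMP op1, (sub 0, op2)) can safely become
// (ADDS op1, op2): "op1 == -op2" holds exactly when "op1 + op2 == 0", so only
// the Z flag is consulted and the differing C/V flags do not matter.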
2840 static bool isCMN(SDValue Op, ISD::CondCode CC) {
2841   return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0)) &&
2842          (CC == ISD::SETEQ || CC == ISD::SETNE);
2843 }
2844 
2845 static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl,
2846                                       SelectionDAG &DAG, SDValue Chain,
2847                                       bool IsSignaling) {
2848   EVT VT = LHS.getValueType();
2849   assert(VT != MVT::f128);
2850 
2851   const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
2852 
2853   if (VT == MVT::f16 && !FullFP16) {
2854     LHS = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
2855                       {Chain, LHS});
2856     RHS = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
2857                       {LHS.getValue(1), RHS});
2858     Chain = RHS.getValue(1);
2859     VT = MVT::f32;
2860   }
2861   unsigned Opcode =
2862       IsSignaling ? AArch64ISD::STRICT_FCMPE : AArch64ISD::STRICT_FCMP;
2863   return DAG.getNode(Opcode, dl, {VT, MVT::Other}, {Chain, LHS, RHS});
2864 }
2865 
2866 static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
2867                               const SDLoc &dl, SelectionDAG &DAG) {
2868   EVT VT = LHS.getValueType();
2869   const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
2870 
2871   if (VT.isFloatingPoint()) {
2872     assert(VT != MVT::f128);
2873     if (VT == MVT::f16 && !FullFP16) {
2874       LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS);
2875       RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS);
2876       VT = MVT::f32;
2877     }
2878     return DAG.getNode(AArch64ISD::FCMP, dl, VT, LHS, RHS);
2879   }
2880 
2881   // The CMP instruction is just an alias for SUBS, and representing it as
2882   // SUBS means that it's possible to get CSE with subtract operations.
2883   // A later phase can perform the optimization of setting the destination
2884   // register to WZR/XZR if it ends up being unused.
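  // (At the assembly level, "cmp w0, w1" is simply "subs wzr, w0, w1".)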
2885   unsigned Opcode = AArch64ISD::SUBS;
2886 
2887   if (isCMN(RHS, CC)) {
    // Can we combine a (CMP op1, (sub 0, op2)) into a CMN instruction?
2889     Opcode = AArch64ISD::ADDS;
2890     RHS = RHS.getOperand(1);
2891   } else if (isCMN(LHS, CC)) {
    // As we are looking for EQ/NE compares, the operands can be commuted; can
    // we combine a (CMP (sub 0, op1), op2) into a CMN instruction?
2894     Opcode = AArch64ISD::ADDS;
2895     LHS = LHS.getOperand(1);
2896   } else if (isNullConstant(RHS) && !isUnsignedIntSetCC(CC)) {
2897     if (LHS.getOpcode() == ISD::AND) {
2898       // Similarly, (CMP (and X, Y), 0) can be implemented with a TST
2899       // (a.k.a. ANDS) except that the flags are only guaranteed to work for one
2900       // of the signed comparisons.
2901       const SDValue ANDSNode = DAG.getNode(AArch64ISD::ANDS, dl,
2902                                            DAG.getVTList(VT, MVT_CC),
2903                                            LHS.getOperand(0),
2904                                            LHS.getOperand(1));
2905       // Replace all users of (and X, Y) with newly generated (ands X, Y)
2906       DAG.ReplaceAllUsesWith(LHS, ANDSNode);
2907       return ANDSNode.getValue(1);
2908     } else if (LHS.getOpcode() == AArch64ISD::ANDS) {
2909       // Use result of ANDS
2910       return LHS.getValue(1);
2911     }
2912   }
2913 
2914   return DAG.getNode(Opcode, dl, DAG.getVTList(VT, MVT_CC), LHS, RHS)
2915       .getValue(1);
2916 }
2917 
2918 /// \defgroup AArch64CCMP CMP;CCMP matching
2919 ///
2920 /// These functions deal with the formation of CMP;CCMP;... sequences.
2921 /// The CCMP/CCMN/FCCMP/FCCMPE instructions allow the conditional execution of
2922 /// a comparison. They set the NZCV flags to a predefined value if their
/// predicate is false. This makes it possible to express arbitrary
/// conjunctions, for example "cmp 0 (and (setCA (cmp A)) (setCB (cmp B)))"
2925 /// expressed as:
2926 ///   cmp A
2927 ///   ccmp B, inv(CB), CA
2928 ///   check for CB flags
2929 ///
2930 /// This naturally lets us implement chains of AND operations with SETCC
2931 /// operands. And we can even implement some other situations by transforming
2932 /// them:
///   - We can implement (NEG SETCC), i.e. negating a single comparison, by
///     negating the flags used in a CCMP/FCCMP operation.
///   - We can negate the result of a whole chain of CMP/CCMP/FCCMP operations
///     by negating the flags we test for afterwards; i.e.
///     NEG (CMP CCMP CCMP ...) can be implemented.
2938 ///   - Note that we can only ever negate all previously processed results.
2939 ///     What we can not implement by flipping the flags to test is a negation
2940 ///     of two sub-trees (because the negation affects all sub-trees emitted so
2941 ///     far, so the 2nd sub-tree we emit would also affect the first).
2942 /// With those tools we can implement some OR operations:
2943 ///   - (OR (SETCC A) (SETCC B)) can be implemented via:
2944 ///     NEG (AND (NEG (SETCC A)) (NEG (SETCC B)))
2945 ///   - After transforming OR to NEG/AND combinations we may be able to use NEG
2946 ///     elimination rules from earlier to implement the whole thing as a
2947 ///     CCMP/FCCMP chain.
2948 ///
/// As a complete example:
///     or (or (setCA (cmp A)) (setCB (cmp B)))
///        (and (setCC (cmp C)) (setCD (cmp D)))
/// can be reassociated to:
///     or (and (setCC (cmp C)) (setCD (cmp D)))
///        (or (setCA (cmp A)) (setCB (cmp B)))
/// can be transformed to:
///     not (and (not (and (setCC (cmp C)) (setCD (cmp D))))
///              (and (not (setCA (cmp A))) (not (setCB (cmp B)))))
2958 /// which can be implemented as:
2959 ///   cmp C
2960 ///   ccmp D, inv(CD), CC
2961 ///   ccmp A, CA, inv(CD)
2962 ///   ccmp B, CB, inv(CA)
2963 ///   check for CB flags
2964 ///
2965 /// A counterexample is "or (and A B) (and C D)" which translates to
2966 /// not (and (not (and (not A) (not B))) (not (and (not C) (not D)))), we
2967 /// can only implement 1 of the inner (not) operations, but not both!
2968 /// @{
2969 
2970 /// Create a conditional comparison; Use CCMP, CCMN or FCCMP as appropriate.
2971 static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
2972                                          ISD::CondCode CC, SDValue CCOp,
2973                                          AArch64CC::CondCode Predicate,
2974                                          AArch64CC::CondCode OutCC,
2975                                          const SDLoc &DL, SelectionDAG &DAG) {
2976   unsigned Opcode = 0;
2977   const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
2978 
2979   if (LHS.getValueType().isFloatingPoint()) {
2980     assert(LHS.getValueType() != MVT::f128);
2981     if (LHS.getValueType() == MVT::f16 && !FullFP16) {
2982       LHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, LHS);
2983       RHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, RHS);
2984     }
2985     Opcode = AArch64ISD::FCCMP;
2986   } else if (RHS.getOpcode() == ISD::SUB) {
2987     SDValue SubOp0 = RHS.getOperand(0);
2988     if (isNullConstant(SubOp0) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
2989       // See emitComparison() on why we can only do this for SETEQ and SETNE.
2990       Opcode = AArch64ISD::CCMN;
2991       RHS = RHS.getOperand(1);
2992     }
2993   }
2994   if (Opcode == 0)
2995     Opcode = AArch64ISD::CCMP;
2996 
2997   SDValue Condition = DAG.getConstant(Predicate, DL, MVT_CC);
2998   AArch64CC::CondCode InvOutCC = AArch64CC::getInvertedCondCode(OutCC);
2999   unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvOutCC);
3000   SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
3001   return DAG.getNode(Opcode, DL, MVT_CC, LHS, RHS, NZCVOp, Condition, CCOp);
3002 }
3003 
3004 /// Returns true if @p Val is a tree of AND/OR/SETCC operations that can be
3005 /// expressed as a conjunction. See \ref AArch64CCMP.
3006 /// \param CanNegate    Set to true if we can negate the whole sub-tree just by
3007 ///                     changing the conditions on the SETCC tests.
3008 ///                     (this means we can call emitConjunctionRec() with
3009 ///                      Negate==true on this sub-tree)
3010 /// \param MustBeFirst  Set to true if this subtree needs to be negated and we
3011 ///                     cannot do the negation naturally. We are required to
3012 ///                     emit the subtree first in this case.
/// \param WillNegate   Is true if we are called when the result of this
3014 ///                     subexpression must be negated. This happens when the
3015 ///                     outer expression is an OR. We can use this fact to know
3016 ///                     that we have a double negation (or (or ...) ...) that
3017 ///                     can be implemented for free.
3018 static bool canEmitConjunction(const SDValue Val, bool &CanNegate,
3019                                bool &MustBeFirst, bool WillNegate,
3020                                unsigned Depth = 0) {
3021   if (!Val.hasOneUse())
3022     return false;
3023   unsigned Opcode = Val->getOpcode();
3024   if (Opcode == ISD::SETCC) {
3025     if (Val->getOperand(0).getValueType() == MVT::f128)
3026       return false;
3027     CanNegate = true;
3028     MustBeFirst = false;
3029     return true;
3030   }
3031   // Protect against exponential runtime and stack overflow.
3032   if (Depth > 6)
3033     return false;
3034   if (Opcode == ISD::AND || Opcode == ISD::OR) {
3035     bool IsOR = Opcode == ISD::OR;
3036     SDValue O0 = Val->getOperand(0);
3037     SDValue O1 = Val->getOperand(1);
3038     bool CanNegateL;
3039     bool MustBeFirstL;
3040     if (!canEmitConjunction(O0, CanNegateL, MustBeFirstL, IsOR, Depth+1))
3041       return false;
3042     bool CanNegateR;
3043     bool MustBeFirstR;
3044     if (!canEmitConjunction(O1, CanNegateR, MustBeFirstR, IsOR, Depth+1))
3045       return false;
3046 
3047     if (MustBeFirstL && MustBeFirstR)
3048       return false;
3049 
3050     if (IsOR) {
3051       // For an OR expression we need to be able to naturally negate at least
3052       // one side or we cannot do the transformation at all.
3053       if (!CanNegateL && !CanNegateR)
3054         return false;
      // If the result of the OR will be negated and we can naturally negate
      // the leaves, then this sub-tree as a whole negates naturally.
3057       CanNegate = WillNegate && CanNegateL && CanNegateR;
3058       // If we cannot naturally negate the whole sub-tree, then this must be
3059       // emitted first.
3060       MustBeFirst = !CanNegate;
3061     } else {
3062       assert(Opcode == ISD::AND && "Must be OR or AND");
3063       // We cannot naturally negate an AND operation.
3064       CanNegate = false;
3065       MustBeFirst = MustBeFirstL || MustBeFirstR;
3066     }
3067     return true;
3068   }
3069   return false;
3070 }
3071 
3072 /// Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain
/// of CCMP/FCCMP ops. See @ref AArch64CCMP.
/// Tries to transform the given i1 producing node @p Val to a series of compare
/// and conditional compare operations. @returns an NZCV flags producing node
/// and sets @p OutCC to the flags that should be tested or returns SDValue() if
/// the transformation was not possible.
/// \p Negate is true if we want this sub-tree to be negated just by changing
3079 /// SETCC conditions.
3080 static SDValue emitConjunctionRec(SelectionDAG &DAG, SDValue Val,
3081     AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp,
3082     AArch64CC::CondCode Predicate) {
3083   // We're at a tree leaf, produce a conditional comparison operation.
3084   unsigned Opcode = Val->getOpcode();
3085   if (Opcode == ISD::SETCC) {
3086     SDValue LHS = Val->getOperand(0);
3087     SDValue RHS = Val->getOperand(1);
3088     ISD::CondCode CC = cast<CondCodeSDNode>(Val->getOperand(2))->get();
3089     bool isInteger = LHS.getValueType().isInteger();
3090     if (Negate)
3091       CC = getSetCCInverse(CC, LHS.getValueType());
3092     SDLoc DL(Val);
3093     // Determine OutCC and handle FP special case.
3094     if (isInteger) {
3095       OutCC = changeIntCCToAArch64CC(CC);
3096     } else {
3097       assert(LHS.getValueType().isFloatingPoint());
3098       AArch64CC::CondCode ExtraCC;
3099       changeFPCCToANDAArch64CC(CC, OutCC, ExtraCC);
3100       // Some floating point conditions can't be tested with a single condition
3101       // code. Construct an additional comparison in this case.
3102       if (ExtraCC != AArch64CC::AL) {
3103         SDValue ExtraCmp;
3104         if (!CCOp.getNode())
3105           ExtraCmp = emitComparison(LHS, RHS, CC, DL, DAG);
3106         else
3107           ExtraCmp = emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate,
3108                                                ExtraCC, DL, DAG);
3109         CCOp = ExtraCmp;
3110         Predicate = ExtraCC;
3111       }
3112     }
3113 
3114     // Produce a normal comparison if we are first in the chain
3115     if (!CCOp)
3116       return emitComparison(LHS, RHS, CC, DL, DAG);
3117     // Otherwise produce a ccmp.
3118     return emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate, OutCC, DL,
3119                                      DAG);
3120   }
3121   assert(Val->hasOneUse() && "Valid conjunction/disjunction tree");
3122 
3123   bool IsOR = Opcode == ISD::OR;
3124 
3125   SDValue LHS = Val->getOperand(0);
3126   bool CanNegateL;
3127   bool MustBeFirstL;
3128   bool ValidL = canEmitConjunction(LHS, CanNegateL, MustBeFirstL, IsOR);
3129   assert(ValidL && "Valid conjunction/disjunction tree");
3130   (void)ValidL;
3131 
3132   SDValue RHS = Val->getOperand(1);
3133   bool CanNegateR;
3134   bool MustBeFirstR;
3135   bool ValidR = canEmitConjunction(RHS, CanNegateR, MustBeFirstR, IsOR);
3136   assert(ValidR && "Valid conjunction/disjunction tree");
3137   (void)ValidR;
3138 
3139   // Swap sub-tree that must come first to the right side.
3140   if (MustBeFirstL) {
3141     assert(!MustBeFirstR && "Valid conjunction/disjunction tree");
3142     std::swap(LHS, RHS);
3143     std::swap(CanNegateL, CanNegateR);
3144     std::swap(MustBeFirstL, MustBeFirstR);
3145   }
3146 
3147   bool NegateR;
3148   bool NegateAfterR;
3149   bool NegateL;
3150   bool NegateAfterAll;
3151   if (Opcode == ISD::OR) {
3152     // Swap the sub-tree that we can negate naturally to the left.
3153     if (!CanNegateL) {
3154       assert(CanNegateR && "at least one side must be negatable");
3155       assert(!MustBeFirstR && "invalid conjunction/disjunction tree");
3156       assert(!Negate);
3157       std::swap(LHS, RHS);
3158       NegateR = false;
3159       NegateAfterR = true;
3160     } else {
3161       // Negate the left sub-tree if possible, otherwise negate the result.
3162       NegateR = CanNegateR;
3163       NegateAfterR = !CanNegateR;
3164     }
3165     NegateL = true;
3166     NegateAfterAll = !Negate;
3167   } else {
3168     assert(Opcode == ISD::AND && "Valid conjunction/disjunction tree");
3169     assert(!Negate && "Valid conjunction/disjunction tree");
3170 
3171     NegateL = false;
3172     NegateR = false;
3173     NegateAfterR = false;
3174     NegateAfterAll = false;
3175   }
3176 
3177   // Emit sub-trees.
3178   AArch64CC::CondCode RHSCC;
3179   SDValue CmpR = emitConjunctionRec(DAG, RHS, RHSCC, NegateR, CCOp, Predicate);
3180   if (NegateAfterR)
3181     RHSCC = AArch64CC::getInvertedCondCode(RHSCC);
3182   SDValue CmpL = emitConjunctionRec(DAG, LHS, OutCC, NegateL, CmpR, RHSCC);
3183   if (NegateAfterAll)
3184     OutCC = AArch64CC::getInvertedCondCode(OutCC);
3185   return CmpL;
3186 }
3187 
/// Emit expression as a conjunction (a series of CCMP/FCCMP ops).
/// In some cases this is even possible with OR operations in the expression.
3190 /// See \ref AArch64CCMP.
3191 /// \see emitConjunctionRec().
3192 static SDValue emitConjunction(SelectionDAG &DAG, SDValue Val,
3193                                AArch64CC::CondCode &OutCC) {
3194   bool DummyCanNegate;
3195   bool DummyMustBeFirst;
3196   if (!canEmitConjunction(Val, DummyCanNegate, DummyMustBeFirst, false))
3197     return SDValue();
3198 
3199   return emitConjunctionRec(DAG, Val, OutCC, false, SDValue(), AArch64CC::AL);
3200 }
3201 
3202 /// @}
3203 
3204 /// Returns how profitable it is to fold a comparison's operand's shift and/or
3205 /// extension operations.
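/// For example (illustrative), in (cmp x, (shl (and y, 0xFF), 2)) both the
/// zero-extend implied by the mask and the small shift can fold into the
/// compare operand (an extended-register form such as "uxtb #2"), so the
/// operand scores 2 below.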
3206 static unsigned getCmpOperandFoldingProfit(SDValue Op) {
3207   auto isSupportedExtend = [&](SDValue V) {
3208     if (V.getOpcode() == ISD::SIGN_EXTEND_INREG)
3209       return true;
3210 
3211     if (V.getOpcode() == ISD::AND)
3212       if (ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
3213         uint64_t Mask = MaskCst->getZExtValue();
3214         return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF);
3215       }
3216 
3217     return false;
3218   };
3219 
3220   if (!Op.hasOneUse())
3221     return 0;
3222 
3223   if (isSupportedExtend(Op))
3224     return 1;
3225 
3226   unsigned Opc = Op.getOpcode();
3227   if (Opc == ISD::SHL || Opc == ISD::SRL || Opc == ISD::SRA)
3228     if (ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3229       uint64_t Shift = ShiftCst->getZExtValue();
3230       if (isSupportedExtend(Op.getOperand(0)))
3231         return (Shift <= 4) ? 2 : 1;
3232       EVT VT = Op.getValueType();
3233       if ((VT == MVT::i32 && Shift <= 31) || (VT == MVT::i64 && Shift <= 63))
3234         return 1;
3235     }
3236 
3237   return 0;
3238 }
3239 
3240 static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
3241                              SDValue &AArch64cc, SelectionDAG &DAG,
3242                              const SDLoc &dl) {
3243   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
3244     EVT VT = RHS.getValueType();
3245     uint64_t C = RHSC->getZExtValue();
3246     if (!isLegalArithImmed(C)) {
3247       // Constant does not fit, try adjusting it by one?
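      // e.g. "x < 4097" (SETLT with C = 0x1001, not encodable) can instead be
      // emitted as "x <= 4096" (SETLE with C = 0x1000, a legal shifted
      // immediate).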
3248       switch (CC) {
3249       default:
3250         break;
3251       case ISD::SETLT:
3252       case ISD::SETGE:
3253         if ((VT == MVT::i32 && C != 0x80000000 &&
3254              isLegalArithImmed((uint32_t)(C - 1))) ||
3255             (VT == MVT::i64 && C != 0x80000000ULL &&
3256              isLegalArithImmed(C - 1ULL))) {
3257           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
3258           C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1;
3259           RHS = DAG.getConstant(C, dl, VT);
3260         }
3261         break;
3262       case ISD::SETULT:
3263       case ISD::SETUGE:
3264         if ((VT == MVT::i32 && C != 0 &&
3265              isLegalArithImmed((uint32_t)(C - 1))) ||
3266             (VT == MVT::i64 && C != 0ULL && isLegalArithImmed(C - 1ULL))) {
3267           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
3268           C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1;
3269           RHS = DAG.getConstant(C, dl, VT);
3270         }
3271         break;
3272       case ISD::SETLE:
3273       case ISD::SETGT:
3274         if ((VT == MVT::i32 && C != INT32_MAX &&
3275              isLegalArithImmed((uint32_t)(C + 1))) ||
3276             (VT == MVT::i64 && C != INT64_MAX &&
3277              isLegalArithImmed(C + 1ULL))) {
3278           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
3279           C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
3280           RHS = DAG.getConstant(C, dl, VT);
3281         }
3282         break;
3283       case ISD::SETULE:
3284       case ISD::SETUGT:
3285         if ((VT == MVT::i32 && C != UINT32_MAX &&
3286              isLegalArithImmed((uint32_t)(C + 1))) ||
3287             (VT == MVT::i64 && C != UINT64_MAX &&
3288              isLegalArithImmed(C + 1ULL))) {
3289           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
3290           C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
3291           RHS = DAG.getConstant(C, dl, VT);
3292         }
3293         break;
3294       }
3295     }
3296   }
3297 
3298   // Comparisons are canonicalized so that the RHS operand is simpler than the
3299   // LHS one, the extreme case being when RHS is an immediate. However, AArch64
3300   // can fold some shift+extend operations on the RHS operand, so swap the
3301   // operands if that can be done.
3302   //
3303   // For example:
3304   //    lsl     w13, w11, #1
3305   //    cmp     w13, w12
3306   // can be turned into:
3307   //    cmp     w12, w11, lsl #1
3308   if (!isa<ConstantSDNode>(RHS) ||
3309       !isLegalArithImmed(cast<ConstantSDNode>(RHS)->getZExtValue())) {
3310     SDValue TheLHS = isCMN(LHS, CC) ? LHS.getOperand(1) : LHS;
3311 
3312     if (getCmpOperandFoldingProfit(TheLHS) > getCmpOperandFoldingProfit(RHS)) {
3313       std::swap(LHS, RHS);
3314       CC = ISD::getSetCCSwappedOperands(CC);
3315     }
3316   }
3317 
3318   SDValue Cmp;
3319   AArch64CC::CondCode AArch64CC;
3320   if ((CC == ISD::SETEQ || CC == ISD::SETNE) && isa<ConstantSDNode>(RHS)) {
3321     const ConstantSDNode *RHSC = cast<ConstantSDNode>(RHS);
3322 
3323     // The imm operand of ADDS is an unsigned immediate, in the range 0 to 4095.
3324     // For the i8 operand, the largest immediate is 255, so this can be easily
3325     // encoded in the compare instruction. For the i16 operand, however, the
3326     // largest immediate cannot be encoded in the compare.
3327     // Therefore, use a sign extending load and cmn to avoid materializing the
3328     // -1 constant. For example,
3329     // movz w1, #65535
3330     // ldrh w0, [x0, #0]
3331     // cmp w0, w1
3332     // >
3333     // ldrsh w0, [x0, #0]
3334     // cmn w0, #1
    // Fundamentally, we're relying on the property that (zext LHS) ==
    // (zext RHS) if and only if (sext LHS) == (sext RHS). The checks are in
    // place to ensure both the LHS and RHS are truly zero extended and to
    // make sure the transformation is profitable.
3339     if ((RHSC->getZExtValue() >> 16 == 0) && isa<LoadSDNode>(LHS) &&
3340         cast<LoadSDNode>(LHS)->getExtensionType() == ISD::ZEXTLOAD &&
3341         cast<LoadSDNode>(LHS)->getMemoryVT() == MVT::i16 &&
3342         LHS.getNode()->hasNUsesOfValue(1, 0)) {
3343       int16_t ValueofRHS = cast<ConstantSDNode>(RHS)->getZExtValue();
3344       if (ValueofRHS < 0 && isLegalArithImmed(-ValueofRHS)) {
3345         SDValue SExt =
3346             DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, LHS.getValueType(), LHS,
3347                         DAG.getValueType(MVT::i16));
3348         Cmp = emitComparison(SExt, DAG.getConstant(ValueofRHS, dl,
3349                                                    RHS.getValueType()),
3350                              CC, dl, DAG);
3351         AArch64CC = changeIntCCToAArch64CC(CC);
3352       }
3353     }
3354 
3355     if (!Cmp && (RHSC->isZero() || RHSC->isOne())) {
3356       if ((Cmp = emitConjunction(DAG, LHS, AArch64CC))) {
3357         if ((CC == ISD::SETNE) ^ RHSC->isZero())
3358           AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
3359       }
3360     }
3361   }
3362 
3363   if (!Cmp) {
3364     Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
3365     AArch64CC = changeIntCCToAArch64CC(CC);
3366   }
3367   AArch64cc = DAG.getConstant(AArch64CC, dl, MVT_CC);
3368   return Cmp;
3369 }
3370 
3371 static std::pair<SDValue, SDValue>
3372 getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) {
3373   assert((Op.getValueType() == MVT::i32 || Op.getValueType() == MVT::i64) &&
3374          "Unsupported value type");
3375   SDValue Value, Overflow;
3376   SDLoc DL(Op);
3377   SDValue LHS = Op.getOperand(0);
3378   SDValue RHS = Op.getOperand(1);
3379   unsigned Opc = 0;
3380   switch (Op.getOpcode()) {
3381   default:
3382     llvm_unreachable("Unknown overflow instruction!");
3383   case ISD::SADDO:
3384     Opc = AArch64ISD::ADDS;
3385     CC = AArch64CC::VS;
3386     break;
3387   case ISD::UADDO:
3388     Opc = AArch64ISD::ADDS;
3389     CC = AArch64CC::HS;
3390     break;
3391   case ISD::SSUBO:
3392     Opc = AArch64ISD::SUBS;
3393     CC = AArch64CC::VS;
3394     break;
3395   case ISD::USUBO:
3396     Opc = AArch64ISD::SUBS;
3397     CC = AArch64CC::LO;
3398     break;
  // Multiply needs a little bit of extra work.
3400   case ISD::SMULO:
3401   case ISD::UMULO: {
3402     CC = AArch64CC::NE;
3403     bool IsSigned = Op.getOpcode() == ISD::SMULO;
3404     if (Op.getValueType() == MVT::i32) {
3405       // Extend to 64-bits, then perform a 64-bit multiply.
3406       unsigned ExtendOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3407       LHS = DAG.getNode(ExtendOpc, DL, MVT::i64, LHS);
3408       RHS = DAG.getNode(ExtendOpc, DL, MVT::i64, RHS);
3409       SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS);
3410       Value = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
3411 
3412       // Check that the result fits into a 32-bit integer.
3413       SDVTList VTs = DAG.getVTList(MVT::i64, MVT_CC);
3414       if (IsSigned) {
3415         // cmp xreg, wreg, sxtw
3416         SDValue SExtMul = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Value);
3417         Overflow =
3418             DAG.getNode(AArch64ISD::SUBS, DL, VTs, Mul, SExtMul).getValue(1);
3419       } else {
3420         // tst xreg, #0xffffffff00000000
3421         SDValue UpperBits = DAG.getConstant(0xFFFFFFFF00000000, DL, MVT::i64);
3422         Overflow =
3423             DAG.getNode(AArch64ISD::ANDS, DL, VTs, Mul, UpperBits).getValue(1);
3424       }
3425       break;
3426     }
3427     assert(Op.getValueType() == MVT::i64 && "Expected an i64 value type");
    // For the 64-bit multiply, compute the full product and check its upper
    // bits.
3429     Value = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS);
3430     if (IsSigned) {
3431       SDValue UpperBits = DAG.getNode(ISD::MULHS, DL, MVT::i64, LHS, RHS);
3432       SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i64, Value,
3433                                       DAG.getConstant(63, DL, MVT::i64));
3434       // It is important that LowerBits is last, otherwise the arithmetic
3435       // shift will not be folded into the compare (SUBS).
3436       SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32);
3437       Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, UpperBits, LowerBits)
3438                      .getValue(1);
3439     } else {
3440       SDValue UpperBits = DAG.getNode(ISD::MULHU, DL, MVT::i64, LHS, RHS);
3441       SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32);
3442       Overflow =
3443           DAG.getNode(AArch64ISD::SUBS, DL, VTs,
3444                       DAG.getConstant(0, DL, MVT::i64),
3445                       UpperBits).getValue(1);
3446     }
3447     break;
3448   }
3449   } // switch (...)
3450 
3451   if (Opc) {
3452     SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
3453 
3454     // Emit the AArch64 operation with overflow check.
3455     Value = DAG.getNode(Opc, DL, VTs, LHS, RHS);
3456     Overflow = Value.getValue(1);
3457   }
3458   return std::make_pair(Value, Overflow);
3459 }
3460 
3461 SDValue AArch64TargetLowering::LowerXOR(SDValue Op, SelectionDAG &DAG) const {
3462   if (useSVEForFixedLengthVectorVT(Op.getValueType()))
3463     return LowerToScalableOp(Op, DAG);
3464 
3465   SDValue Sel = Op.getOperand(0);
3466   SDValue Other = Op.getOperand(1);
3467   SDLoc dl(Sel);
3468 
3469   // If the operand is an overflow checking operation, invert the condition
3470   // code and kill the Not operation. I.e., transform:
3471   // (xor (overflow_op_bool, 1))
3472   //   -->
3473   // (csel 1, 0, invert(cc), overflow_op_bool)
3474   // ... which later gets transformed to just a cset instruction with an
3475   // inverted condition code, rather than a cset + eor sequence.
3476   if (isOneConstant(Other) && ISD::isOverflowIntrOpRes(Sel)) {
3477     // Only lower legal XALUO ops.
3478     if (!DAG.getTargetLoweringInfo().isTypeLegal(Sel->getValueType(0)))
3479       return SDValue();
3480 
3481     SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
3482     SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
3483     AArch64CC::CondCode CC;
3484     SDValue Value, Overflow;
3485     std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Sel.getValue(0), DAG);
3486     SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32);
3487     return DAG.getNode(AArch64ISD::CSEL, dl, Op.getValueType(), TVal, FVal,
3488                        CCVal, Overflow);
3489   }
3490   // If neither operand is a SELECT_CC, give up.
3491   if (Sel.getOpcode() != ISD::SELECT_CC)
3492     std::swap(Sel, Other);
3493   if (Sel.getOpcode() != ISD::SELECT_CC)
3494     return Op;
3495 
3496   // The folding we want to perform is:
3497   // (xor x, (select_cc a, b, cc, 0, -1) )
3498   //   -->
3499   // (csel x, (xor x, -1), cc ...)
3500   //
3501   // The latter will get matched to a CSINV instruction.
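  // (CSINV Wd, Wn, Wm, cc yields Wn when cc holds and ~Wm otherwise, which is
  // exactly the select-or-invert pattern above.)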
3502 
3503   ISD::CondCode CC = cast<CondCodeSDNode>(Sel.getOperand(4))->get();
3504   SDValue LHS = Sel.getOperand(0);
3505   SDValue RHS = Sel.getOperand(1);
3506   SDValue TVal = Sel.getOperand(2);
3507   SDValue FVal = Sel.getOperand(3);
3508 
3509   // FIXME: This could be generalized to non-integer comparisons.
3510   if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64)
3511     return Op;
3512 
3513   ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
3514   ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
3515 
3516   // The values aren't constants, this isn't the pattern we're looking for.
3517   if (!CFVal || !CTVal)
3518     return Op;
3519 
3520   // We can commute the SELECT_CC by inverting the condition.  This
3521   // might be needed to make this fit into a CSINV pattern.
3522   if (CTVal->isAllOnes() && CFVal->isZero()) {
3523     std::swap(TVal, FVal);
3524     std::swap(CTVal, CFVal);
3525     CC = ISD::getSetCCInverse(CC, LHS.getValueType());
3526   }
3527 
3528   // If the constants line up, perform the transform!
3529   if (CTVal->isZero() && CFVal->isAllOnes()) {
3530     SDValue CCVal;
3531     SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
3532 
3533     FVal = Other;
3534     TVal = DAG.getNode(ISD::XOR, dl, Other.getValueType(), Other,
3535                        DAG.getConstant(-1ULL, dl, Other.getValueType()));
3536 
3537     return DAG.getNode(AArch64ISD::CSEL, dl, Sel.getValueType(), FVal, TVal,
3538                        CCVal, Cmp);
3539   }
3540 
3541   return Op;
3542 }
3543 
3544 // If Invert is false, sets 'C' bit of NZCV to 0 if value is 0, else sets 'C'
3545 // bit to 1. If Invert is true, sets 'C' bit of NZCV to 1 if value is 0, else
3546 // sets 'C' bit to 0.
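// (With Invert false this emits SUBS Value, #1, whose carry is set exactly
// when Value != 0; with Invert true it emits SUBS 0, Value instead.)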
3547 static SDValue valueToCarryFlag(SDValue Value, SelectionDAG &DAG, bool Invert) {
3548   SDLoc DL(Value);
3549   EVT VT = Value.getValueType();
3550   SDValue Op0 = Invert ? DAG.getConstant(0, DL, VT) : Value;
3551   SDValue Op1 = Invert ? Value : DAG.getConstant(1, DL, VT);
3552   SDValue Cmp =
3553       DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::Glue), Op0, Op1);
3554   return Cmp.getValue(1);
3555 }
3556 
3557 // If Invert is false, value is 1 if 'C' bit of NZCV is 1, else 0.
3558 // If Invert is true, value is 0 if 'C' bit of NZCV is 1, else 1.
3559 static SDValue carryFlagToValue(SDValue Flag, EVT VT, SelectionDAG &DAG,
3560                                 bool Invert) {
3561   assert(Flag.getResNo() == 1);
3562   SDLoc DL(Flag);
3563   SDValue Zero = DAG.getConstant(0, DL, VT);
3564   SDValue One = DAG.getConstant(1, DL, VT);
3565   unsigned Cond = Invert ? AArch64CC::LO : AArch64CC::HS;
3566   SDValue CC = DAG.getConstant(Cond, DL, MVT::i32);
3567   return DAG.getNode(AArch64ISD::CSEL, DL, VT, One, Zero, CC, Flag);
3568 }
3569 
3570 // Value is 1 if 'V' bit of NZCV is 1, else 0
3571 static SDValue overflowFlagToValue(SDValue Flag, EVT VT, SelectionDAG &DAG) {
3572   assert(Flag.getResNo() == 1);
3573   SDLoc DL(Flag);
3574   SDValue Zero = DAG.getConstant(0, DL, VT);
3575   SDValue One = DAG.getConstant(1, DL, VT);
3576   SDValue CC = DAG.getConstant(AArch64CC::VS, DL, MVT::i32);
3577   return DAG.getNode(AArch64ISD::CSEL, DL, VT, One, Zero, CC, Flag);
3578 }
3579 
3580 // This lowering is inefficient, but it will get cleaned up by
3581 // `foldOverflowCheck`
3582 static SDValue lowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG, unsigned Opcode,
3583                                 bool IsSigned) {
3584   EVT VT0 = Op.getValue(0).getValueType();
3585   EVT VT1 = Op.getValue(1).getValueType();
3586 
3587   if (VT0 != MVT::i32 && VT0 != MVT::i64)
3588     return SDValue();
3589 
3590   bool InvertCarry = Opcode == AArch64ISD::SBCS;
3591   SDValue OpLHS = Op.getOperand(0);
3592   SDValue OpRHS = Op.getOperand(1);
3593   SDValue OpCarryIn = valueToCarryFlag(Op.getOperand(2), DAG, InvertCarry);
3594 
3595   SDLoc DL(Op);
3596   SDVTList VTs = DAG.getVTList(VT0, VT1);
3597 
3598   SDValue Sum = DAG.getNode(Opcode, DL, DAG.getVTList(VT0, MVT::Glue), OpLHS,
3599                             OpRHS, OpCarryIn);
3600 
3601   SDValue OutFlag =
3602       IsSigned ? overflowFlagToValue(Sum.getValue(1), VT1, DAG)
3603                : carryFlagToValue(Sum.getValue(1), VT1, DAG, InvertCarry);
3604 
3605   return DAG.getNode(ISD::MERGE_VALUES, DL, VTs, Sum, OutFlag);
3606 }
3607 
3608 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
3609   // Let legalize expand this if it isn't a legal type yet.
3610   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
3611     return SDValue();
3612 
3613   SDLoc dl(Op);
3614   AArch64CC::CondCode CC;
3615   // The actual operation that sets the overflow or carry flag.
3616   SDValue Value, Overflow;
3617   std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Op, DAG);
3618 
3619   // We use 0 and 1 as false and true values.
3620   SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
3621   SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
3622 
3623   // We use an inverted condition, because the conditional select is inverted
3624   // too. This will allow it to be selected to a single instruction:
3625   // CSINC Wd, WZR, WZR, invert(cond).
3626   SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32);
3627   Overflow = DAG.getNode(AArch64ISD::CSEL, dl, MVT::i32, FVal, TVal,
3628                          CCVal, Overflow);
3629 
3630   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
3631   return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
3632 }
3633 
3634 // Prefetch operands are:
3635 // 1: Address to prefetch
3636 // 2: bool isWrite
3637 // 3: int locality (0 = no locality ... 3 = extreme locality)
3638 // 4: bool isDataCache
3639 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) {
3640   SDLoc DL(Op);
3641   unsigned IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
3642   unsigned Locality = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
3643   unsigned IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
3644 
3645   bool IsStream = !Locality;
  // When the locality number is set:
  if (Locality) {
    // The front-end should have filtered out the out-of-range values.
    assert(Locality <= 3 && "Prefetch locality out-of-range");
    // The locality degree is the inverse of the cache level: the encoding
    // starts at 0 for level 1, so flip the number around.
    Locality = 3 - Locality;
  }
3654   }
3655 
  // Build the mask value encoding the expected behavior.
3657   unsigned PrfOp = (IsWrite << 4) |     // Load/Store bit
3658                    (!IsData << 3) |     // IsDataCache bit
3659                    (Locality << 1) |    // Cache level bits
3660                    (unsigned)IsStream;  // Stream bit
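  // For example, a write prefetch of data with locality 3 gives
  // PrfOp = 0b10000, which corresponds to PSTL1KEEP (prefetch for store,
  // L1 cache, temporal).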
3661   return DAG.getNode(AArch64ISD::PREFETCH, DL, MVT::Other, Op.getOperand(0),
3662                      DAG.getConstant(PrfOp, DL, MVT::i32), Op.getOperand(1));
3663 }
3664 
3665 SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op,
3666                                               SelectionDAG &DAG) const {
3667   EVT VT = Op.getValueType();
3668   if (VT.isScalableVector())
3669     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_EXTEND_MERGE_PASSTHRU);
3670 
3671   if (useSVEForFixedLengthVectorVT(VT))
3672     return LowerFixedLengthFPExtendToSVE(Op, DAG);
3673 
3674   assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");
3675   return SDValue();
3676 }
3677 
3678 SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op,
3679                                              SelectionDAG &DAG) const {
3680   if (Op.getValueType().isScalableVector())
3681     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_ROUND_MERGE_PASSTHRU);
3682 
3683   bool IsStrict = Op->isStrictFPOpcode();
3684   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
3685   EVT SrcVT = SrcVal.getValueType();
3686 
3687   if (useSVEForFixedLengthVectorVT(SrcVT))
3688     return LowerFixedLengthFPRoundToSVE(Op, DAG);
3689 
3690   if (SrcVT != MVT::f128) {
3691     // Expand cases where the input is a vector bigger than NEON.
3692     if (useSVEForFixedLengthVectorVT(SrcVT))
3693       return SDValue();
3694 
3695     // It's legal except when f128 is involved
3696     return Op;
3697   }
3698 
3699   return SDValue();
3700 }
3701 
3702 SDValue AArch64TargetLowering::LowerVectorFP_TO_INT(SDValue Op,
3703                                                     SelectionDAG &DAG) const {
3704   // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp.
3705   // Any additional optimization in this function should be recorded
3706   // in the cost tables.
3707   bool IsStrict = Op->isStrictFPOpcode();
3708   EVT InVT = Op.getOperand(IsStrict ? 1 : 0).getValueType();
3709   EVT VT = Op.getValueType();
3710 
3711   if (VT.isScalableVector()) {
3712     unsigned Opcode = Op.getOpcode() == ISD::FP_TO_UINT
3713                           ? AArch64ISD::FCVTZU_MERGE_PASSTHRU
3714                           : AArch64ISD::FCVTZS_MERGE_PASSTHRU;
3715     return LowerToPredicatedOp(Op, DAG, Opcode);
3716   }
3717 
3718   if (useSVEForFixedLengthVectorVT(VT) || useSVEForFixedLengthVectorVT(InVT))
3719     return LowerFixedLengthFPToIntToSVE(Op, DAG);
3720 
3721   unsigned NumElts = InVT.getVectorNumElements();
3722 
3723   // f16 conversions are promoted to f32 when full fp16 is not supported.
3724   if (InVT.getVectorElementType() == MVT::f16 &&
3725       !Subtarget->hasFullFP16()) {
3726     MVT NewVT = MVT::getVectorVT(MVT::f32, NumElts);
3727     SDLoc dl(Op);
3728     if (IsStrict) {
3729       SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {NewVT, MVT::Other},
3730                                 {Op.getOperand(0), Op.getOperand(1)});
3731       return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
3732                          {Ext.getValue(1), Ext.getValue(0)});
3733     }
3734     return DAG.getNode(
3735         Op.getOpcode(), dl, Op.getValueType(),
3736         DAG.getNode(ISD::FP_EXTEND, dl, NewVT, Op.getOperand(0)));
3737   }
3738 
3739   uint64_t VTSize = VT.getFixedSizeInBits();
3740   uint64_t InVTSize = InVT.getFixedSizeInBits();
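       // If the integer result is narrower than the FP source (e.g. v2f64 ->
       // v2i32), convert at the source element width and truncate afterwards; if
       // it is wider (e.g. v4f16 -> v4i32), extend the source to the matching FP
       // width first.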
3741   if (VTSize < InVTSize) {
3742     SDLoc dl(Op);
3743     if (IsStrict) {
3744       InVT = InVT.changeVectorElementTypeToInteger();
3745       SDValue Cv = DAG.getNode(Op.getOpcode(), dl, {InVT, MVT::Other},
3746                                {Op.getOperand(0), Op.getOperand(1)});
3747       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, VT, Cv);
3748       return DAG.getMergeValues({Trunc, Cv.getValue(1)}, dl);
3749     }
3750     SDValue Cv =
3751         DAG.getNode(Op.getOpcode(), dl, InVT.changeVectorElementTypeToInteger(),
3752                     Op.getOperand(0));
3753     return DAG.getNode(ISD::TRUNCATE, dl, VT, Cv);
3754   }
3755 
3756   if (VTSize > InVTSize) {
3757     SDLoc dl(Op);
3758     MVT ExtVT =
3759         MVT::getVectorVT(MVT::getFloatingPointVT(VT.getScalarSizeInBits()),
3760                          VT.getVectorNumElements());
3761     if (IsStrict) {
3762       SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {ExtVT, MVT::Other},
3763                                 {Op.getOperand(0), Op.getOperand(1)});
3764       return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
3765                          {Ext.getValue(1), Ext.getValue(0)});
3766     }
3767     SDValue Ext = DAG.getNode(ISD::FP_EXTEND, dl, ExtVT, Op.getOperand(0));
3768     return DAG.getNode(Op.getOpcode(), dl, VT, Ext);
3769   }
3770 
3771   // Use a scalar operation for conversions between single-element vectors of
3772   // the same size.
3773   if (NumElts == 1) {
3774     SDLoc dl(Op);
3775     SDValue Extract = DAG.getNode(
3776         ISD::EXTRACT_VECTOR_ELT, dl, InVT.getScalarType(),
3777         Op.getOperand(IsStrict ? 1 : 0), DAG.getConstant(0, dl, MVT::i64));
3778     EVT ScalarVT = VT.getScalarType();
3779     if (IsStrict)
3780       return DAG.getNode(Op.getOpcode(), dl, {ScalarVT, MVT::Other},
3781                          {Op.getOperand(0), Extract});
3782     return DAG.getNode(Op.getOpcode(), dl, ScalarVT, Extract);
3783   }
3784 
3785   // Remaining conversions are between same-sized vectors and are legal as-is.
3786   return Op;
3787 }
3788 
3789 SDValue AArch64TargetLowering::LowerFP_TO_INT(SDValue Op,
3790                                               SelectionDAG &DAG) const {
3791   bool IsStrict = Op->isStrictFPOpcode();
3792   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
3793 
3794   if (SrcVal.getValueType().isVector())
3795     return LowerVectorFP_TO_INT(Op, DAG);
3796 
3797   // f16 conversions are promoted to f32 when full fp16 is not supported.
3798   if (SrcVal.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
3799     SDLoc dl(Op);
3800     if (IsStrict) {
3801       SDValue Ext =
3802           DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
3803                       {Op.getOperand(0), SrcVal});
3804       return DAG.getNode(Op.getOpcode(), dl, {Op.getValueType(), MVT::Other},
3805                          {Ext.getValue(1), Ext.getValue(0)});
3806     }
3807     return DAG.getNode(
3808         Op.getOpcode(), dl, Op.getValueType(),
3809         DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, SrcVal));
3810   }
3811 
3812   if (SrcVal.getValueType() != MVT::f128) {
3813     // It's legal except when f128 is involved.
3814     return Op;
3815   }
3816 
3817   return SDValue();
3818 }
3819 
3820 SDValue
3821 AArch64TargetLowering::LowerVectorFP_TO_INT_SAT(SDValue Op,
3822                                                 SelectionDAG &DAG) const {
3823   // AArch64 FP-to-int conversions saturate to the destination element size, so
3824   // we can lower common saturating conversions to simple instructions.
3825   SDValue SrcVal = Op.getOperand(0);
3826   EVT SrcVT = SrcVal.getValueType();
3827   EVT DstVT = Op.getValueType();
3828   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3829 
3830   uint64_t SrcElementWidth = SrcVT.getScalarSizeInBits();
3831   uint64_t DstElementWidth = DstVT.getScalarSizeInBits();
3832   uint64_t SatWidth = SatVT.getScalarSizeInBits();
3833   assert(SatWidth <= DstElementWidth &&
3834          "Saturation width cannot exceed result width");
3835 
3836   // TODO: Consider lowering to SVE operations, as in LowerVectorFP_TO_INT.
3837   // Currently, the `llvm.fpto[su]i.sat.*` intrinsics don't accept scalable
3838   // types, so this is hard to reach.
3839   if (DstVT.isScalableVector())
3840     return SDValue();
3841 
3842   EVT SrcElementVT = SrcVT.getVectorElementType();
3843 
3844   // Promote f16 to f32 when full FP16 support is missing, or when the result
       // element is wider than 16 bits, and saturate the widened result.
3845   if (SrcElementVT == MVT::f16 &&
3846       (!Subtarget->hasFullFP16() || DstElementWidth > 16)) {
3847     MVT F32VT = MVT::getVectorVT(MVT::f32, SrcVT.getVectorNumElements());
3848     SrcVal = DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), F32VT, SrcVal);
3849     SrcVT = F32VT;
3850     SrcElementVT = MVT::f32;
3851     SrcElementWidth = 32;
3852   } else if (SrcElementVT != MVT::f64 && SrcElementVT != MVT::f32 &&
3853              SrcElementVT != MVT::f16)
3854     return SDValue();
3855 
3856   SDLoc DL(Op);
3857   // Cases that we can emit directly.
3858   if (SrcElementWidth == DstElementWidth && SrcElementWidth == SatWidth)
3859     return DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal,
3860                        DAG.getValueType(DstVT.getScalarType()));
3861 
3862   // Otherwise emit a conversion that saturates at a wider bit width, then clamp
3863   // the result to the requested width. This is only valid if the legal
3864   // conversion is wider than the saturation width. For f64, which lacks 64-bit
3865   // vector MIN/MAX, it can be simpler to scalarize (at least until SQXTN is selected).
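       // For example, a v4f32 -> v4i16 fptosi.sat with an i16 saturation width
       // first converts to v4i32 (saturating to i32), clamps the result to
       // [-32768, 32767] with SMIN/SMAX, and finally truncates to v4i16.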
3866   if (SrcElementWidth < SatWidth || SrcElementVT == MVT::f64)
3867     return SDValue();
3868 
3869   EVT IntVT = SrcVT.changeVectorElementTypeToInteger();
3870   SDValue NativeCvt = DAG.getNode(Op.getOpcode(), DL, IntVT, SrcVal,
3871                                   DAG.getValueType(IntVT.getScalarType()));
3872   SDValue Sat;
3873   if (Op.getOpcode() == ISD::FP_TO_SINT_SAT) {
3874     SDValue MinC = DAG.getConstant(
3875         APInt::getSignedMaxValue(SatWidth).sext(SrcElementWidth), DL, IntVT);
3876     SDValue Min = DAG.getNode(ISD::SMIN, DL, IntVT, NativeCvt, MinC);
3877     SDValue MaxC = DAG.getConstant(
3878         APInt::getSignedMinValue(SatWidth).sext(SrcElementWidth), DL, IntVT);
3879     Sat = DAG.getNode(ISD::SMAX, DL, IntVT, Min, MaxC);
3880   } else {
3881     SDValue MinC = DAG.getConstant(
3882         APInt::getAllOnesValue(SatWidth).zext(SrcElementWidth), DL, IntVT);
3883     Sat = DAG.getNode(ISD::UMIN, DL, IntVT, NativeCvt, MinC);
3884   }
3885 
3886   return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Sat);
3887 }
3888 
3889 SDValue AArch64TargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
3890                                                   SelectionDAG &DAG) const {
3891   // AArch64 FP-to-int conversions saturate to the destination register size, so
3892   // we can lower common saturating conversions to simple instructions.
3893   SDValue SrcVal = Op.getOperand(0);
3894   EVT SrcVT = SrcVal.getValueType();
3895 
3896   if (SrcVT.isVector())
3897     return LowerVectorFP_TO_INT_SAT(Op, DAG);
3898 
3899   EVT DstVT = Op.getValueType();
3900   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3901   uint64_t SatWidth = SatVT.getScalarSizeInBits();
3902   uint64_t DstWidth = DstVT.getScalarSizeInBits();
3903   assert(SatWidth <= DstWidth && "Saturation width cannot exceed result width");
3904 
3905   // In the absence of FP16 support, promote f16 to f32 and saturate the result.
3906   if (SrcVT == MVT::f16 && !Subtarget->hasFullFP16()) {
3907     SrcVal = DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), MVT::f32, SrcVal);
3908     SrcVT = MVT::f32;
3909   } else if (SrcVT != MVT::f64 && SrcVT != MVT::f32 && SrcVT != MVT::f16)
3910     return SDValue();
3911 
3912   SDLoc DL(Op);
3913   // Cases that we can emit directly.
3914   if ((SrcVT == MVT::f64 || SrcVT == MVT::f32 ||
3915        (SrcVT == MVT::f16 && Subtarget->hasFullFP16())) &&
3916       DstVT == SatVT && (DstVT == MVT::i64 || DstVT == MVT::i32))
3917     return DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal,
3918                        DAG.getValueType(DstVT));
3919 
3920   // Otherwise emit a conversion that saturates at a wider bit width, then clamp
3921   // the result to the requested width. This is only valid if the legal
3922   // conversion is wider than the saturation width.
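       // For example, an fptosi.sat with an i8 saturation width and an i32 result
       // emits a native i32 saturating conversion (FCVTZS) and then clamps the
       // value to [-128, 127] with SMIN/SMAX.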
3923   if (DstWidth < SatWidth)
3924     return SDValue();
3925 
3926   SDValue NativeCvt =
3927       DAG.getNode(Op.getOpcode(), DL, DstVT, SrcVal, DAG.getValueType(DstVT));
3928   SDValue Sat;
3929   if (Op.getOpcode() == ISD::FP_TO_SINT_SAT) {
3930     SDValue MinC = DAG.getConstant(
3931         APInt::getSignedMaxValue(SatWidth).sext(DstWidth), DL, DstVT);
3932     SDValue Min = DAG.getNode(ISD::SMIN, DL, DstVT, NativeCvt, MinC);
3933     SDValue MaxC = DAG.getConstant(
3934         APInt::getSignedMinValue(SatWidth).sext(DstWidth), DL, DstVT);
3935     Sat = DAG.getNode(ISD::SMAX, DL, DstVT, Min, MaxC);
3936   } else {
3937     SDValue MinC = DAG.getConstant(
3938         APInt::getAllOnesValue(SatWidth).zext(DstWidth), DL, DstVT);
3939     Sat = DAG.getNode(ISD::UMIN, DL, DstVT, NativeCvt, MinC);
3940   }
3941 
3942   return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Sat);
3943 }
3944 
3945 SDValue AArch64TargetLowering::LowerVectorINT_TO_FP(SDValue Op,
3946                                                     SelectionDAG &DAG) const {
3947   // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp.
3948   // Any additional optimization in this function should be recorded
3949   // in the cost tables.
3950   bool IsStrict = Op->isStrictFPOpcode();
3951   EVT VT = Op.getValueType();
3952   SDLoc dl(Op);
3953   SDValue In = Op.getOperand(IsStrict ? 1 : 0);
3954   EVT InVT = In.getValueType();
3955   unsigned Opc = Op.getOpcode();
3956   bool IsSigned = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP;
3957 
3958   if (VT.isScalableVector()) {
3959     if (InVT.getVectorElementType() == MVT::i1) {
3960       // We can't convert directly from an SVE predicate; extend it first.
3961       unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3962       EVT CastVT = getPromotedVTForPredicate(InVT);
3963       In = DAG.getNode(CastOpc, dl, CastVT, In);
3964       return DAG.getNode(Opc, dl, VT, In);
3965     }
3966 
3967     unsigned Opcode = IsSigned ? AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU
3968                                : AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU;
3969     return LowerToPredicatedOp(Op, DAG, Opcode);
3970   }
3971 
3972   if (useSVEForFixedLengthVectorVT(VT) || useSVEForFixedLengthVectorVT(InVT))
3973     return LowerFixedLengthIntToFPToSVE(Op, DAG);
3974 
3975   uint64_t VTSize = VT.getFixedSizeInBits();
3976   uint64_t InVTSize = InVT.getFixedSizeInBits();
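       // If the FP result is narrower than the integer source (e.g. v2i64 ->
       // v2f32), convert at the source width and round the result down; if it is
       // wider (e.g. v4i16 -> v4f32), extend the integers to the result width
       // first.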
3977   if (VTSize < InVTSize) {
3978     MVT CastVT =
3979         MVT::getVectorVT(MVT::getFloatingPointVT(InVT.getScalarSizeInBits()),
3980                          InVT.getVectorNumElements());
3981     if (IsStrict) {
3982       In = DAG.getNode(Opc, dl, {CastVT, MVT::Other},
3983                        {Op.getOperand(0), In});
3984       return DAG.getNode(
3985           ISD::STRICT_FP_ROUND, dl, {VT, MVT::Other},
3986           {In.getValue(1), In.getValue(0), DAG.getIntPtrConstant(0, dl)});
3987     }
3988     In = DAG.getNode(Opc, dl, CastVT, In);
3989     return DAG.getNode(ISD::FP_ROUND, dl, VT, In, DAG.getIntPtrConstant(0, dl));
3990   }
3991 
3992   if (VTSize > InVTSize) {
3993     unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3994     EVT CastVT = VT.changeVectorElementTypeToInteger();
3995     In = DAG.getNode(CastOpc, dl, CastVT, In);
3996     if (IsStrict)
3997       return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op.getOperand(0), In});
3998     return DAG.getNode(Opc, dl, VT, In);
3999   }
4000 
4001   // Use a scalar operation for conversions between single-element vectors of
4002   // the same size.
4003   if (VT.getVectorNumElements() == 1) {
4004     SDValue Extract = DAG.getNode(
4005         ISD::EXTRACT_VECTOR_ELT, dl, InVT.getScalarType(),
4006         In, DAG.getConstant(0, dl, MVT::i64));
4007     EVT ScalarVT = VT.getScalarType();
4008     if (IsStrict)
4009       return DAG.getNode(Op.getOpcode(), dl, {ScalarVT, MVT::Other},
4010                          {Op.getOperand(0), Extract});
4011     return DAG.getNode(Op.getOpcode(), dl, ScalarVT, Extract);
4012   }
4013 
4014   return Op;
4015 }
4016 
4017 SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op,
4018                                             SelectionDAG &DAG) const {
4019   if (Op.getValueType().isVector())
4020     return LowerVectorINT_TO_FP(Op, DAG);
4021 
4022   bool IsStrict = Op->isStrictFPOpcode();
4023   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
4024 
4025   // f16 conversions are promoted to f32 when full fp16 is not supported.
4026   if (Op.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
4027     SDLoc dl(Op);
4028     if (IsStrict) {
4029       SDValue Val = DAG.getNode(Op.getOpcode(), dl, {MVT::f32, MVT::Other},
4030                                 {Op.getOperand(0), SrcVal});
4031       return DAG.getNode(
4032           ISD::STRICT_FP_ROUND, dl, {MVT::f16, MVT::Other},
4033           {Val.getValue(1), Val.getValue(0), DAG.getIntPtrConstant(0, dl)});
4034     }
4035     return DAG.getNode(
4036         ISD::FP_ROUND, dl, MVT::f16,
4037         DAG.getNode(Op.getOpcode(), dl, MVT::f32, SrcVal),
4038         DAG.getIntPtrConstant(0, dl));
4039   }
4040 
4041   // i128 conversions are libcalls.
4042   if (SrcVal.getValueType() == MVT::i128)
4043     return SDValue();
4044 
4045   // Other conversions are legal, unless it's to the completely software-based
4046   // fp128.
4047   if (Op.getValueType() != MVT::f128)
4048     return Op;
4049   return SDValue();
4050 }
4051 
4052 SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
4053                                             SelectionDAG &DAG) const {
4054   // For iOS, we want to call an alternative entry point: __sincos_stret,
4055   // which returns the values in two S / D registers.
4056   SDLoc dl(Op);
4057   SDValue Arg = Op.getOperand(0);
4058   EVT ArgVT = Arg.getValueType();
4059   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
4060 
4061   ArgListTy Args;
4062   ArgListEntry Entry;
4063 
4064   Entry.Node = Arg;
4065   Entry.Ty = ArgTy;
4066   Entry.IsSExt = false;
4067   Entry.IsZExt = false;
4068   Args.push_back(Entry);
4069 
4070   RTLIB::Libcall LC = ArgVT == MVT::f64 ? RTLIB::SINCOS_STRET_F64
4071                                         : RTLIB::SINCOS_STRET_F32;
4072   const char *LibcallName = getLibcallName(LC);
4073   SDValue Callee =
4074       DAG.getExternalSymbol(LibcallName, getPointerTy(DAG.getDataLayout()));
4075 
4076   StructType *RetTy = StructType::get(ArgTy, ArgTy);
4077   TargetLowering::CallLoweringInfo CLI(DAG);
4078   CLI.setDebugLoc(dl)
4079       .setChain(DAG.getEntryNode())
4080       .setLibCallee(CallingConv::Fast, RetTy, Callee, std::move(Args));
4081 
4082   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
4083   return CallResult.first;
4084 }
4085 
4086 static MVT getSVEContainerType(EVT ContentTy);
4087 
4088 SDValue AArch64TargetLowering::LowerBITCAST(SDValue Op,
4089                                             SelectionDAG &DAG) const {
4090   EVT OpVT = Op.getValueType();
4091   EVT ArgVT = Op.getOperand(0).getValueType();
4092 
4093   if (useSVEForFixedLengthVectorVT(OpVT))
4094     return LowerFixedLengthBitcastToSVE(Op, DAG);
4095 
4096   if (OpVT.isScalableVector()) {
4097     // Bitcasting between unpacked vector types of different element counts is
4098     // not a NOP because the live elements are laid out differently.
4099     //                01234567
4100     // e.g. nxv2i32 = XX??XX??
4101     //      nxv4f16 = X?X?X?X?
4102     if (OpVT.getVectorElementCount() != ArgVT.getVectorElementCount())
4103       return SDValue();
4104 
4105     if (isTypeLegal(OpVT) && !isTypeLegal(ArgVT)) {
4106       assert(OpVT.isFloatingPoint() && !ArgVT.isFloatingPoint() &&
4107              "Expected int->fp bitcast!");
4108       SDValue ExtResult =
4109           DAG.getNode(ISD::ANY_EXTEND, SDLoc(Op), getSVEContainerType(ArgVT),
4110                       Op.getOperand(0));
4111       return getSVESafeBitCast(OpVT, ExtResult, DAG);
4112     }
4113     return getSVESafeBitCast(OpVT, Op.getOperand(0), DAG);
4114   }
4115 
4116   if (OpVT != MVT::f16 && OpVT != MVT::bf16)
4117     return SDValue();
4118 
4119   // Bitcasts between f16 and bf16 are legal.
4120   if (ArgVT == MVT::f16 || ArgVT == MVT::bf16)
4121     return Op;
4122 
4123   assert(ArgVT == MVT::i16);
4124   SDLoc DL(Op);
4125 
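       // Lower the i16 -> f16/bf16 bitcast via the 32-bit registers: any-extend
       // to i32, bitcast that to f32, then extract the h subregister of the
       // resulting FPR to get the f16/bf16 value.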
4126   Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op.getOperand(0));
4127   Op = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Op);
4128   return SDValue(
4129       DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, OpVT, Op,
4130                          DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
4131       0);
4132 }
4133 
4134 static EVT getExtensionTo64Bits(const EVT &OrigVT) {
4135   if (OrigVT.getSizeInBits() >= 64)
4136     return OrigVT;
4137 
4138   assert(OrigVT.isSimple() && "Expecting a simple value type");
4139 
4140   MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
4141   switch (OrigSimpleTy) {
4142   default: llvm_unreachable("Unexpected Vector Type");
4143   case MVT::v2i8:
4144   case MVT::v2i16:
4145     return MVT::v2i32;
4146   case MVT::v4i8:
4147     return MVT::v4i16;
4148   }
4149 }
4150 
4151 static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG,
4152                                                  const EVT &OrigTy,
4153                                                  const EVT &ExtTy,
4154                                                  unsigned ExtOpcode) {
4155   // The vector originally had a size of OrigTy. It was then extended to ExtTy.
4156   // We expect the ExtTy to be 128 bits total. If the OrigTy is less than
4157   // 64 bits, we need to insert a new extension so that it will be 64 bits.
4158   assert(ExtTy.is128BitVector() && "Unexpected extension size");
4159   if (OrigTy.getSizeInBits() >= 64)
4160     return N;
4161 
4162   // Must extend size to at least 64 bits to be used as an operand for VMULL.
4163   EVT NewVT = getExtensionTo64Bits(OrigTy);
4164 
4165   return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
4166 }
4167 
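     // Returns true if N is a BUILD_VECTOR of constants where every element fits
     // in half of the vector's element width, i.e. the vector could have been
     // produced by sign- or zero-extending a narrower BUILD_VECTOR.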
4168 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
4169                                    bool isSigned) {
4170   EVT VT = N->getValueType(0);
4171 
4172   if (N->getOpcode() != ISD::BUILD_VECTOR)
4173     return false;
4174 
4175   for (const SDValue &Elt : N->op_values()) {
4176     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
4177       unsigned EltSize = VT.getScalarSizeInBits();
4178       unsigned HalfSize = EltSize / 2;
4179       if (isSigned) {
4180         if (!isIntN(HalfSize, C->getSExtValue()))
4181           return false;
4182       } else {
4183         if (!isUIntN(HalfSize, C->getZExtValue()))
4184           return false;
4185       }
4186       continue;
4187     }
4188     return false;
4189   }
4190 
4191   return true;
4192 }
4193 
4194 static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
4195   if (N->getOpcode() == ISD::SIGN_EXTEND ||
4196       N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND)
4197     return addRequiredExtensionForVectorMULL(N->getOperand(0), DAG,
4198                                              N->getOperand(0)->getValueType(0),
4199                                              N->getValueType(0),
4200                                              N->getOpcode());
4201 
4202   assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
4203   EVT VT = N->getValueType(0);
4204   SDLoc dl(N);
4205   unsigned EltSize = VT.getScalarSizeInBits() / 2;
4206   unsigned NumElts = VT.getVectorNumElements();
4207   MVT TruncVT = MVT::getIntegerVT(EltSize);
4208   SmallVector<SDValue, 8> Ops;
4209   for (unsigned i = 0; i != NumElts; ++i) {
4210     ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
4211     const APInt &CInt = C->getAPIntValue();
4212     // Element types smaller than 32 bits are not legal, so use i32 elements.
4213     // The values are implicitly truncated so sext vs. zext doesn't matter.
4214     Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
4215   }
4216   return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
4217 }
4218 
4219 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
4220   return N->getOpcode() == ISD::SIGN_EXTEND ||
4221          N->getOpcode() == ISD::ANY_EXTEND ||
4222          isExtendedBUILD_VECTOR(N, DAG, true);
4223 }
4224 
4225 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
4226   return N->getOpcode() == ISD::ZERO_EXTEND ||
4227          N->getOpcode() == ISD::ANY_EXTEND ||
4228          isExtendedBUILD_VECTOR(N, DAG, false);
4229 }
4230 
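     // isAddSubSExt / isAddSubZExt return true if N is an ADD or SUB whose two
     // (single-use) operands are both sign- or zero-extended respectively, so a
     // multiply by N can be distributed into two S/UMULL-based products.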
4231 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
4232   unsigned Opcode = N->getOpcode();
4233   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
4234     SDNode *N0 = N->getOperand(0).getNode();
4235     SDNode *N1 = N->getOperand(1).getNode();
4236     return N0->hasOneUse() && N1->hasOneUse() &&
4237       isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
4238   }
4239   return false;
4240 }
4241 
4242 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
4243   unsigned Opcode = N->getOpcode();
4244   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
4245     SDNode *N0 = N->getOperand(0).getNode();
4246     SDNode *N1 = N->getOperand(1).getNode();
4247     return N0->hasOneUse() && N1->hasOneUse() &&
4248       isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
4249   }
4250   return false;
4251 }
4252 
4253 SDValue AArch64TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
4254                                                 SelectionDAG &DAG) const {
4255   // The rounding mode is in bits 23:22 of the FPCR.
4256   // The AArch64 rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0.
4257   // The formula we use to implement this is ((FPCR + (1 << 22)) >> 22) & 3,
4258   // so that the shift and AND get folded into a bitfield extract.
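       // For example, RN (RMode 0) yields ((0 + 1) & 3) = 1 (to nearest) and
       // RZ (RMode 3) yields ((3 + 1) & 3) = 0 (toward zero), matching the
       // FLT_ROUNDS encoding.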
4259   SDLoc dl(Op);
4260 
4261   SDValue Chain = Op.getOperand(0);
4262   SDValue FPCR_64 = DAG.getNode(
4263       ISD::INTRINSIC_W_CHAIN, dl, {MVT::i64, MVT::Other},
4264       {Chain, DAG.getConstant(Intrinsic::aarch64_get_fpcr, dl, MVT::i64)});
4265   Chain = FPCR_64.getValue(1);
4266   SDValue FPCR_32 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, FPCR_64);
4267   SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPCR_32,
4268                                   DAG.getConstant(1U << 22, dl, MVT::i32));
4269   SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
4270                               DAG.getConstant(22, dl, MVT::i32));
4271   SDValue AND = DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
4272                             DAG.getConstant(3, dl, MVT::i32));
4273   return DAG.getMergeValues({AND, Chain}, dl);
4274 }
4275 
4276 SDValue AArch64TargetLowering::LowerSET_ROUNDING(SDValue Op,
4277                                                  SelectionDAG &DAG) const {
4278   SDLoc DL(Op);
4279   SDValue Chain = Op->getOperand(0);
4280   SDValue RMValue = Op->getOperand(1);
4281 
4282   // The rounding mode is in bits 23:22 of the FPCR.
4283   // The mapping from the llvm.set.rounding argument to the FPCR rounding mode
4284   // is 0->3, 1->0, 2->1, 3->2. The formula we use to implement this is
4285   // (((arg - 1) & 3) << 22).
4286   //
4287   // The argument of llvm.set.rounding must be within the range [0, 3], so
4288   // NearestTiesToAway (4) is not handled here. It is the responsibility of the
4289   // code that generates llvm.set.rounding to ensure this condition.
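       // For example, llvm.set.rounding(0) (toward zero) computes ((0 - 1) & 3)
       // = 3, selecting RZ, while llvm.set.rounding(1) (to nearest) computes 0,
       // selecting RN.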
4290 
4291   // Calculate new value of FPCR[23:22].
4292   RMValue = DAG.getNode(ISD::SUB, DL, MVT::i32, RMValue,
4293                         DAG.getConstant(1, DL, MVT::i32));
4294   RMValue = DAG.getNode(ISD::AND, DL, MVT::i32, RMValue,
4295                         DAG.getConstant(0x3, DL, MVT::i32));
4296   RMValue =
4297       DAG.getNode(ISD::SHL, DL, MVT::i32, RMValue,
4298                   DAG.getConstant(AArch64::RoundingBitsPos, DL, MVT::i32));
4299   RMValue = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, RMValue);
4300 
4301   // Get current value of FPCR.
4302   SDValue Ops[] = {
4303       Chain, DAG.getTargetConstant(Intrinsic::aarch64_get_fpcr, DL, MVT::i64)};
4304   SDValue FPCR =
4305       DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, {MVT::i64, MVT::Other}, Ops);
4306   Chain = FPCR.getValue(1);
4307   FPCR = FPCR.getValue(0);
4308 
4309   // Put the new rounding mode into FPCR[23:22].
4310   const int RMMask = ~(AArch64::Rounding::rmMask << AArch64::RoundingBitsPos);
4311   FPCR = DAG.getNode(ISD::AND, DL, MVT::i64, FPCR,
4312                      DAG.getConstant(RMMask, DL, MVT::i64));
4313   FPCR = DAG.getNode(ISD::OR, DL, MVT::i64, FPCR, RMValue);
4314   SDValue Ops2[] = {
4315       Chain, DAG.getTargetConstant(Intrinsic::aarch64_set_fpcr, DL, MVT::i64),
4316       FPCR};
4317   return DAG.getNode(ISD::INTRINSIC_VOID, DL, MVT::Other, Ops2);
4318 }
4319 
4320 SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
4321   EVT VT = Op.getValueType();
4322 
4323   // If SVE is available then i64 vector multiplications can also be made legal.
4324   bool OverrideNEON = VT == MVT::v2i64 || VT == MVT::v1i64;
4325 
4326   if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT, OverrideNEON))
4327     return LowerToPredicatedOp(Op, DAG, AArch64ISD::MUL_PRED);
4328 
4329   // Multiplications are only custom-lowered for 128-bit vectors so that
4330   // VMULL can be detected.  Otherwise v2i64 multiplications are not legal.
4331   assert(VT.is128BitVector() && VT.isInteger() &&
4332          "unexpected type for custom-lowering ISD::MUL");
4333   SDNode *N0 = Op.getOperand(0).getNode();
4334   SDNode *N1 = Op.getOperand(1).getNode();
4335   unsigned NewOpc = 0;
4336   bool isMLA = false;
4337   bool isN0SExt = isSignExtended(N0, DAG);
4338   bool isN1SExt = isSignExtended(N1, DAG);
4339   if (isN0SExt && isN1SExt)
4340     NewOpc = AArch64ISD::SMULL;
4341   else {
4342     bool isN0ZExt = isZeroExtended(N0, DAG);
4343     bool isN1ZExt = isZeroExtended(N1, DAG);
4344     if (isN0ZExt && isN1ZExt)
4345       NewOpc = AArch64ISD::UMULL;
4346     else if (isN1SExt || isN1ZExt) {
4347       // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
4348       // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
4349       if (isN1SExt && isAddSubSExt(N0, DAG)) {
4350         NewOpc = AArch64ISD::SMULL;
4351         isMLA = true;
4352       } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
4353         NewOpc = AArch64ISD::UMULL;
4354         isMLA = true;
4355       } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
4356         std::swap(N0, N1);
4357         NewOpc = AArch64ISD::UMULL;
4358         isMLA = true;
4359       }
4360     }
4361 
4362     if (!NewOpc) {
4363       if (VT == MVT::v2i64)
4364         // Fall through to expand this.  It is not legal.
4365         return SDValue();
4366       else
4367         // Other vector multiplications are legal.
4368         return Op;
4369     }
4370   }
4371 
4372   // Legalize to a S/UMULL instruction
4373   SDLoc DL(Op);
4374   SDValue Op0;
4375   SDValue Op1 = skipExtensionForVectorMULL(N1, DAG);
4376   if (!isMLA) {
4377     Op0 = skipExtensionForVectorMULL(N0, DAG);
4378     assert(Op0.getValueType().is64BitVector() &&
4379            Op1.getValueType().is64BitVector() &&
4380            "unexpected types for extended operands to VMULL");
4381     return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
4382   }
4383   // Optimize (zext A + zext B) * C to (S/UMULL A, C) + (S/UMULL B, C) during
4384   // isel lowering to take advantage of no-stall back-to-back s/umul + s/umla.
4385   // This is true for CPUs with accumulate forwarding such as Cortex-A53/A57.
4386   SDValue N00 = skipExtensionForVectorMULL(N0->getOperand(0).getNode(), DAG);
4387   SDValue N01 = skipExtensionForVectorMULL(N0->getOperand(1).getNode(), DAG);
4388   EVT Op1VT = Op1.getValueType();
4389   return DAG.getNode(N0->getOpcode(), DL, VT,
4390                      DAG.getNode(NewOpc, DL, VT,
4391                                DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
4392                      DAG.getNode(NewOpc, DL, VT,
4393                                DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
4394 }
4395 
4396 static inline SDValue getPTrue(SelectionDAG &DAG, SDLoc DL, EVT VT,
4397                                int Pattern) {
4398   if (VT == MVT::nxv1i1 && Pattern == AArch64SVEPredPattern::all)
4399     return DAG.getConstant(1, DL, MVT::nxv1i1);
4400   return DAG.getNode(AArch64ISD::PTRUE, DL, VT,
4401                      DAG.getTargetConstant(Pattern, DL, MVT::i32));
4402 }
4403 
4404 // Returns a safe bitcast between two scalable vector predicates, where
4405 // any newly created lanes from a widening bitcast are defined as zero.
4406 static SDValue getSVEPredicateBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) {
4407   SDLoc DL(Op);
4408   EVT InVT = Op.getValueType();
4409 
4410   assert(InVT.getVectorElementType() == MVT::i1 &&
4411          VT.getVectorElementType() == MVT::i1 &&
4412          "Expected a predicate-to-predicate bitcast");
4413   assert(VT.isScalableVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
4414          InVT.isScalableVector() &&
4415          DAG.getTargetLoweringInfo().isTypeLegal(InVT) &&
4416          "Only expect to cast between legal scalable predicate types!");
4417 
4418   // Return the operand if the cast isn't changing type,
4419   // e.g. <n x 16 x i1> -> <n x 16 x i1>
4420   if (InVT == VT)
4421     return Op;
4422 
4423   SDValue Reinterpret = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Op);
4424 
4425   // We only have to zero the lanes if new lanes are being defined, e.g. when
4426   // casting from <vscale x 2 x i1> to <vscale x 16 x i1>. If this is not the
4427   // case (e.g. when casting from <vscale x 16 x i1> -> <vscale x 2 x i1>) then
4428   // we can return here.
4429   if (InVT.bitsGT(VT))
4430     return Reinterpret;
4431 
4432   // Check if the other lanes are already known to be zeroed by
4433   // construction.
4434   if (isZeroingInactiveLanes(Op))
4435     return Reinterpret;
4436 
4437   // Zero the newly introduced lanes.
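       // An all-active mask of the source predicate type, reinterpreted as the
       // wider predicate type, has true bits only in the lane positions that were
       // live before the cast, so ANDing with it clears the new lanes.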
4438   SDValue Mask = DAG.getConstant(1, DL, InVT);
4439   Mask = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Mask);
4440   return DAG.getNode(ISD::AND, DL, VT, Reinterpret, Mask);
4441 }
4442 
4443 SDValue AArch64TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4444                                                       SelectionDAG &DAG) const {
4445   unsigned IntNo = Op.getConstantOperandVal(1);
4446   SDLoc DL(Op);
4447   switch (IntNo) {
4448   default:
4449     return SDValue(); // Don't custom lower most intrinsics.
4450   case Intrinsic::aarch64_mops_memset_tag: {
4451     auto Node = cast<MemIntrinsicSDNode>(Op.getNode());
4452     SDValue Chain = Node->getChain();
4453     SDValue Dst = Op.getOperand(2);
4454     SDValue Val = Op.getOperand(3);
4455     Val = DAG.getAnyExtOrTrunc(Val, DL, MVT::i64);
4456     SDValue Size = Op.getOperand(4);
4457     auto Alignment = Node->getMemOperand()->getAlign();
4458     bool IsVol = Node->isVolatile();
4459     auto DstPtrInfo = Node->getPointerInfo();
4460 
4461     const auto &SDI =
4462         static_cast<const AArch64SelectionDAGInfo &>(DAG.getSelectionDAGInfo());
4463     SDValue MS =
4464         SDI.EmitMOPS(AArch64ISD::MOPS_MEMSET_TAGGING, DAG, DL, Chain, Dst, Val,
4465                      Size, Alignment, IsVol, DstPtrInfo, MachinePointerInfo{});
4466 
4467     // MOPS_MEMSET_TAGGING has 3 results (DstWb, SizeWb, Chain) whereas the
4468     // intrinsic has 2. So hide SizeWb using MERGE_VALUES. Otherwise
4469     // LowerOperationWrapper will complain that the number of results has
4470     // changed.
4471     return DAG.getMergeValues({MS.getValue(0), MS.getValue(2)}, DL);
4472   }
4473   case Intrinsic::aarch64_sme_get_pstatesm: {
4474     SDValue Chain = Op.getOperand(0);
4475     SDValue MRS = DAG.getNode(
4476         AArch64ISD::MRS, DL, DAG.getVTList(MVT::i64, MVT::Glue, MVT::Other),
4477         Chain, DAG.getConstant(AArch64SysReg::SVCR, DL, MVT::i64));
4478     SDValue Mask = DAG.getConstant(/* PSTATE.SM */ 1, DL, MVT::i64);
4479     SDValue And = DAG.getNode(ISD::AND, DL, MVT::i64, MRS, Mask);
4480     return DAG.getMergeValues({And, Chain}, DL);
4481   }
4482   }
4483 }
4484 
4485 SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4486                                                      SelectionDAG &DAG) const {
4487   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4488   SDLoc dl(Op);
4489   switch (IntNo) {
4490   default: return SDValue();    // Don't custom lower most intrinsics.
4491   case Intrinsic::thread_pointer: {
4492     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4493     return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
4494   }
4495   case Intrinsic::aarch64_neon_abs: {
4496     EVT Ty = Op.getValueType();
4497     if (Ty == MVT::i64) {
4498       SDValue Result = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64,
4499                                    Op.getOperand(1));
4500       Result = DAG.getNode(ISD::ABS, dl, MVT::v1i64, Result);
4501       return DAG.getNode(ISD::BITCAST, dl, MVT::i64, Result);
4502     } else if (Ty.isVector() && Ty.isInteger() && isTypeLegal(Ty)) {
4503       return DAG.getNode(ISD::ABS, dl, Ty, Op.getOperand(1));
4504     } else {
4505       report_fatal_error("Unexpected type for AArch64 NEON intrinsic");
4506     }
4507   }
4508   case Intrinsic::aarch64_neon_smax:
4509     return DAG.getNode(ISD::SMAX, dl, Op.getValueType(),
4510                        Op.getOperand(1), Op.getOperand(2));
4511   case Intrinsic::aarch64_neon_umax:
4512     return DAG.getNode(ISD::UMAX, dl, Op.getValueType(),
4513                        Op.getOperand(1), Op.getOperand(2));
4514   case Intrinsic::aarch64_neon_smin:
4515     return DAG.getNode(ISD::SMIN, dl, Op.getValueType(),
4516                        Op.getOperand(1), Op.getOperand(2));
4517   case Intrinsic::aarch64_neon_umin:
4518     return DAG.getNode(ISD::UMIN, dl, Op.getValueType(),
4519                        Op.getOperand(1), Op.getOperand(2));
4520 
4521   case Intrinsic::aarch64_sve_sunpkhi:
4522     return DAG.getNode(AArch64ISD::SUNPKHI, dl, Op.getValueType(),
4523                        Op.getOperand(1));
4524   case Intrinsic::aarch64_sve_sunpklo:
4525     return DAG.getNode(AArch64ISD::SUNPKLO, dl, Op.getValueType(),
4526                        Op.getOperand(1));
4527   case Intrinsic::aarch64_sve_uunpkhi:
4528     return DAG.getNode(AArch64ISD::UUNPKHI, dl, Op.getValueType(),
4529                        Op.getOperand(1));
4530   case Intrinsic::aarch64_sve_uunpklo:
4531     return DAG.getNode(AArch64ISD::UUNPKLO, dl, Op.getValueType(),
4532                        Op.getOperand(1));
4533   case Intrinsic::aarch64_sve_clasta_n:
4534     return DAG.getNode(AArch64ISD::CLASTA_N, dl, Op.getValueType(),
4535                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4536   case Intrinsic::aarch64_sve_clastb_n:
4537     return DAG.getNode(AArch64ISD::CLASTB_N, dl, Op.getValueType(),
4538                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4539   case Intrinsic::aarch64_sve_lasta:
4540     return DAG.getNode(AArch64ISD::LASTA, dl, Op.getValueType(),
4541                        Op.getOperand(1), Op.getOperand(2));
4542   case Intrinsic::aarch64_sve_lastb:
4543     return DAG.getNode(AArch64ISD::LASTB, dl, Op.getValueType(),
4544                        Op.getOperand(1), Op.getOperand(2));
4545   case Intrinsic::aarch64_sve_rev:
4546     return DAG.getNode(ISD::VECTOR_REVERSE, dl, Op.getValueType(),
4547                        Op.getOperand(1));
4548   case Intrinsic::aarch64_sve_tbl:
4549     return DAG.getNode(AArch64ISD::TBL, dl, Op.getValueType(),
4550                        Op.getOperand(1), Op.getOperand(2));
4551   case Intrinsic::aarch64_sve_trn1:
4552     return DAG.getNode(AArch64ISD::TRN1, dl, Op.getValueType(),
4553                        Op.getOperand(1), Op.getOperand(2));
4554   case Intrinsic::aarch64_sve_trn2:
4555     return DAG.getNode(AArch64ISD::TRN2, dl, Op.getValueType(),
4556                        Op.getOperand(1), Op.getOperand(2));
4557   case Intrinsic::aarch64_sve_uzp1:
4558     return DAG.getNode(AArch64ISD::UZP1, dl, Op.getValueType(),
4559                        Op.getOperand(1), Op.getOperand(2));
4560   case Intrinsic::aarch64_sve_uzp2:
4561     return DAG.getNode(AArch64ISD::UZP2, dl, Op.getValueType(),
4562                        Op.getOperand(1), Op.getOperand(2));
4563   case Intrinsic::aarch64_sve_zip1:
4564     return DAG.getNode(AArch64ISD::ZIP1, dl, Op.getValueType(),
4565                        Op.getOperand(1), Op.getOperand(2));
4566   case Intrinsic::aarch64_sve_zip2:
4567     return DAG.getNode(AArch64ISD::ZIP2, dl, Op.getValueType(),
4568                        Op.getOperand(1), Op.getOperand(2));
4569   case Intrinsic::aarch64_sve_splice:
4570     return DAG.getNode(AArch64ISD::SPLICE, dl, Op.getValueType(),
4571                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4572   case Intrinsic::aarch64_sve_ptrue:
4573     return getPTrue(DAG, dl, Op.getValueType(),
4574                     cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
4575   case Intrinsic::aarch64_sve_clz:
4576     return DAG.getNode(AArch64ISD::CTLZ_MERGE_PASSTHRU, dl, Op.getValueType(),
4577                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
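       // The SME cnts* intrinsics are derived from RDSVL #1, which gives the
       // streaming vector length in bytes; shifting right by 1, 2 or 3 converts
       // that to halfwords, words or doublewords respectively.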
4578   case Intrinsic::aarch64_sme_cntsb:
4579     return DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
4580                        DAG.getConstant(1, dl, MVT::i32));
4581   case Intrinsic::aarch64_sme_cntsh: {
4582     SDValue One = DAG.getConstant(1, dl, MVT::i32);
4583     SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(), One);
4584     return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes, One);
4585   }
4586   case Intrinsic::aarch64_sme_cntsw: {
4587     SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
4588                                 DAG.getConstant(1, dl, MVT::i32));
4589     return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes,
4590                        DAG.getConstant(2, dl, MVT::i32));
4591   }
4592   case Intrinsic::aarch64_sme_cntsd: {
4593     SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, dl, Op.getValueType(),
4594                                 DAG.getConstant(1, dl, MVT::i32));
4595     return DAG.getNode(ISD::SRL, dl, Op.getValueType(), Bytes,
4596                        DAG.getConstant(3, dl, MVT::i32));
4597   }
4598   case Intrinsic::aarch64_sve_cnt: {
4599     SDValue Data = Op.getOperand(3);
4600     // CTPOP only supports integer operands.
4601     if (Data.getValueType().isFloatingPoint())
4602       Data = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Data);
4603     return DAG.getNode(AArch64ISD::CTPOP_MERGE_PASSTHRU, dl, Op.getValueType(),
4604                        Op.getOperand(2), Data, Op.getOperand(1));
4605   }
4606   case Intrinsic::aarch64_sve_dupq_lane:
4607     return LowerDUPQLane(Op, DAG);
4608   case Intrinsic::aarch64_sve_convert_from_svbool:
4609     return getSVEPredicateBitCast(Op.getValueType(), Op.getOperand(1), DAG);
4610   case Intrinsic::aarch64_sve_convert_to_svbool:
4611     return getSVEPredicateBitCast(MVT::nxv16i1, Op.getOperand(1), DAG);
4612   case Intrinsic::aarch64_sve_fneg:
4613     return DAG.getNode(AArch64ISD::FNEG_MERGE_PASSTHRU, dl, Op.getValueType(),
4614                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4615   case Intrinsic::aarch64_sve_frintp:
4616     return DAG.getNode(AArch64ISD::FCEIL_MERGE_PASSTHRU, dl, Op.getValueType(),
4617                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4618   case Intrinsic::aarch64_sve_frintm:
4619     return DAG.getNode(AArch64ISD::FFLOOR_MERGE_PASSTHRU, dl, Op.getValueType(),
4620                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4621   case Intrinsic::aarch64_sve_frinti:
4622     return DAG.getNode(AArch64ISD::FNEARBYINT_MERGE_PASSTHRU, dl, Op.getValueType(),
4623                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4624   case Intrinsic::aarch64_sve_frintx:
4625     return DAG.getNode(AArch64ISD::FRINT_MERGE_PASSTHRU, dl, Op.getValueType(),
4626                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4627   case Intrinsic::aarch64_sve_frinta:
4628     return DAG.getNode(AArch64ISD::FROUND_MERGE_PASSTHRU, dl, Op.getValueType(),
4629                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4630   case Intrinsic::aarch64_sve_frintn:
4631     return DAG.getNode(AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU, dl, Op.getValueType(),
4632                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4633   case Intrinsic::aarch64_sve_frintz:
4634     return DAG.getNode(AArch64ISD::FTRUNC_MERGE_PASSTHRU, dl, Op.getValueType(),
4635                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4636   case Intrinsic::aarch64_sve_ucvtf:
4637     return DAG.getNode(AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU, dl,
4638                        Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4639                        Op.getOperand(1));
4640   case Intrinsic::aarch64_sve_scvtf:
4641     return DAG.getNode(AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU, dl,
4642                        Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4643                        Op.getOperand(1));
4644   case Intrinsic::aarch64_sve_fcvtzu:
4645     return DAG.getNode(AArch64ISD::FCVTZU_MERGE_PASSTHRU, dl,
4646                        Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4647                        Op.getOperand(1));
4648   case Intrinsic::aarch64_sve_fcvtzs:
4649     return DAG.getNode(AArch64ISD::FCVTZS_MERGE_PASSTHRU, dl,
4650                        Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4651                        Op.getOperand(1));
4652   case Intrinsic::aarch64_sve_fsqrt:
4653     return DAG.getNode(AArch64ISD::FSQRT_MERGE_PASSTHRU, dl, Op.getValueType(),
4654                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4655   case Intrinsic::aarch64_sve_frecpx:
4656     return DAG.getNode(AArch64ISD::FRECPX_MERGE_PASSTHRU, dl, Op.getValueType(),
4657                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4658   case Intrinsic::aarch64_sve_frecpe_x:
4659     return DAG.getNode(AArch64ISD::FRECPE, dl, Op.getValueType(),
4660                        Op.getOperand(1));
4661   case Intrinsic::aarch64_sve_frecps_x:
4662     return DAG.getNode(AArch64ISD::FRECPS, dl, Op.getValueType(),
4663                        Op.getOperand(1), Op.getOperand(2));
4664   case Intrinsic::aarch64_sve_frsqrte_x:
4665     return DAG.getNode(AArch64ISD::FRSQRTE, dl, Op.getValueType(),
4666                        Op.getOperand(1));
4667   case Intrinsic::aarch64_sve_frsqrts_x:
4668     return DAG.getNode(AArch64ISD::FRSQRTS, dl, Op.getValueType(),
4669                        Op.getOperand(1), Op.getOperand(2));
4670   case Intrinsic::aarch64_sve_fabs:
4671     return DAG.getNode(AArch64ISD::FABS_MERGE_PASSTHRU, dl, Op.getValueType(),
4672                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4673   case Intrinsic::aarch64_sve_abs:
4674     return DAG.getNode(AArch64ISD::ABS_MERGE_PASSTHRU, dl, Op.getValueType(),
4675                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4676   case Intrinsic::aarch64_sve_neg:
4677     return DAG.getNode(AArch64ISD::NEG_MERGE_PASSTHRU, dl, Op.getValueType(),
4678                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4679   case Intrinsic::aarch64_sve_insr: {
4680     SDValue Scalar = Op.getOperand(2);
4681     EVT ScalarTy = Scalar.getValueType();
4682     if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
4683       Scalar = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Scalar);
4684 
4685     return DAG.getNode(AArch64ISD::INSR, dl, Op.getValueType(),
4686                        Op.getOperand(1), Scalar);
4687   }
4688   case Intrinsic::aarch64_sve_rbit:
4689     return DAG.getNode(AArch64ISD::BITREVERSE_MERGE_PASSTHRU, dl,
4690                        Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
4691                        Op.getOperand(1));
4692   case Intrinsic::aarch64_sve_revb:
4693     return DAG.getNode(AArch64ISD::BSWAP_MERGE_PASSTHRU, dl, Op.getValueType(),
4694                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4695   case Intrinsic::aarch64_sve_revh:
4696     return DAG.getNode(AArch64ISD::REVH_MERGE_PASSTHRU, dl, Op.getValueType(),
4697                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4698   case Intrinsic::aarch64_sve_revw:
4699     return DAG.getNode(AArch64ISD::REVW_MERGE_PASSTHRU, dl, Op.getValueType(),
4700                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4701   case Intrinsic::aarch64_sve_revd:
4702     return DAG.getNode(AArch64ISD::REVD_MERGE_PASSTHRU, dl, Op.getValueType(),
4703                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
4704   case Intrinsic::aarch64_sve_sxtb:
4705     return DAG.getNode(
4706         AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4707         Op.getOperand(2), Op.getOperand(3),
4708         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i8)),
4709         Op.getOperand(1));
4710   case Intrinsic::aarch64_sve_sxth:
4711     return DAG.getNode(
4712         AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4713         Op.getOperand(2), Op.getOperand(3),
4714         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i16)),
4715         Op.getOperand(1));
4716   case Intrinsic::aarch64_sve_sxtw:
4717     return DAG.getNode(
4718         AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4719         Op.getOperand(2), Op.getOperand(3),
4720         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i32)),
4721         Op.getOperand(1));
4722   case Intrinsic::aarch64_sve_uxtb:
4723     return DAG.getNode(
4724         AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4725         Op.getOperand(2), Op.getOperand(3),
4726         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i8)),
4727         Op.getOperand(1));
4728   case Intrinsic::aarch64_sve_uxth:
4729     return DAG.getNode(
4730         AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4731         Op.getOperand(2), Op.getOperand(3),
4732         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i16)),
4733         Op.getOperand(1));
4734   case Intrinsic::aarch64_sve_uxtw:
4735     return DAG.getNode(
4736         AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
4737         Op.getOperand(2), Op.getOperand(3),
4738         DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i32)),
4739         Op.getOperand(1));
4740   case Intrinsic::localaddress: {
4741     const auto &MF = DAG.getMachineFunction();
4742     const auto *RegInfo = Subtarget->getRegisterInfo();
4743     unsigned Reg = RegInfo->getLocalAddressRegister(MF);
4744     return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg,
4745                               Op.getSimpleValueType());
4746   }
4747 
4748   case Intrinsic::eh_recoverfp: {
4749     // FIXME: This needs to be implemented to correctly handle highly aligned
4750     // stack objects. For now we simply return the incoming FP. Refer to D53541
4751     // for more details.
4752     SDValue FnOp = Op.getOperand(1);
4753     SDValue IncomingFPOp = Op.getOperand(2);
4754     GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
4755     auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
4756     if (!Fn)
4757       report_fatal_error(
4758           "llvm.eh.recoverfp must take a function as the first argument");
4759     return IncomingFPOp;
4760   }
4761 
4762   case Intrinsic::aarch64_neon_vsri:
4763   case Intrinsic::aarch64_neon_vsli: {
4764     EVT Ty = Op.getValueType();
4765 
4766     if (!Ty.isVector())
4767       report_fatal_error("Unexpected type for aarch64_neon_vsli");
4768 
4769     assert(Op.getConstantOperandVal(3) <= Ty.getScalarSizeInBits());
4770 
4771     bool IsShiftRight = IntNo == Intrinsic::aarch64_neon_vsri;
4772     unsigned Opcode = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI;
4773     return DAG.getNode(Opcode, dl, Ty, Op.getOperand(1), Op.getOperand(2),
4774                        Op.getOperand(3));
4775   }
4776 
4777   case Intrinsic::aarch64_neon_srhadd:
4778   case Intrinsic::aarch64_neon_urhadd:
4779   case Intrinsic::aarch64_neon_shadd:
4780   case Intrinsic::aarch64_neon_uhadd: {
4781     bool IsSignedAdd = (IntNo == Intrinsic::aarch64_neon_srhadd ||
4782                         IntNo == Intrinsic::aarch64_neon_shadd);
4783     bool IsRoundingAdd = (IntNo == Intrinsic::aarch64_neon_srhadd ||
4784                           IntNo == Intrinsic::aarch64_neon_urhadd);
4785     unsigned Opcode = IsSignedAdd
4786                           ? (IsRoundingAdd ? ISD::AVGCEILS : ISD::AVGFLOORS)
4787                           : (IsRoundingAdd ? ISD::AVGCEILU : ISD::AVGFLOORU);
4788     return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
4789                        Op.getOperand(2));
4790   }
4791   case Intrinsic::aarch64_neon_sabd:
4792   case Intrinsic::aarch64_neon_uabd: {
4793     unsigned Opcode = IntNo == Intrinsic::aarch64_neon_uabd ? ISD::ABDU
4794                                                             : ISD::ABDS;
4795     return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
4796                        Op.getOperand(2));
4797   }
4798   case Intrinsic::aarch64_neon_saddlp:
4799   case Intrinsic::aarch64_neon_uaddlp: {
4800     unsigned Opcode = IntNo == Intrinsic::aarch64_neon_uaddlp
4801                           ? AArch64ISD::UADDLP
4802                           : AArch64ISD::SADDLP;
4803     return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1));
4804   }
4805   case Intrinsic::aarch64_neon_sdot:
4806   case Intrinsic::aarch64_neon_udot:
4807   case Intrinsic::aarch64_sve_sdot:
4808   case Intrinsic::aarch64_sve_udot: {
4809     unsigned Opcode = (IntNo == Intrinsic::aarch64_neon_udot ||
4810                        IntNo == Intrinsic::aarch64_sve_udot)
4811                           ? AArch64ISD::UDOT
4812                           : AArch64ISD::SDOT;
4813     return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
4814                        Op.getOperand(2), Op.getOperand(3));
4815   }
4816   case Intrinsic::get_active_lane_mask: {
4817     SDValue ID =
4818         DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, dl, MVT::i64);
4819     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(), ID,
4820                        Op.getOperand(1), Op.getOperand(2));
4821   }
4822   }
4823 }
4824 
4825 bool AArch64TargetLowering::shouldExtendGSIndex(EVT VT, EVT &EltTy) const {
4826   if (VT.getVectorElementType() == MVT::i8 ||
4827       VT.getVectorElementType() == MVT::i16) {
4828     EltTy = MVT::i32;
4829     return true;
4830   }
4831   return false;
4832 }
4833 
4834 bool AArch64TargetLowering::shouldRemoveExtendFromGSIndex(EVT IndexVT,
4835                                                           EVT DataVT) const {
4836   // SVE only supports implicit extension of 32-bit indices.
4837   if (!Subtarget->hasSVE() || IndexVT.getVectorElementType() != MVT::i32)
4838     return false;
4839 
4840   // Indices cannot be smaller than the main data type.
4841   if (IndexVT.getScalarSizeInBits() < DataVT.getScalarSizeInBits())
4842     return false;
4843 
4844   // Scalable vectors with "vscale * 2" or fewer elements sit within a 64-bit
4845   // element container type, which would violate the previous clause.
4846   return DataVT.isFixedLengthVector() || DataVT.getVectorMinNumElements() > 2;
4847 }
4848 
4849 bool AArch64TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
4850   return ExtVal.getValueType().isScalableVector() ||
4851          useSVEForFixedLengthVectorVT(
4852              ExtVal.getValueType(),
4853              /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors());
4854 }
4855 
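     // Select the SVE gather-load opcode for the given addressing mode: whether
     // the index is scaled by the element size, whether it is signed, and whether
     // it needs extending from 32 to 64 bits.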
4856 unsigned getGatherVecOpcode(bool IsScaled, bool IsSigned, bool NeedsExtend) {
4857   std::map<std::tuple<bool, bool, bool>, unsigned> AddrModes = {
4858       {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ false),
4859        AArch64ISD::GLD1_MERGE_ZERO},
4860       {std::make_tuple(/*Scaled*/ false, /*Signed*/ false, /*Extend*/ true),
4861        AArch64ISD::GLD1_UXTW_MERGE_ZERO},
4862       {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ false),
4863        AArch64ISD::GLD1_MERGE_ZERO},
4864       {std::make_tuple(/*Scaled*/ false, /*Signed*/ true, /*Extend*/ true),
4865        AArch64ISD::GLD1_SXTW_MERGE_ZERO},
4866       {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ false),
4867        AArch64ISD::GLD1_SCALED_MERGE_ZERO},
4868       {std::make_tuple(/*Scaled*/ true, /*Signed*/ false, /*Extend*/ true),
4869        AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO},
4870       {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ false),
4871        AArch64ISD::GLD1_SCALED_MERGE_ZERO},
4872       {std::make_tuple(/*Scaled*/ true, /*Signed*/ true, /*Extend*/ true),
4873        AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO},
4874   };
4875   auto Key = std::make_tuple(IsScaled, IsSigned, NeedsExtend);
4876   return AddrModes.find(Key)->second;
4877 }
4878 
4879 unsigned getSignExtendedGatherOpcode(unsigned Opcode) {
4880   switch (Opcode) {
4881   default:
4882     llvm_unreachable("unimplemented opcode");
4883     return Opcode;
4884   case AArch64ISD::GLD1_MERGE_ZERO:
4885     return AArch64ISD::GLD1S_MERGE_ZERO;
4886   case AArch64ISD::GLD1_IMM_MERGE_ZERO:
4887     return AArch64ISD::GLD1S_IMM_MERGE_ZERO;
4888   case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
4889     return AArch64ISD::GLD1S_UXTW_MERGE_ZERO;
4890   case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
4891     return AArch64ISD::GLD1S_SXTW_MERGE_ZERO;
4892   case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
4893     return AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
4894   case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
4895     return AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO;
4896   case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
4897     return AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO;
4898   }
4899 }
4900 
4901 SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
4902                                             SelectionDAG &DAG) const {
4903   MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(Op);
4904 
4905   SDLoc DL(Op);
4906   SDValue Chain = MGT->getChain();
4907   SDValue PassThru = MGT->getPassThru();
4908   SDValue Mask = MGT->getMask();
4909   SDValue BasePtr = MGT->getBasePtr();
4910   SDValue Index = MGT->getIndex();
4911   SDValue Scale = MGT->getScale();
4912   EVT VT = Op.getValueType();
4913   EVT MemVT = MGT->getMemoryVT();
4914   ISD::LoadExtType ExtType = MGT->getExtensionType();
4915   ISD::MemIndexType IndexType = MGT->getIndexType();
4916 
4917   // SVE supports only zero (and hence undef) passthrough values; everything
4918   // else must be handled manually by an explicit select on the load's output.
4919   if (!PassThru->isUndef() && !isZerosVector(PassThru.getNode())) {
4920     SDValue Ops[] = {Chain, DAG.getUNDEF(VT), Mask, BasePtr, Index, Scale};
4921     SDValue Load =
4922         DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,
4923                             MGT->getMemOperand(), IndexType, ExtType);
4924     SDValue Select = DAG.getSelect(DL, VT, Mask, Load, PassThru);
4925     return DAG.getMergeValues({Select, Load.getValue(1)}, DL);
4926   }
4927 
4928   bool IsScaled = MGT->isIndexScaled();
4929   bool IsSigned = MGT->isIndexSigned();
4930 
  // SVE only supports an index scaled by sizeof(MemVT.elt); everything else
  // must be calculated beforehand.
4933   uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue();
4934   if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
4935     assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
4936     EVT IndexVT = Index.getValueType();
4937     Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index,
4938                         DAG.getConstant(Log2_32(ScaleVal), DL, IndexVT));
4939     Scale = DAG.getTargetConstant(1, DL, Scale.getValueType());
4940 
4941     SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
4942     return DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,
4943                                MGT->getMemOperand(), IndexType, ExtType);
4944   }
4945 
4946   // Lower fixed length gather to a scalable equivalent.
4947   if (VT.isFixedLengthVector()) {
4948     assert(Subtarget->useSVEForFixedLengthVectors() &&
4949            "Cannot lower when not using SVE for fixed vectors!");
4950 
4951     // NOTE: Handle floating-point as if integer then bitcast the result.
4952     EVT DataVT = VT.changeVectorElementTypeToInteger();
4953     MemVT = MemVT.changeVectorElementTypeToInteger();
4954 
4955     // Find the smallest integer fixed length vector we can use for the gather.
4956     EVT PromotedVT = VT.changeVectorElementType(MVT::i32);
4957     if (DataVT.getVectorElementType() == MVT::i64 ||
4958         Index.getValueType().getVectorElementType() == MVT::i64 ||
4959         Mask.getValueType().getVectorElementType() == MVT::i64)
4960       PromotedVT = VT.changeVectorElementType(MVT::i64);
4961 
4962     // Promote vector operands except for passthrough, which we know is either
4963     // undef or zero, and thus best constructed directly.
4964     unsigned ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
4965     Index = DAG.getNode(ExtOpcode, DL, PromotedVT, Index);
4966     Mask = DAG.getNode(ISD::SIGN_EXTEND, DL, PromotedVT, Mask);
4967 
4968     // A promoted result type forces the need for an extending load.
4969     if (PromotedVT != DataVT && ExtType == ISD::NON_EXTLOAD)
4970       ExtType = ISD::EXTLOAD;
4971 
4972     EVT ContainerVT = getContainerForFixedLengthVector(DAG, PromotedVT);
4973 
4974     // Convert fixed length vector operands to scalable.
4975     MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType());
4976     Index = convertToScalableVector(DAG, ContainerVT, Index);
4977     Mask = convertFixedMaskToScalableVector(Mask, DAG);
4978     PassThru = PassThru->isUndef() ? DAG.getUNDEF(ContainerVT)
4979                                    : DAG.getConstant(0, DL, ContainerVT);
4980 
4981     // Emit equivalent scalable vector gather.
4982     SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
4983     SDValue Load =
4984         DAG.getMaskedGather(DAG.getVTList(ContainerVT, MVT::Other), MemVT, DL,
4985                             Ops, MGT->getMemOperand(), IndexType, ExtType);
4986 
4987     // Extract fixed length data then convert to the required result type.
4988     SDValue Result = convertFromScalableVector(DAG, PromotedVT, Load);
4989     Result = DAG.getNode(ISD::TRUNCATE, DL, DataVT, Result);
4990     if (VT.isFloatingPoint())
4991       Result = DAG.getNode(ISD::BITCAST, DL, VT, Result);
4992 
4993     return DAG.getMergeValues({Result, Load.getValue(1)}, DL);
4994   }
4995 
4996   // Everything else is legal.
4997   return Op;
4998 }
4999 
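// Lower MSCATTER similarly to MGATHER: pre-scale indices that are not scaled
// by the memory element size and convert fixed-length scatters into their
// scalable-vector equivalents.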
5000 SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
5001                                              SelectionDAG &DAG) const {
5002   MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(Op);
5003 
5004   SDLoc DL(Op);
5005   SDValue Chain = MSC->getChain();
5006   SDValue StoreVal = MSC->getValue();
5007   SDValue Mask = MSC->getMask();
5008   SDValue BasePtr = MSC->getBasePtr();
5009   SDValue Index = MSC->getIndex();
5010   SDValue Scale = MSC->getScale();
5011   EVT VT = StoreVal.getValueType();
5012   EVT MemVT = MSC->getMemoryVT();
5013   ISD::MemIndexType IndexType = MSC->getIndexType();
5014   bool Truncating = MSC->isTruncatingStore();
5015 
5016   bool IsScaled = MSC->isIndexScaled();
5017   bool IsSigned = MSC->isIndexSigned();
5018 
  // SVE only supports an index scaled by sizeof(MemVT.elt); everything else
  // must be calculated beforehand.
5021   uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue();
5022   if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
5023     assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
5024     EVT IndexVT = Index.getValueType();
5025     Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index,
5026                         DAG.getConstant(Log2_32(ScaleVal), DL, IndexVT));
5027     Scale = DAG.getTargetConstant(1, DL, Scale.getValueType());
5028 
5029     SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
5030     return DAG.getMaskedScatter(MSC->getVTList(), MemVT, DL, Ops,
5031                                 MSC->getMemOperand(), IndexType, Truncating);
5032   }
5033 
5034   // Lower fixed length scatter to a scalable equivalent.
5035   if (VT.isFixedLengthVector()) {
5036     assert(Subtarget->useSVEForFixedLengthVectors() &&
5037            "Cannot lower when not using SVE for fixed vectors!");
5038 
5039     // Once bitcast we treat floating-point scatters as if integer.
5040     if (VT.isFloatingPoint()) {
5041       VT = VT.changeVectorElementTypeToInteger();
5042       MemVT = MemVT.changeVectorElementTypeToInteger();
5043       StoreVal = DAG.getNode(ISD::BITCAST, DL, VT, StoreVal);
5044     }
5045 
5046     // Find the smallest integer fixed length vector we can use for the scatter.
5047     EVT PromotedVT = VT.changeVectorElementType(MVT::i32);
5048     if (VT.getVectorElementType() == MVT::i64 ||
5049         Index.getValueType().getVectorElementType() == MVT::i64 ||
5050         Mask.getValueType().getVectorElementType() == MVT::i64)
5051       PromotedVT = VT.changeVectorElementType(MVT::i64);
5052 
5053     // Promote vector operands.
5054     unsigned ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
5055     Index = DAG.getNode(ExtOpcode, DL, PromotedVT, Index);
5056     Mask = DAG.getNode(ISD::SIGN_EXTEND, DL, PromotedVT, Mask);
5057     StoreVal = DAG.getNode(ISD::ANY_EXTEND, DL, PromotedVT, StoreVal);
5058 
5059     // A promoted value type forces the need for a truncating store.
5060     if (PromotedVT != VT)
5061       Truncating = true;
5062 
5063     EVT ContainerVT = getContainerForFixedLengthVector(DAG, PromotedVT);
5064 
5065     // Convert fixed length vector operands to scalable.
5066     MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType());
5067     Index = convertToScalableVector(DAG, ContainerVT, Index);
5068     Mask = convertFixedMaskToScalableVector(Mask, DAG);
5069     StoreVal = convertToScalableVector(DAG, ContainerVT, StoreVal);
5070 
5071     // Emit equivalent scalable vector scatter.
5072     SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
5073     return DAG.getMaskedScatter(MSC->getVTList(), MemVT, DL, Ops,
5074                                 MSC->getMemOperand(), IndexType, Truncating);
5075   }
5076 
5077   // Everything else is legal.
5078   return Op;
5079 }
5080 
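/// Lower a masked load, either to its fixed-length SVE form or, when the
/// passthrough is neither undef nor zero, to a masked load with an undef
/// passthrough followed by an explicit select.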
5081 SDValue AArch64TargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
5082   SDLoc DL(Op);
5083   MaskedLoadSDNode *LoadNode = cast<MaskedLoadSDNode>(Op);
5084   assert(LoadNode && "Expected custom lowering of a masked load node");
5085   EVT VT = Op->getValueType(0);
5086 
5087   if (useSVEForFixedLengthVectorVT(
5088           VT,
5089           /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()))
5090     return LowerFixedLengthVectorMLoadToSVE(Op, DAG);
5091 
5092   SDValue PassThru = LoadNode->getPassThru();
5093   SDValue Mask = LoadNode->getMask();
5094 
5095   if (PassThru->isUndef() || isZerosVector(PassThru.getNode()))
5096     return Op;
5097 
5098   SDValue Load = DAG.getMaskedLoad(
5099       VT, DL, LoadNode->getChain(), LoadNode->getBasePtr(),
5100       LoadNode->getOffset(), Mask, DAG.getUNDEF(VT), LoadNode->getMemoryVT(),
5101       LoadNode->getMemOperand(), LoadNode->getAddressingMode(),
5102       LoadNode->getExtensionType());
5103 
5104   SDValue Result = DAG.getSelect(DL, VT, Mask, Load, PassThru);
5105 
5106   return DAG.getMergeValues({Result, Load.getValue(1)}, DL);
5107 }
5108 
5109 // Custom lower trunc store for v4i8 vectors, since it is promoted to v4i16.
5110 static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST,
5111                                         EVT VT, EVT MemVT,
5112                                         SelectionDAG &DAG) {
5113   assert(VT.isVector() && "VT should be a vector type");
5114   assert(MemVT == MVT::v4i8 && VT == MVT::v4i16);
5115 
5116   SDValue Value = ST->getValue();
5117 
  // First extend the promoted v4i16 to v8i16, truncate to v8i8, and extract
  // the word lane that represents the v4i8 subvector. This optimizes the
  // store to:
5121   //
5122   //   xtn  v0.8b, v0.8h
5123   //   str  s0, [x0]
5124 
5125   SDValue Undef = DAG.getUNDEF(MVT::i16);
5126   SDValue UndefVec = DAG.getBuildVector(MVT::v4i16, DL,
5127                                         {Undef, Undef, Undef, Undef});
5128 
5129   SDValue TruncExt = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16,
5130                                  Value, UndefVec);
5131   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, TruncExt);
5132 
5133   Trunc = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Trunc);
5134   SDValue ExtractTrunc = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
5135                                      Trunc, DAG.getConstant(0, DL, MVT::i64));
5136 
5137   return DAG.getStore(ST->getChain(), DL, ExtractTrunc,
5138                       ST->getBasePtr(), ST->getMemOperand());
5139 }
5140 
// Custom lowering for any store, vector or scalar and/or default or with a
// truncate operation. Currently we custom lower truncating stores from v4i16
// to v4i8, 256-bit non-temporal vector stores, volatile stores of i128, and
// i64x8 stores.
5144 SDValue AArch64TargetLowering::LowerSTORE(SDValue Op,
5145                                           SelectionDAG &DAG) const {
5146   SDLoc Dl(Op);
5147   StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
  assert(StoreNode && "Can only custom lower store nodes");
5149 
5150   SDValue Value = StoreNode->getValue();
5151 
5152   EVT VT = Value.getValueType();
5153   EVT MemVT = StoreNode->getMemoryVT();
5154 
5155   if (VT.isVector()) {
5156     if (useSVEForFixedLengthVectorVT(
5157             VT,
5158             /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()))
5159       return LowerFixedLengthVectorStoreToSVE(Op, DAG);
5160 
5161     unsigned AS = StoreNode->getAddressSpace();
5162     Align Alignment = StoreNode->getAlign();
5163     if (Alignment < MemVT.getStoreSize() &&
5164         !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment,
5165                                         StoreNode->getMemOperand()->getFlags(),
5166                                         nullptr)) {
5167       return scalarizeVectorStore(StoreNode, DAG);
5168     }
5169 
5170     if (StoreNode->isTruncatingStore() && VT == MVT::v4i16 &&
5171         MemVT == MVT::v4i8) {
5172       return LowerTruncateVectorStore(Dl, StoreNode, VT, MemVT, DAG);
5173     }
    // 256-bit non-temporal stores can be lowered to STNP. Do this as part of
    // the custom lowering, as there are no unpaired non-temporal stores and
    // legalization will break up 256-bit inputs.
5177     ElementCount EC = MemVT.getVectorElementCount();
5178     if (StoreNode->isNonTemporal() && MemVT.getSizeInBits() == 256u &&
5179         EC.isKnownEven() &&
5180         ((MemVT.getScalarSizeInBits() == 8u ||
5181           MemVT.getScalarSizeInBits() == 16u ||
5182           MemVT.getScalarSizeInBits() == 32u ||
5183           MemVT.getScalarSizeInBits() == 64u))) {
5184       SDValue Lo =
5185           DAG.getNode(ISD::EXTRACT_SUBVECTOR, Dl,
5186                       MemVT.getHalfNumVectorElementsVT(*DAG.getContext()),
5187                       StoreNode->getValue(), DAG.getConstant(0, Dl, MVT::i64));
5188       SDValue Hi =
5189           DAG.getNode(ISD::EXTRACT_SUBVECTOR, Dl,
5190                       MemVT.getHalfNumVectorElementsVT(*DAG.getContext()),
5191                       StoreNode->getValue(),
5192                       DAG.getConstant(EC.getKnownMinValue() / 2, Dl, MVT::i64));
5193       SDValue Result = DAG.getMemIntrinsicNode(
5194           AArch64ISD::STNP, Dl, DAG.getVTList(MVT::Other),
5195           {StoreNode->getChain(), Lo, Hi, StoreNode->getBasePtr()},
5196           StoreNode->getMemoryVT(), StoreNode->getMemOperand());
5197       return Result;
5198     }
5199   } else if (MemVT == MVT::i128 && StoreNode->isVolatile()) {
5200     return LowerStore128(Op, DAG);
5201   } else if (MemVT == MVT::i64x8) {
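    // LS64 store: split the 512-bit value into eight i64 parts and store each
    // part at Base + i * 8.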
5202     SDValue Value = StoreNode->getValue();
5203     assert(Value->getValueType(0) == MVT::i64x8);
5204     SDValue Chain = StoreNode->getChain();
5205     SDValue Base = StoreNode->getBasePtr();
5206     EVT PtrVT = Base.getValueType();
5207     for (unsigned i = 0; i < 8; i++) {
5208       SDValue Part = DAG.getNode(AArch64ISD::LS64_EXTRACT, Dl, MVT::i64,
5209                                  Value, DAG.getConstant(i, Dl, MVT::i32));
5210       SDValue Ptr = DAG.getNode(ISD::ADD, Dl, PtrVT, Base,
5211                                 DAG.getConstant(i * 8, Dl, PtrVT));
5212       Chain = DAG.getStore(Chain, Dl, Part, Ptr, StoreNode->getPointerInfo(),
5213                            StoreNode->getOriginalAlign());
5214     }
5215     return Chain;
5216   }
5217 
5218   return SDValue();
5219 }
5220 
5221 /// Lower atomic or volatile 128-bit stores to a single STP instruction.
5222 SDValue AArch64TargetLowering::LowerStore128(SDValue Op,
5223                                              SelectionDAG &DAG) const {
5224   MemSDNode *StoreNode = cast<MemSDNode>(Op);
5225   assert(StoreNode->getMemoryVT() == MVT::i128);
5226   assert(StoreNode->isVolatile() || StoreNode->isAtomic());
5227   assert(!StoreNode->isAtomic() ||
5228          StoreNode->getMergedOrdering() == AtomicOrdering::Unordered ||
5229          StoreNode->getMergedOrdering() == AtomicOrdering::Monotonic);
5230 
5231   SDValue Value = StoreNode->getOpcode() == ISD::STORE
5232                       ? StoreNode->getOperand(1)
5233                       : StoreNode->getOperand(2);
5234   SDLoc DL(Op);
5235   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, Value,
5236                            DAG.getConstant(0, DL, MVT::i64));
5237   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, Value,
5238                            DAG.getConstant(1, DL, MVT::i64));
5239   SDValue Result = DAG.getMemIntrinsicNode(
5240       AArch64ISD::STP, DL, DAG.getVTList(MVT::Other),
5241       {StoreNode->getChain(), Lo, Hi, StoreNode->getBasePtr()},
5242       StoreNode->getMemoryVT(), StoreNode->getMemOperand());
5243   return Result;
5244 }
5245 
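/// Custom lower loads: split i64x8 (LS64) loads into eight i64 loads, and
/// lower extending loads of v4i8 via an f32 load followed by a vector
/// extension.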
5246 SDValue AArch64TargetLowering::LowerLOAD(SDValue Op,
5247                                          SelectionDAG &DAG) const {
5248   SDLoc DL(Op);
5249   LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
5250   assert(LoadNode && "Expected custom lowering of a load node");
5251 
5252   if (LoadNode->getMemoryVT() == MVT::i64x8) {
5253     SmallVector<SDValue, 8> Ops;
5254     SDValue Base = LoadNode->getBasePtr();
5255     SDValue Chain = LoadNode->getChain();
5256     EVT PtrVT = Base.getValueType();
5257     for (unsigned i = 0; i < 8; i++) {
5258       SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base,
5259                                 DAG.getConstant(i * 8, DL, PtrVT));
5260       SDValue Part = DAG.getLoad(MVT::i64, DL, Chain, Ptr,
5261                                  LoadNode->getPointerInfo(),
5262                                  LoadNode->getOriginalAlign());
5263       Ops.push_back(Part);
5264       Chain = SDValue(Part.getNode(), 1);
5265     }
5266     SDValue Loaded = DAG.getNode(AArch64ISD::LS64_BUILD, DL, MVT::i64x8, Ops);
5267     return DAG.getMergeValues({Loaded, Chain}, DL);
5268   }
5269 
5270   // Custom lowering for extending v4i8 vector loads.
5271   EVT VT = Op->getValueType(0);
5272   assert((VT == MVT::v4i16 || VT == MVT::v4i32) && "Expected v4i16 or v4i32");
5273 
5274   if (LoadNode->getMemoryVT() != MVT::v4i8)
5275     return SDValue();
5276 
5277   unsigned ExtType;
5278   if (LoadNode->getExtensionType() == ISD::SEXTLOAD)
5279     ExtType = ISD::SIGN_EXTEND;
5280   else if (LoadNode->getExtensionType() == ISD::ZEXTLOAD ||
5281            LoadNode->getExtensionType() == ISD::EXTLOAD)
5282     ExtType = ISD::ZERO_EXTEND;
5283   else
5284     return SDValue();
5285 
5286   SDValue Load = DAG.getLoad(MVT::f32, DL, LoadNode->getChain(),
5287                              LoadNode->getBasePtr(), MachinePointerInfo());
5288   SDValue Chain = Load.getValue(1);
5289   SDValue Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f32, Load);
5290   SDValue BC = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Vec);
5291   SDValue Ext = DAG.getNode(ExtType, DL, MVT::v8i16, BC);
5292   Ext = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Ext,
5293                     DAG.getConstant(0, DL, MVT::i64));
5294   if (VT == MVT::v4i32)
5295     Ext = DAG.getNode(ExtType, DL, MVT::v4i32, Ext);
5296   return DAG.getMergeValues({Ext, Chain}, DL);
5297 }
5298 
5299 // Generate SUBS and CSEL for integer abs.
5300 SDValue AArch64TargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
5301   MVT VT = Op.getSimpleValueType();
5302 
5303   if (VT.isVector())
5304     return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABS_MERGE_PASSTHRU);
5305 
5306   SDLoc DL(Op);
5307   SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
5308                             Op.getOperand(0));
5309   // Generate SUBS & CSEL.
5310   SDValue Cmp =
5311       DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::i32),
5312                   Op.getOperand(0), DAG.getConstant(0, DL, VT));
5313   return DAG.getNode(AArch64ISD::CSEL, DL, VT, Op.getOperand(0), Neg,
5314                      DAG.getConstant(AArch64CC::PL, DL, MVT::i32),
5315                      Cmp.getValue(1));
5316 }
5317 
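// Lower BRCOND by emitting the condition as a conjunction (a CCMP chain) where
// possible; otherwise return an empty SDValue to get the default expansion.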
5318 static SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) {
5319   SDValue Chain = Op.getOperand(0);
5320   SDValue Cond = Op.getOperand(1);
5321   SDValue Dest = Op.getOperand(2);
5322 
5323   AArch64CC::CondCode CC;
5324   if (SDValue Cmp = emitConjunction(DAG, Cond, CC)) {
5325     SDLoc dl(Op);
5326     SDValue CCVal = DAG.getConstant(CC, dl, MVT::i32);
5327     return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
5328                        Cmp);
5329   }
5330 
5331   return SDValue();
5332 }
5333 
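// Top-level dispatch for operations marked as Custom: route each opcode to its
// dedicated lowering routine.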
5334 SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
5335                                               SelectionDAG &DAG) const {
5336   LLVM_DEBUG(dbgs() << "Custom lowering: ");
5337   LLVM_DEBUG(Op.dump());
5338 
5339   switch (Op.getOpcode()) {
5340   default:
5341     llvm_unreachable("unimplemented operand");
5342     return SDValue();
5343   case ISD::BITCAST:
5344     return LowerBITCAST(Op, DAG);
5345   case ISD::GlobalAddress:
5346     return LowerGlobalAddress(Op, DAG);
5347   case ISD::GlobalTLSAddress:
5348     return LowerGlobalTLSAddress(Op, DAG);
5349   case ISD::SETCC:
5350   case ISD::STRICT_FSETCC:
5351   case ISD::STRICT_FSETCCS:
5352     return LowerSETCC(Op, DAG);
5353   case ISD::BRCOND:
5354     return LowerBRCOND(Op, DAG);
5355   case ISD::BR_CC:
5356     return LowerBR_CC(Op, DAG);
5357   case ISD::SELECT:
5358     return LowerSELECT(Op, DAG);
5359   case ISD::SELECT_CC:
5360     return LowerSELECT_CC(Op, DAG);
5361   case ISD::JumpTable:
5362     return LowerJumpTable(Op, DAG);
5363   case ISD::BR_JT:
5364     return LowerBR_JT(Op, DAG);
5365   case ISD::ConstantPool:
5366     return LowerConstantPool(Op, DAG);
5367   case ISD::BlockAddress:
5368     return LowerBlockAddress(Op, DAG);
5369   case ISD::VASTART:
5370     return LowerVASTART(Op, DAG);
5371   case ISD::VACOPY:
5372     return LowerVACOPY(Op, DAG);
5373   case ISD::VAARG:
5374     return LowerVAARG(Op, DAG);
5375   case ISD::ADDCARRY:
5376     return lowerADDSUBCARRY(Op, DAG, AArch64ISD::ADCS, false /*unsigned*/);
5377   case ISD::SUBCARRY:
5378     return lowerADDSUBCARRY(Op, DAG, AArch64ISD::SBCS, false /*unsigned*/);
5379   case ISD::SADDO_CARRY:
5380     return lowerADDSUBCARRY(Op, DAG, AArch64ISD::ADCS, true /*signed*/);
5381   case ISD::SSUBO_CARRY:
5382     return lowerADDSUBCARRY(Op, DAG, AArch64ISD::SBCS, true /*signed*/);
5383   case ISD::SADDO:
5384   case ISD::UADDO:
5385   case ISD::SSUBO:
5386   case ISD::USUBO:
5387   case ISD::SMULO:
5388   case ISD::UMULO:
5389     return LowerXALUO(Op, DAG);
5390   case ISD::FADD:
5391     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FADD_PRED);
5392   case ISD::FSUB:
5393     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FSUB_PRED);
5394   case ISD::FMUL:
5395     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMUL_PRED);
5396   case ISD::FMA:
5397     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMA_PRED);
5398   case ISD::FDIV:
5399     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FDIV_PRED);
5400   case ISD::FNEG:
5401     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FNEG_MERGE_PASSTHRU);
5402   case ISD::FCEIL:
5403     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FCEIL_MERGE_PASSTHRU);
5404   case ISD::FFLOOR:
5405     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FFLOOR_MERGE_PASSTHRU);
5406   case ISD::FNEARBYINT:
5407     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FNEARBYINT_MERGE_PASSTHRU);
5408   case ISD::FRINT:
5409     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FRINT_MERGE_PASSTHRU);
5410   case ISD::FROUND:
5411     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FROUND_MERGE_PASSTHRU);
5412   case ISD::FROUNDEVEN:
5413     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU);
5414   case ISD::FTRUNC:
5415     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FTRUNC_MERGE_PASSTHRU);
5416   case ISD::FSQRT:
5417     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FSQRT_MERGE_PASSTHRU);
5418   case ISD::FABS:
5419     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FABS_MERGE_PASSTHRU);
5420   case ISD::FP_ROUND:
5421   case ISD::STRICT_FP_ROUND:
5422     return LowerFP_ROUND(Op, DAG);
5423   case ISD::FP_EXTEND:
5424     return LowerFP_EXTEND(Op, DAG);
5425   case ISD::FRAMEADDR:
5426     return LowerFRAMEADDR(Op, DAG);
5427   case ISD::SPONENTRY:
5428     return LowerSPONENTRY(Op, DAG);
5429   case ISD::RETURNADDR:
5430     return LowerRETURNADDR(Op, DAG);
5431   case ISD::ADDROFRETURNADDR:
5432     return LowerADDROFRETURNADDR(Op, DAG);
5433   case ISD::CONCAT_VECTORS:
5434     return LowerCONCAT_VECTORS(Op, DAG);
5435   case ISD::INSERT_VECTOR_ELT:
5436     return LowerINSERT_VECTOR_ELT(Op, DAG);
5437   case ISD::EXTRACT_VECTOR_ELT:
5438     return LowerEXTRACT_VECTOR_ELT(Op, DAG);
5439   case ISD::BUILD_VECTOR:
5440     return LowerBUILD_VECTOR(Op, DAG);
5441   case ISD::VECTOR_SHUFFLE:
5442     return LowerVECTOR_SHUFFLE(Op, DAG);
5443   case ISD::SPLAT_VECTOR:
5444     return LowerSPLAT_VECTOR(Op, DAG);
5445   case ISD::EXTRACT_SUBVECTOR:
5446     return LowerEXTRACT_SUBVECTOR(Op, DAG);
5447   case ISD::INSERT_SUBVECTOR:
5448     return LowerINSERT_SUBVECTOR(Op, DAG);
5449   case ISD::SDIV:
5450   case ISD::UDIV:
5451     return LowerDIV(Op, DAG);
5452   case ISD::SMIN:
5453   case ISD::UMIN:
5454   case ISD::SMAX:
5455   case ISD::UMAX:
5456     return LowerMinMax(Op, DAG);
5457   case ISD::SRA:
5458   case ISD::SRL:
5459   case ISD::SHL:
5460     return LowerVectorSRA_SRL_SHL(Op, DAG);
5461   case ISD::SHL_PARTS:
5462   case ISD::SRL_PARTS:
5463   case ISD::SRA_PARTS:
5464     return LowerShiftParts(Op, DAG);
5465   case ISD::CTPOP:
5466     return LowerCTPOP(Op, DAG);
5467   case ISD::FCOPYSIGN:
5468     return LowerFCOPYSIGN(Op, DAG);
5469   case ISD::OR:
5470     return LowerVectorOR(Op, DAG);
5471   case ISD::XOR:
5472     return LowerXOR(Op, DAG);
5473   case ISD::PREFETCH:
5474     return LowerPREFETCH(Op, DAG);
5475   case ISD::SINT_TO_FP:
5476   case ISD::UINT_TO_FP:
5477   case ISD::STRICT_SINT_TO_FP:
5478   case ISD::STRICT_UINT_TO_FP:
5479     return LowerINT_TO_FP(Op, DAG);
5480   case ISD::FP_TO_SINT:
5481   case ISD::FP_TO_UINT:
5482   case ISD::STRICT_FP_TO_SINT:
5483   case ISD::STRICT_FP_TO_UINT:
5484     return LowerFP_TO_INT(Op, DAG);
5485   case ISD::FP_TO_SINT_SAT:
5486   case ISD::FP_TO_UINT_SAT:
5487     return LowerFP_TO_INT_SAT(Op, DAG);
5488   case ISD::FSINCOS:
5489     return LowerFSINCOS(Op, DAG);
5490   case ISD::FLT_ROUNDS_:
5491     return LowerFLT_ROUNDS_(Op, DAG);
5492   case ISD::SET_ROUNDING:
5493     return LowerSET_ROUNDING(Op, DAG);
5494   case ISD::MUL:
5495     return LowerMUL(Op, DAG);
5496   case ISD::MULHS:
5497     return LowerToPredicatedOp(Op, DAG, AArch64ISD::MULHS_PRED);
5498   case ISD::MULHU:
5499     return LowerToPredicatedOp(Op, DAG, AArch64ISD::MULHU_PRED);
5500   case ISD::INTRINSIC_W_CHAIN:
5501     return LowerINTRINSIC_W_CHAIN(Op, DAG);
5502   case ISD::INTRINSIC_WO_CHAIN:
5503     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
5504   case ISD::ATOMIC_STORE:
5505     if (cast<MemSDNode>(Op)->getMemoryVT() == MVT::i128) {
5506       assert(Subtarget->hasLSE2());
5507       return LowerStore128(Op, DAG);
5508     }
5509     return SDValue();
5510   case ISD::STORE:
5511     return LowerSTORE(Op, DAG);
5512   case ISD::MSTORE:
5513     return LowerFixedLengthVectorMStoreToSVE(Op, DAG);
5514   case ISD::MGATHER:
5515     return LowerMGATHER(Op, DAG);
5516   case ISD::MSCATTER:
5517     return LowerMSCATTER(Op, DAG);
5518   case ISD::VECREDUCE_SEQ_FADD:
5519     return LowerVECREDUCE_SEQ_FADD(Op, DAG);
5520   case ISD::VECREDUCE_ADD:
5521   case ISD::VECREDUCE_AND:
5522   case ISD::VECREDUCE_OR:
5523   case ISD::VECREDUCE_XOR:
5524   case ISD::VECREDUCE_SMAX:
5525   case ISD::VECREDUCE_SMIN:
5526   case ISD::VECREDUCE_UMAX:
5527   case ISD::VECREDUCE_UMIN:
5528   case ISD::VECREDUCE_FADD:
5529   case ISD::VECREDUCE_FMAX:
5530   case ISD::VECREDUCE_FMIN:
5531     return LowerVECREDUCE(Op, DAG);
5532   case ISD::ATOMIC_LOAD_SUB:
5533     return LowerATOMIC_LOAD_SUB(Op, DAG);
5534   case ISD::ATOMIC_LOAD_AND:
5535     return LowerATOMIC_LOAD_AND(Op, DAG);
5536   case ISD::DYNAMIC_STACKALLOC:
5537     return LowerDYNAMIC_STACKALLOC(Op, DAG);
5538   case ISD::VSCALE:
5539     return LowerVSCALE(Op, DAG);
5540   case ISD::ANY_EXTEND:
5541   case ISD::SIGN_EXTEND:
5542   case ISD::ZERO_EXTEND:
5543     return LowerFixedLengthVectorIntExtendToSVE(Op, DAG);
5544   case ISD::SIGN_EXTEND_INREG: {
    // Only custom lower when ExtraVT has a legal byte-based element type.
5546     EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
5547     EVT ExtraEltVT = ExtraVT.getVectorElementType();
5548     if ((ExtraEltVT != MVT::i8) && (ExtraEltVT != MVT::i16) &&
5549         (ExtraEltVT != MVT::i32) && (ExtraEltVT != MVT::i64))
5550       return SDValue();
5551 
5552     return LowerToPredicatedOp(Op, DAG,
5553                                AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU);
5554   }
5555   case ISD::TRUNCATE:
5556     return LowerTRUNCATE(Op, DAG);
5557   case ISD::MLOAD:
5558     return LowerMLOAD(Op, DAG);
5559   case ISD::LOAD:
5560     if (useSVEForFixedLengthVectorVT(Op.getValueType()))
5561       return LowerFixedLengthVectorLoadToSVE(Op, DAG);
5562     return LowerLOAD(Op, DAG);
5563   case ISD::ADD:
5564   case ISD::AND:
5565   case ISD::SUB:
5566     return LowerToScalableOp(Op, DAG);
5567   case ISD::FMAXIMUM:
5568     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMAX_PRED);
5569   case ISD::FMAXNUM:
5570     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMAXNM_PRED);
5571   case ISD::FMINIMUM:
5572     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMIN_PRED);
5573   case ISD::FMINNUM:
5574     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMINNM_PRED);
5575   case ISD::VSELECT:
5576     return LowerFixedLengthVectorSelectToSVE(Op, DAG);
5577   case ISD::ABS:
5578     return LowerABS(Op, DAG);
5579   case ISD::ABDS:
5580     return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDS_PRED);
5581   case ISD::ABDU:
5582     return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDU_PRED);
5583   case ISD::BITREVERSE:
5584     return LowerBitreverse(Op, DAG);
5585   case ISD::BSWAP:
5586     return LowerToPredicatedOp(Op, DAG, AArch64ISD::BSWAP_MERGE_PASSTHRU);
5587   case ISD::CTLZ:
5588     return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTLZ_MERGE_PASSTHRU);
5589   case ISD::CTTZ:
5590     return LowerCTTZ(Op, DAG);
5591   case ISD::VECTOR_SPLICE:
5592     return LowerVECTOR_SPLICE(Op, DAG);
5593   case ISD::STRICT_LROUND:
5594   case ISD::STRICT_LLROUND:
5595   case ISD::STRICT_LRINT:
5596   case ISD::STRICT_LLRINT: {
5597     assert(Op.getOperand(1).getValueType() == MVT::f16 &&
5598            "Expected custom lowering of rounding operations only for f16");
5599     SDLoc DL(Op);
5600     SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other},
5601                               {Op.getOperand(0), Op.getOperand(1)});
5602     return DAG.getNode(Op.getOpcode(), DL, {Op.getValueType(), MVT::Other},
5603                        {Ext.getValue(1), Ext.getValue(0)});
5604   }
5605   }
5606 }
5607 
5608 bool AArch64TargetLowering::mergeStoresAfterLegalization(EVT VT) const {
5609   return !Subtarget->useSVEForFixedLengthVectors();
5610 }
5611 
5612 bool AArch64TargetLowering::useSVEForFixedLengthVectorVT(
5613     EVT VT, bool OverrideNEON) const {
5614   if (!VT.isFixedLengthVector() || !VT.isSimple())
5615     return false;
5616 
5617   // Don't use SVE for vectors we cannot scalarize if required.
5618   switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
5619   // Fixed length predicates should be promoted to i8.
5620   // NOTE: This is consistent with how NEON (and thus 64/128bit vectors) work.
5621   case MVT::i1:
5622   default:
5623     return false;
5624   case MVT::i8:
5625   case MVT::i16:
5626   case MVT::i32:
5627   case MVT::i64:
5628   case MVT::f16:
5629   case MVT::f32:
5630   case MVT::f64:
5631     break;
5632   }
5633 
5634   // All SVE implementations support NEON sized vectors.
5635   if (OverrideNEON && (VT.is128BitVector() || VT.is64BitVector()))
5636     return Subtarget->hasSVE();
5637 
5638   // Ensure NEON MVTs only belong to a single register class.
5639   if (VT.getFixedSizeInBits() <= 128)
5640     return false;
5641 
5642   // Ensure wider than NEON code generation is enabled.
5643   if (!Subtarget->useSVEForFixedLengthVectors())
5644     return false;
5645 
5646   // Don't use SVE for types that don't fit.
5647   if (VT.getFixedSizeInBits() > Subtarget->getMinSVEVectorSizeInBits())
5648     return false;
5649 
5650   // TODO: Perhaps an artificial restriction, but worth having whilst getting
5651   // the base fixed length SVE support in place.
5652   if (!VT.isPow2VectorType())
5653     return false;
5654 
5655   return true;
5656 }
5657 
5658 //===----------------------------------------------------------------------===//
5659 //                      Calling Convention Implementation
5660 //===----------------------------------------------------------------------===//
5661 
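/// Return the intrinsic ID of an INTRINSIC_WO_CHAIN node, or
/// Intrinsic::not_intrinsic for anything else.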
5662 static unsigned getIntrinsicID(const SDNode *N) {
5663   unsigned Opcode = N->getOpcode();
5664   switch (Opcode) {
5665   default:
5666     return Intrinsic::not_intrinsic;
5667   case ISD::INTRINSIC_WO_CHAIN: {
5668     unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5669     if (IID < Intrinsic::num_intrinsics)
5670       return IID;
5671     return Intrinsic::not_intrinsic;
5672   }
5673   }
5674 }
5675 
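/// Reassociation is not profitable when N0 has more than one use, or when it
/// would break up an add feeding a widening multiply that can be combined into
/// smlal/umlal.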
5676 bool AArch64TargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0,
5677                                                 SDValue N1) const {
5678   if (!N0.hasOneUse())
5679     return false;
5680 
5681   unsigned IID = getIntrinsicID(N1.getNode());
5682   // Avoid reassociating expressions that can be lowered to smlal/umlal.
5683   if (IID == Intrinsic::aarch64_neon_umull ||
5684       N1.getOpcode() == AArch64ISD::UMULL ||
5685       IID == Intrinsic::aarch64_neon_smull ||
5686       N1.getOpcode() == AArch64ISD::SMULL)
5687     return N0.getOpcode() != ISD::ADD;
5688 
5689   return true;
5690 }
5691 
5692 /// Selects the correct CCAssignFn for a given CallingConvention value.
5693 CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
5694                                                      bool IsVarArg) const {
5695   switch (CC) {
5696   default:
5697     report_fatal_error("Unsupported calling convention.");
5698   case CallingConv::WebKit_JS:
5699     return CC_AArch64_WebKit_JS;
5700   case CallingConv::GHC:
5701     return CC_AArch64_GHC;
5702   case CallingConv::C:
5703   case CallingConv::Fast:
5704   case CallingConv::PreserveMost:
5705   case CallingConv::CXX_FAST_TLS:
5706   case CallingConv::Swift:
5707   case CallingConv::SwiftTail:
5708   case CallingConv::Tail:
5709     if (Subtarget->isTargetWindows() && IsVarArg)
5710       return CC_AArch64_Win64_VarArg;
5711     if (!Subtarget->isTargetDarwin())
5712       return CC_AArch64_AAPCS;
5713     if (!IsVarArg)
5714       return CC_AArch64_DarwinPCS;
5715     return Subtarget->isTargetILP32() ? CC_AArch64_DarwinPCS_ILP32_VarArg
5716                                       : CC_AArch64_DarwinPCS_VarArg;
  case CallingConv::Win64:
    return IsVarArg ? CC_AArch64_Win64_VarArg : CC_AArch64_AAPCS;
  case CallingConv::CFGuard_Check:
    return CC_AArch64_Win64_CFGuard_Check;
  case CallingConv::AArch64_VectorCall:
  case CallingConv::AArch64_SVE_VectorCall:
    return CC_AArch64_AAPCS;
5724   }
5725 }
5726 
5727 CCAssignFn *
5728 AArch64TargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const {
5729   return CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS
5730                                       : RetCC_AArch64_AAPCS;
5731 }
5732 
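/// Lower incoming formal arguments: assign each argument to a register or
/// stack location according to the calling convention, materialize the
/// argument values, and set up the register save area for variadic functions.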
5733 SDValue AArch64TargetLowering::LowerFormalArguments(
5734     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
5735     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
5736     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5737   MachineFunction &MF = DAG.getMachineFunction();
5738   const Function &F = MF.getFunction();
5739   MachineFrameInfo &MFI = MF.getFrameInfo();
5740   bool IsWin64 = Subtarget->isCallingConvWin64(F.getCallingConv());
5741   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
5742 
5743   SmallVector<ISD::OutputArg, 4> Outs;
5744   GetReturnInfo(CallConv, F.getReturnType(), F.getAttributes(), Outs,
5745                 DAG.getTargetLoweringInfo(), MF.getDataLayout());
  if (any_of(Outs,
             [](ISD::OutputArg &Out) { return Out.VT.isScalableVector(); }))
5747     FuncInfo->setIsSVECC(true);
5748 
5749   // Assign locations to all of the incoming arguments.
5750   SmallVector<CCValAssign, 16> ArgLocs;
5751   DenseMap<unsigned, SDValue> CopiedRegs;
5752   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
5753 
5754   // At this point, Ins[].VT may already be promoted to i32. To correctly
5755   // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and
5756   // i8 to CC_AArch64_AAPCS with i32 being ValVT and i8 being LocVT.
5757   // Since AnalyzeFormalArguments uses Ins[].VT for both ValVT and LocVT, here
5758   // we use a special version of AnalyzeFormalArguments to pass in ValVT and
5759   // LocVT.
5760   unsigned NumArgs = Ins.size();
5761   Function::const_arg_iterator CurOrigArg = F.arg_begin();
5762   unsigned CurArgIdx = 0;
5763   for (unsigned i = 0; i != NumArgs; ++i) {
5764     MVT ValVT = Ins[i].VT;
5765     if (Ins[i].isOrigArg()) {
5766       std::advance(CurOrigArg, Ins[i].getOrigArgIndex() - CurArgIdx);
5767       CurArgIdx = Ins[i].getOrigArgIndex();
5768 
5769       // Get type of the original argument.
5770       EVT ActualVT = getValueType(DAG.getDataLayout(), CurOrigArg->getType(),
5771                                   /*AllowUnknown*/ true);
5772       MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other;
5773       // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
5774       if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
5775         ValVT = MVT::i8;
5776       else if (ActualMVT == MVT::i16)
5777         ValVT = MVT::i16;
5778     }
5779     bool UseVarArgCC = false;
5780     if (IsWin64)
5781       UseVarArgCC = isVarArg;
5782     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, UseVarArgCC);
5783     bool Res =
5784         AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo);
5785     assert(!Res && "Call operand has unhandled type");
5786     (void)Res;
5787   }
5788   SmallVector<SDValue, 16> ArgValues;
5789   unsigned ExtraArgLocs = 0;
5790   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5791     CCValAssign &VA = ArgLocs[i - ExtraArgLocs];
5792 
5793     if (Ins[i].Flags.isByVal()) {
5794       // Byval is used for HFAs in the PCS, but the system should work in a
5795       // non-compliant manner for larger structs.
5796       EVT PtrVT = getPointerTy(DAG.getDataLayout());
5797       int Size = Ins[i].Flags.getByValSize();
5798       unsigned NumRegs = (Size + 7) / 8;
5799 
      // FIXME: This works on big-endian for composite byvals, which are the
      // common case. It should work for fundamental types too.
5802       unsigned FrameIdx =
5803         MFI.CreateFixedObject(8 * NumRegs, VA.getLocMemOffset(), false);
5804       SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrVT);
5805       InVals.push_back(FrameIdxN);
5806 
5807       continue;
5808     }
5809 
5810     if (Ins[i].Flags.isSwiftAsync())
5811       MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
5812 
5813     SDValue ArgValue;
5814     if (VA.isRegLoc()) {
5815       // Arguments stored in registers.
5816       EVT RegVT = VA.getLocVT();
5817       const TargetRegisterClass *RC;
5818 
5819       if (RegVT == MVT::i32)
5820         RC = &AArch64::GPR32RegClass;
5821       else if (RegVT == MVT::i64)
5822         RC = &AArch64::GPR64RegClass;
5823       else if (RegVT == MVT::f16 || RegVT == MVT::bf16)
5824         RC = &AArch64::FPR16RegClass;
5825       else if (RegVT == MVT::f32)
5826         RC = &AArch64::FPR32RegClass;
5827       else if (RegVT == MVT::f64 || RegVT.is64BitVector())
5828         RC = &AArch64::FPR64RegClass;
5829       else if (RegVT == MVT::f128 || RegVT.is128BitVector())
5830         RC = &AArch64::FPR128RegClass;
5831       else if (RegVT.isScalableVector() &&
5832                RegVT.getVectorElementType() == MVT::i1) {
5833         FuncInfo->setIsSVECC(true);
5834         RC = &AArch64::PPRRegClass;
5835       } else if (RegVT.isScalableVector()) {
5836         FuncInfo->setIsSVECC(true);
5837         RC = &AArch64::ZPRRegClass;
5838       } else
5839         llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
5840 
5841       // Transform the arguments in physical registers into virtual ones.
5842       Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
5843       ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
5844 
5845       // If this is an 8, 16 or 32-bit value, it is really passed promoted
5846       // to 64 bits.  Insert an assert[sz]ext to capture this, then
5847       // truncate to the right size.
5848       switch (VA.getLocInfo()) {
5849       default:
5850         llvm_unreachable("Unknown loc info!");
5851       case CCValAssign::Full:
5852         break;
5853       case CCValAssign::Indirect:
5854         assert(VA.getValVT().isScalableVector() &&
5855                "Only scalable vectors can be passed indirectly");
5856         break;
5857       case CCValAssign::BCvt:
5858         ArgValue = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), ArgValue);
5859         break;
5860       case CCValAssign::AExt:
5861       case CCValAssign::SExt:
5862       case CCValAssign::ZExt:
5863         break;
5864       case CCValAssign::AExtUpper:
5865         ArgValue = DAG.getNode(ISD::SRL, DL, RegVT, ArgValue,
5866                                DAG.getConstant(32, DL, RegVT));
5867         ArgValue = DAG.getZExtOrTrunc(ArgValue, DL, VA.getValVT());
5868         break;
5869       }
5870     } else { // VA.isRegLoc()
5871       assert(VA.isMemLoc() && "CCValAssign is neither reg nor mem");
5872       unsigned ArgOffset = VA.getLocMemOffset();
5873       unsigned ArgSize = (VA.getLocInfo() == CCValAssign::Indirect
5874                               ? VA.getLocVT().getSizeInBits()
5875                               : VA.getValVT().getSizeInBits()) / 8;
5876 
5877       uint32_t BEAlign = 0;
5878       if (!Subtarget->isLittleEndian() && ArgSize < 8 &&
5879           !Ins[i].Flags.isInConsecutiveRegs())
5880         BEAlign = 8 - ArgSize;
5881 
5882       int FI = MFI.CreateFixedObject(ArgSize, ArgOffset + BEAlign, true);
5883 
5884       // Create load nodes to retrieve arguments from the stack.
5885       SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
5886 
      // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
5888       ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
5889       MVT MemVT = VA.getValVT();
5890 
5891       switch (VA.getLocInfo()) {
5892       default:
5893         break;
5894       case CCValAssign::Trunc:
5895       case CCValAssign::BCvt:
5896         MemVT = VA.getLocVT();
5897         break;
5898       case CCValAssign::Indirect:
5899         assert(VA.getValVT().isScalableVector() &&
5900                "Only scalable vectors can be passed indirectly");
5901         MemVT = VA.getLocVT();
5902         break;
5903       case CCValAssign::SExt:
5904         ExtType = ISD::SEXTLOAD;
5905         break;
5906       case CCValAssign::ZExt:
5907         ExtType = ISD::ZEXTLOAD;
5908         break;
5909       case CCValAssign::AExt:
5910         ExtType = ISD::EXTLOAD;
5911         break;
5912       }
5913 
5914       ArgValue =
5915           DAG.getExtLoad(ExtType, DL, VA.getLocVT(), Chain, FIN,
5916                          MachinePointerInfo::getFixedStack(MF, FI), MemVT);
5917     }
5918 
5919     if (VA.getLocInfo() == CCValAssign::Indirect) {
5920       assert(VA.getValVT().isScalableVector() &&
5921            "Only scalable vectors can be passed indirectly");
5922 
5923       uint64_t PartSize = VA.getValVT().getStoreSize().getKnownMinSize();
5924       unsigned NumParts = 1;
5925       if (Ins[i].Flags.isInConsecutiveRegs()) {
5926         assert(!Ins[i].Flags.isInConsecutiveRegsLast());
5927         while (!Ins[i + NumParts - 1].Flags.isInConsecutiveRegsLast())
5928           ++NumParts;
5929       }
5930 
5931       MVT PartLoad = VA.getValVT();
5932       SDValue Ptr = ArgValue;
5933 
5934       // Ensure we generate all loads for each tuple part, whilst updating the
5935       // pointer after each load correctly using vscale.
5936       while (NumParts > 0) {
5937         ArgValue = DAG.getLoad(PartLoad, DL, Chain, Ptr, MachinePointerInfo());
5938         InVals.push_back(ArgValue);
5939         NumParts--;
5940         if (NumParts > 0) {
5941           SDValue BytesIncrement = DAG.getVScale(
5942               DL, Ptr.getValueType(),
5943               APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize));
5944           SDNodeFlags Flags;
5945           Flags.setNoUnsignedWrap(true);
5946           Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
5947                             BytesIncrement, Flags);
5948           ExtraArgLocs++;
5949           i++;
5950         }
5951       }
5952     } else {
5953       if (Subtarget->isTargetILP32() && Ins[i].Flags.isPointer())
5954         ArgValue = DAG.getNode(ISD::AssertZext, DL, ArgValue.getValueType(),
5955                                ArgValue, DAG.getValueType(MVT::i32));
5956 
5957       // i1 arguments are zero-extended to i8 by the caller. Emit a
5958       // hint to reflect this.
5959       if (Ins[i].isOrigArg()) {
5960         Argument *OrigArg = F.getArg(Ins[i].getOrigArgIndex());
5961         if (OrigArg->getType()->isIntegerTy(1)) {
5962           if (!Ins[i].Flags.isZExt()) {
5963             ArgValue = DAG.getNode(AArch64ISD::ASSERT_ZEXT_BOOL, DL,
5964                                    ArgValue.getValueType(), ArgValue);
5965           }
5966         }
5967       }
5968 
5969       InVals.push_back(ArgValue);
5970     }
5971   }
5972   assert((ArgLocs.size() + ExtraArgLocs) == Ins.size());
5973 
5974   // varargs
5975   if (isVarArg) {
5976     if (!Subtarget->isTargetDarwin() || IsWin64) {
5977       // The AAPCS variadic function ABI is identical to the non-variadic
5978       // one. As a result there may be more arguments in registers and we should
5979       // save them for future reference.
5980       // Win64 variadic functions also pass arguments in registers, but all float
5981       // arguments are passed in integer registers.
5982       saveVarArgRegisters(CCInfo, DAG, DL, Chain);
5983     }
5984 
5985     // This will point to the next argument passed via stack.
5986     unsigned StackOffset = CCInfo.getNextStackOffset();
    // We currently pass all varargs at 8-byte alignment, or 4 bytes for ILP32.
5988     StackOffset = alignTo(StackOffset, Subtarget->isTargetILP32() ? 4 : 8);
5989     FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
5990 
5991     if (MFI.hasMustTailInVarArgFunc()) {
5992       SmallVector<MVT, 2> RegParmTypes;
5993       RegParmTypes.push_back(MVT::i64);
5994       RegParmTypes.push_back(MVT::f128);
5995       // Compute the set of forwarded registers. The rest are scratch.
5996       SmallVectorImpl<ForwardedRegister> &Forwards =
5997                                        FuncInfo->getForwardedMustTailRegParms();
5998       CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes,
5999                                                CC_AArch64_AAPCS);
6000 
6001       // Conservatively forward X8, since it might be used for aggregate return.
6002       if (!CCInfo.isAllocated(AArch64::X8)) {
6003         Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
6004         Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
6005       }
6006     }
6007   }
6008 
6009   // On Windows, InReg pointers must be returned, so record the pointer in a
6010   // virtual register at the start of the function so it can be returned in the
6011   // epilogue.
6012   if (IsWin64) {
6013     for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
6014       if (Ins[I].Flags.isInReg()) {
6015         assert(!FuncInfo->getSRetReturnReg());
6016 
6017         MVT PtrTy = getPointerTy(DAG.getDataLayout());
6018         Register Reg =
6019             MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
6020         FuncInfo->setSRetReturnReg(Reg);
6021 
6022         SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[I]);
6023         Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
6024         break;
6025       }
6026     }
6027   }
6028 
6029   unsigned StackArgSize = CCInfo.getNextStackOffset();
6030   bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
6031   if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
6032     // This is a non-standard ABI so by fiat I say we're allowed to make full
6033     // use of the stack area to be popped, which must be aligned to 16 bytes in
6034     // any case:
6035     StackArgSize = alignTo(StackArgSize, 16);
6036 
6037     // If we're expected to restore the stack (e.g. fastcc) then we'll be adding
6038     // a multiple of 16.
6039     FuncInfo->setArgumentStackToRestore(StackArgSize);
6040 
6041     // This realignment carries over to the available bytes below. Our own
6042     // callers will guarantee the space is free by giving an aligned value to
6043     // CALLSEQ_START.
6044   }
6045   // Even if we're not expected to free up the space, it's useful to know how
6046   // much is there while considering tail calls (because we can reuse it).
6047   FuncInfo->setBytesInStackArgArea(StackArgSize);
6048 
6049   if (Subtarget->hasCustomCallingConv())
6050     Subtarget->getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);
6051 
6052   return Chain;
6053 }
6054 
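/// Store the remaining unallocated GPR argument registers (and, when FP
/// argument registers are used by the ABI, the remaining FPR argument
/// registers) to the stack so that va_start/va_arg can find them.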
6055 void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
6056                                                 SelectionDAG &DAG,
6057                                                 const SDLoc &DL,
6058                                                 SDValue &Chain) const {
6059   MachineFunction &MF = DAG.getMachineFunction();
6060   MachineFrameInfo &MFI = MF.getFrameInfo();
6061   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
6062   auto PtrVT = getPointerTy(DAG.getDataLayout());
  bool IsWin64 =
      Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv());
6064 
6065   SmallVector<SDValue, 8> MemOps;
6066 
6067   static const MCPhysReg GPRArgRegs[] = { AArch64::X0, AArch64::X1, AArch64::X2,
6068                                           AArch64::X3, AArch64::X4, AArch64::X5,
6069                                           AArch64::X6, AArch64::X7 };
6070   static const unsigned NumGPRArgRegs = array_lengthof(GPRArgRegs);
6071   unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs);
6072 
6073   unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR);
6074   int GPRIdx = 0;
6075   if (GPRSaveSize != 0) {
6076     if (IsWin64) {
6077       GPRIdx = MFI.CreateFixedObject(GPRSaveSize, -(int)GPRSaveSize, false);
6078       if (GPRSaveSize & 15)
6079         // The extra size here, if triggered, will always be 8.
        MFI.CreateFixedObject(16 - (GPRSaveSize & 15),
                              -(int)alignTo(GPRSaveSize, 16), false);
6081     } else
6082       GPRIdx = MFI.CreateStackObject(GPRSaveSize, Align(8), false);
6083 
6084     SDValue FIN = DAG.getFrameIndex(GPRIdx, PtrVT);
6085 
6086     for (unsigned i = FirstVariadicGPR; i < NumGPRArgRegs; ++i) {
6087       Register VReg = MF.addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass);
6088       SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
6089       SDValue Store =
6090           DAG.getStore(Val.getValue(1), DL, Val, FIN,
6091                        IsWin64 ? MachinePointerInfo::getFixedStack(
6092                                      MF, GPRIdx, (i - FirstVariadicGPR) * 8)
6093                                : MachinePointerInfo::getStack(MF, i * 8));
6094       MemOps.push_back(Store);
6095       FIN =
6096           DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getConstant(8, DL, PtrVT));
6097     }
6098   }
6099   FuncInfo->setVarArgsGPRIndex(GPRIdx);
6100   FuncInfo->setVarArgsGPRSize(GPRSaveSize);
6101 
6102   if (Subtarget->hasFPARMv8() && !IsWin64) {
6103     static const MCPhysReg FPRArgRegs[] = {
6104         AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
6105         AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7};
6106     static const unsigned NumFPRArgRegs = array_lengthof(FPRArgRegs);
6107     unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs);
6108 
6109     unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
6110     int FPRIdx = 0;
6111     if (FPRSaveSize != 0) {
6112       FPRIdx = MFI.CreateStackObject(FPRSaveSize, Align(16), false);
6113 
6114       SDValue FIN = DAG.getFrameIndex(FPRIdx, PtrVT);
6115 
6116       for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
6117         Register VReg = MF.addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass);
6118         SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
6119 
6120         SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
6121                                      MachinePointerInfo::getStack(MF, i * 16));
6122         MemOps.push_back(Store);
6123         FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
6124                           DAG.getConstant(16, DL, PtrVT));
6125       }
6126     }
6127     FuncInfo->setVarArgsFPRIndex(FPRIdx);
6128     FuncInfo->setVarArgsFPRSize(FPRSaveSize);
6129   }
6130 
6131   if (!MemOps.empty()) {
6132     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
6133   }
6134 }
6135 
6136 /// LowerCallResult - Lower the result values of a call into the
6137 /// appropriate copies out of appropriate physical registers.
6138 SDValue AArch64TargetLowering::LowerCallResult(
6139     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
6140     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
6141     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
6142     SDValue ThisVal) const {
6143   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
6144   // Assign locations to each value returned by this call.
6145   SmallVector<CCValAssign, 16> RVLocs;
6146   DenseMap<unsigned, SDValue> CopiedRegs;
6147   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
6148                  *DAG.getContext());
6149   CCInfo.AnalyzeCallResult(Ins, RetCC);
6150 
6151   // Copy all of the result registers out of their specified physreg.
6152   for (unsigned i = 0; i != RVLocs.size(); ++i) {
6153     CCValAssign VA = RVLocs[i];
6154 
    // Pass 'this' value directly from the argument to return value, to avoid
    // reg unit interference.
6157     if (i == 0 && isThisReturn) {
6158       assert(!VA.needsCustom() && VA.getLocVT() == MVT::i64 &&
6159              "unexpected return calling convention register assignment");
6160       InVals.push_back(ThisVal);
6161       continue;
6162     }
6163 
6164     // Avoid copying a physreg twice since RegAllocFast is incompetent and only
6165     // allows one use of a physreg per block.
6166     SDValue Val = CopiedRegs.lookup(VA.getLocReg());
6167     if (!Val) {
6168       Val =
6169           DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
6170       Chain = Val.getValue(1);
6171       InFlag = Val.getValue(2);
6172       CopiedRegs[VA.getLocReg()] = Val;
6173     }
6174 
6175     switch (VA.getLocInfo()) {
6176     default:
6177       llvm_unreachable("Unknown loc info!");
6178     case CCValAssign::Full:
6179       break;
6180     case CCValAssign::BCvt:
6181       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
6182       break;
6183     case CCValAssign::AExtUpper:
6184       Val = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Val,
6185                         DAG.getConstant(32, DL, VA.getLocVT()));
6186       LLVM_FALLTHROUGH;
6187     case CCValAssign::AExt:
6188       LLVM_FALLTHROUGH;
6189     case CCValAssign::ZExt:
6190       Val = DAG.getZExtOrTrunc(Val, DL, VA.getValVT());
6191       break;
6192     }
6193 
6194     InVals.push_back(Val);
6195   }
6196 
6197   return Chain;
6198 }
6199 
6200 /// Return true if the calling convention is one that we can guarantee TCO for.
6201 static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) {
6202   return (CC == CallingConv::Fast && GuaranteeTailCalls) ||
6203          CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
6204 }
6205 
6206 /// Return true if we might ever do TCO for calls with this calling convention.
6207 static bool mayTailCallThisCC(CallingConv::ID CC) {
6208   switch (CC) {
6209   case CallingConv::C:
6210   case CallingConv::AArch64_SVE_VectorCall:
6211   case CallingConv::PreserveMost:
6212   case CallingConv::Swift:
6213   case CallingConv::SwiftTail:
6214   case CallingConv::Tail:
6215   case CallingConv::Fast:
6216     return true;
6217   default:
6218     return false;
6219   }
6220 }
6221 
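// Run the outgoing call operands through the calling convention, using the
// vararg convention for Win64 varargs and for non-fixed arguments, and
// narrowing fixed small-integer arguments back to their original width.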
6222 static void analyzeCallOperands(const AArch64TargetLowering &TLI,
6223                                 const AArch64Subtarget *Subtarget,
6224                                 const TargetLowering::CallLoweringInfo &CLI,
6225                                 CCState &CCInfo) {
6226   const SelectionDAG &DAG = CLI.DAG;
6227   CallingConv::ID CalleeCC = CLI.CallConv;
6228   bool IsVarArg = CLI.IsVarArg;
6229   const SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
6230   bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
6231 
6232   unsigned NumArgs = Outs.size();
6233   for (unsigned i = 0; i != NumArgs; ++i) {
6234     MVT ArgVT = Outs[i].VT;
6235     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
6236 
6237     bool UseVarArgCC = false;
6238     if (IsVarArg) {
6239       // On Windows, the fixed arguments in a vararg call are passed in GPRs
6240       // too, so use the vararg CC to force them to integer registers.
6241       if (IsCalleeWin64) {
6242         UseVarArgCC = true;
6243       } else {
6244         UseVarArgCC = !Outs[i].IsFixed;
6245       }
6246     } else {
6247       // Get type of the original argument.
6248       EVT ActualVT = TLI.getValueType(
6249           DAG.getDataLayout(), CLI.Args[Outs[i].OrigArgIndex].Ty,
6250           /*AllowUnknown*/ true);
6251       MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : ArgVT;
6252       // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
6253       if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
6254         ArgVT = MVT::i8;
6255       else if (ActualMVT == MVT::i16)
6256         ArgVT = MVT::i16;
6257     }
6258 
6259     CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CalleeCC, UseVarArgCC);
6260     bool Res = AssignFn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
6261     assert(!Res && "Call operand has unhandled type");
6262     (void)Res;
6263   }
6264 }
6265 
6266 bool AArch64TargetLowering::isEligibleForTailCallOptimization(
6267     const CallLoweringInfo &CLI) const {
6268   CallingConv::ID CalleeCC = CLI.CallConv;
6269   if (!mayTailCallThisCC(CalleeCC))
6270     return false;
6271 
6272   SDValue Callee = CLI.Callee;
6273   bool IsVarArg = CLI.IsVarArg;
6274   const SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
6275   const SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
6276   const SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
6277   const SelectionDAG &DAG = CLI.DAG;
6278   MachineFunction &MF = DAG.getMachineFunction();
6279   const Function &CallerF = MF.getFunction();
6280   CallingConv::ID CallerCC = CallerF.getCallingConv();
6281 
6282   // Functions using the C or Fast calling convention that have an SVE signature
6283   // preserve more registers and should assume the SVE_VectorCall CC.
6284   // The check for matching callee-saved regs will determine whether it is
6285   // eligible for TCO.
6286   if ((CallerCC == CallingConv::C || CallerCC == CallingConv::Fast) &&
6287       MF.getInfo<AArch64FunctionInfo>()->isSVECC())
6288     CallerCC = CallingConv::AArch64_SVE_VectorCall;
6289 
6290   bool CCMatch = CallerCC == CalleeCC;
6291 
6292   // When using the Windows calling convention on a non-windows OS, we want
6293   // to back up and restore X18 in such functions; we can't do a tail call
6294   // from those functions.
6295   if (CallerCC == CallingConv::Win64 && !Subtarget->isTargetWindows() &&
6296       CalleeCC != CallingConv::Win64)
6297     return false;
6298 
6299   // Byval parameters hand the function a pointer directly into the stack area
6300   // we want to reuse during a tail call. Working around this *is* possible (see
6301   // X86) but less efficient and uglier in LowerCall.
6302   for (Function::const_arg_iterator i = CallerF.arg_begin(),
6303                                     e = CallerF.arg_end();
6304        i != e; ++i) {
6305     if (i->hasByValAttr())
6306       return false;
6307 
6308     // On Windows, "inreg" attributes signify non-aggregate indirect returns.
6309     // In this case, it is necessary to save/restore X0 in the callee. Tail
6310     // call opt interferes with this. So we disable tail call opt when the
6311     // caller has an argument with "inreg" attribute.
6312 
6313     // FIXME: Check whether the callee also has an "inreg" argument.
6314     if (i->hasInRegAttr())
6315       return false;
6316   }
6317 
6318   if (canGuaranteeTCO(CalleeCC, getTargetMachine().Options.GuaranteedTailCallOpt))
6319     return CCMatch;
6320 
6321   // Externally-defined functions with weak linkage should not be
6322   // tail-called on AArch64 when the OS does not support dynamic
6323   // pre-emption of symbols, as the AAELF spec requires normal calls
6324   // to undefined weak functions to be replaced with a NOP or jump to the
6325   // next instruction. The behaviour of branch instructions in this
6326   // situation (as used for tail calls) is implementation-defined, so we
6327   // cannot rely on the linker replacing the tail call with a return.
6328   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
6329     const GlobalValue *GV = G->getGlobal();
6330     const Triple &TT = getTargetMachine().getTargetTriple();
6331     if (GV->hasExternalWeakLinkage() &&
6332         (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
6333       return false;
6334   }
6335 
6336   // Now we search for cases where we can use a tail call without changing the
6337   // ABI. Sibcall is used in some places (particularly gcc) to refer to this
6338   // concept.
6339 
6340   // I want anyone implementing a new calling convention to think long and hard
6341   // about this assert.
6342   assert((!IsVarArg || CalleeCC == CallingConv::C) &&
6343          "Unexpected variadic calling convention");
6344 
6345   LLVMContext &C = *DAG.getContext();
6346   // Check that the call results are passed in the same way.
6347   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
6348                                   CCAssignFnForCall(CalleeCC, IsVarArg),
6349                                   CCAssignFnForCall(CallerCC, IsVarArg)))
6350     return false;
6351   // The callee has to preserve all registers the caller needs to preserve.
6352   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
6353   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
6354   if (!CCMatch) {
6355     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
6356     if (Subtarget->hasCustomCallingConv()) {
6357       TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
6358       TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
6359     }
6360     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
6361       return false;
6362   }
6363 
6364   // Nothing more to check if the callee is taking no arguments
6365   if (Outs.empty())
6366     return true;
6367 
6368   SmallVector<CCValAssign, 16> ArgLocs;
6369   CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);
6370 
6371   analyzeCallOperands(*this, Subtarget, CLI, CCInfo);
6372 
6373   if (IsVarArg && !(CLI.CB && CLI.CB->isMustTailCall())) {
6374     // When we are musttail, additional checks have been done and we can safely
6375     // ignore this check. At least two cases: if caller is fastcc we can't have
6376     // any memory arguments (we'd be expected to clean up the stack afterwards);
6377     // if caller is C we could potentially use its argument area.
6378 
6379     // FIXME: for now we take the most conservative of these in both cases:
6380     // disallow all variadic memory operands.
6381     for (const CCValAssign &ArgLoc : ArgLocs)
6382       if (!ArgLoc.isRegLoc())
6383         return false;
6384   }
6385 
6386   const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
6387 
6388   // If any of the arguments is passed indirectly, it must be SVE, so the
6389   // 'getBytesInStackArgArea' is not sufficient to determine whether we need to
6390   // allocate space on the stack. That is why we check this explicitly here:
6391   // if any argument is passed indirectly, the call cannot be a tail call.
6392   if (llvm::any_of(ArgLocs, [](CCValAssign &A) {
6393         assert((A.getLocInfo() != CCValAssign::Indirect ||
6394                 A.getValVT().isScalableVector()) &&
6395                "Expected value to be scalable");
6396         return A.getLocInfo() == CCValAssign::Indirect;
6397       }))
6398     return false;
6399 
6400   // If the stack arguments for this call do not fit into our own save area then
6401   // the call cannot be made tail.
6402   if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
6403     return false;
6404 
6405   const MachineRegisterInfo &MRI = MF.getRegInfo();
6406   if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
6407     return false;
6408 
6409   return true;
6410 }
6411 
6412 SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
6413                                                    SelectionDAG &DAG,
6414                                                    MachineFrameInfo &MFI,
6415                                                    int ClobberedFI) const {
6416   SmallVector<SDValue, 8> ArgChains;
6417   int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
6418   int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
6419 
6420   // Include the original chain at the beginning of the list. When this is
6421   // used by target LowerCall hooks, this helps legalize find the
6422   // CALLSEQ_BEGIN node.
6423   ArgChains.push_back(Chain);
6424 
6425   // Add a chain value for each stack argument load that overlaps ClobberedFI.
6426   for (SDNode *U : DAG.getEntryNode().getNode()->uses())
6427     if (LoadSDNode *L = dyn_cast<LoadSDNode>(U))
6428       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
6429         if (FI->getIndex() < 0) {
6430           int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
6431           int64_t InLastByte = InFirstByte;
6432           InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;
6433 
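               // Keep this load chained if its byte range overlaps the
               // clobbered object's [FirstByte, LastByte] range.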
6434           if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
6435               (FirstByte <= InFirstByte && InFirstByte <= LastByte))
6436             ArgChains.push_back(SDValue(L, 1));
6437         }
6438 
6439   // Build a tokenfactor for all the chains.
6440   return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
6441 }
6442 
6443 bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
6444                                                    bool TailCallOpt) const {
6445   return (CallCC == CallingConv::Fast && TailCallOpt) ||
6446          CallCC == CallingConv::Tail || CallCC == CallingConv::SwiftTail;
6447 }
6448 
6449 // Check if the value is zero-extended from i1 to i8
6450 static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) {
6451   unsigned SizeInBits = Arg.getValueType().getSizeInBits();
6452   if (SizeInBits < 8)
6453     return false;
6454 
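       // The value is already a zero-extended bool if bits 1-7 of its low
       // byte (mask 0xFE) are known to be zero.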
6455   APInt RequiredZero(SizeInBits, 0xFE);
6456   KnownBits Bits = DAG.computeKnownBits(Arg, 4);
6457   bool ZExtBool = (Bits.Zero & RequiredZero) == RequiredZero;
6458   return ZExtBool;
6459 }
6460 
6461 /// LowerCall - Lower a call to a callseq_start + CALL + callseq_end chain,
6462 /// and add input and output parameter nodes.
6463 SDValue
6464 AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
6465                                  SmallVectorImpl<SDValue> &InVals) const {
6466   SelectionDAG &DAG = CLI.DAG;
6467   SDLoc &DL = CLI.DL;
6468   SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
6469   SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
6470   SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
6471   SDValue Chain = CLI.Chain;
6472   SDValue Callee = CLI.Callee;
6473   bool &IsTailCall = CLI.IsTailCall;
6474   CallingConv::ID &CallConv = CLI.CallConv;
6475   bool IsVarArg = CLI.IsVarArg;
6476 
6477   MachineFunction &MF = DAG.getMachineFunction();
6478   MachineFunction::CallSiteInfo CSInfo;
6479   bool IsThisReturn = false;
6480 
6481   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
6482   bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
6483   bool IsSibCall = false;
6484   bool GuardWithBTI = false;
6485 
6486   if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Attribute::ReturnsTwice) &&
6487       !Subtarget->noBTIAtReturnTwice()) {
6488     GuardWithBTI = FuncInfo->branchTargetEnforcement();
6489   }
6490 
6491   // Check callee args/returns for SVE registers and set calling convention
6492   // accordingly.
6493   if (CallConv == CallingConv::C || CallConv == CallingConv::Fast) {
6494     bool CalleeOutSVE = any_of(Outs, [](ISD::OutputArg &Out){
6495       return Out.VT.isScalableVector();
6496     });
6497     bool CalleeInSVE = any_of(Ins, [](ISD::InputArg &In){
6498       return In.VT.isScalableVector();
6499     });
6500 
6501     if (CalleeInSVE || CalleeOutSVE)
6502       CallConv = CallingConv::AArch64_SVE_VectorCall;
6503   }
6504 
6505   if (IsTailCall) {
6506     // Check if it's really possible to do a tail call.
6507     IsTailCall = isEligibleForTailCallOptimization(CLI);
6508 
6509     // A sibling call is one where we're under the usual C ABI and not planning
6510     // to change that but can still do a tail call:
6511     if (!TailCallOpt && IsTailCall && CallConv != CallingConv::Tail &&
6512         CallConv != CallingConv::SwiftTail)
6513       IsSibCall = true;
6514 
6515     if (IsTailCall)
6516       ++NumTailCalls;
6517   }
6518 
6519   if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
6520     report_fatal_error("failed to perform tail call elimination on a call "
6521                        "site marked musttail");
6522 
6523   // Analyze operands of the call, assigning locations to each operand.
6524   SmallVector<CCValAssign, 16> ArgLocs;
6525   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
6526 
6527   if (IsVarArg) {
6528     unsigned NumArgs = Outs.size();
6529 
6530     for (unsigned i = 0; i != NumArgs; ++i) {
6531       if (!Outs[i].IsFixed && Outs[i].VT.isScalableVector())
6532         report_fatal_error("Passing SVE types to variadic functions is "
6533                            "currently not supported");
6534     }
6535   }
6536 
6537   analyzeCallOperands(*this, Subtarget, CLI, CCInfo);
6538 
6539   // Get a count of how many bytes are to be pushed on the stack.
6540   unsigned NumBytes = CCInfo.getNextStackOffset();
6541 
6542   if (IsSibCall) {
6543     // Since we're not changing the ABI to make this a tail call, the memory
6544     // operands are already available in the caller's incoming argument space.
6545     NumBytes = 0;
6546   }
6547 
6548   // FPDiff is the byte offset of the call's argument area from the callee's.
6549   // Stores to callee stack arguments will be placed in FixedStackSlots offset
6550   // by this amount for a tail call. In a sibling call it must be 0 because the
6551   // caller will deallocate the entire stack and the callee still expects its
6552   // arguments to begin at SP+0. Completely unused for non-tail calls.
6553   int FPDiff = 0;
6554 
6555   if (IsTailCall && !IsSibCall) {
6556     unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
6557 
6558     // Since callee will pop argument stack as a tail call, we must keep the
6559     // popped size 16-byte aligned.
6560     NumBytes = alignTo(NumBytes, 16);
6561 
6562     // FPDiff will be negative if this tail call requires more space than we
6563     // would automatically have in our incoming argument space. Positive if we
6564     // can actually shrink the stack.
6565     FPDiff = NumReusableBytes - NumBytes;
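         // E.g. with 32 reusable bytes and a 48-byte argument area for this
         // call, FPDiff is -16 and the reserved stack below grows by 16 bytes.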
6566 
6567     // Update the required reserved area if this is the tail call requiring the
6568     // most argument stack space.
6569     if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff)
6570       FuncInfo->setTailCallReservedStack(-FPDiff);
6571 
6572     // The stack pointer must be 16-byte aligned at all times it's used for a
6573     // memory operation, which in practice means at *all* times and in
6574     // particular across call boundaries. Therefore our own arguments started at
6575     // a 16-byte aligned SP and the delta applied for the tail call should
6576     // satisfy the same constraint.
6577     assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
6578   }
6579 
6580   // Adjust the stack pointer for the new arguments...
6581   // These operations are automatically eliminated by the prolog/epilog pass
6582   if (!IsSibCall)
6583     Chain = DAG.getCALLSEQ_START(Chain, IsTailCall ? 0 : NumBytes, 0, DL);
6584 
6585   SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, AArch64::SP,
6586                                         getPointerTy(DAG.getDataLayout()));
6587 
6588   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6589   SmallSet<unsigned, 8> RegsUsed;
6590   SmallVector<SDValue, 8> MemOpChains;
6591   auto PtrVT = getPointerTy(DAG.getDataLayout());
6592 
6593   if (IsVarArg && CLI.CB && CLI.CB->isMustTailCall()) {
6594     const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();
6595     for (const auto &F : Forwards) {
6596       SDValue Val = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
6597       RegsToPass.emplace_back(F.PReg, Val);
6598     }
6599   }
6600 
6601   // Walk the register/memloc assignments, inserting copies/loads.
6602   unsigned ExtraArgLocs = 0;
6603   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
6604     CCValAssign &VA = ArgLocs[i - ExtraArgLocs];
6605     SDValue Arg = OutVals[i];
6606     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6607 
6608     // Promote the value if needed.
6609     switch (VA.getLocInfo()) {
6610     default:
6611       llvm_unreachable("Unknown loc info!");
6612     case CCValAssign::Full:
6613       break;
6614     case CCValAssign::SExt:
6615       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
6616       break;
6617     case CCValAssign::ZExt:
6618       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
6619       break;
6620     case CCValAssign::AExt:
6621       if (Outs[i].ArgVT == MVT::i1) {
6622         // AAPCS requires i1 to be zero-extended to 8-bits by the caller.
6623         //
6624         // Check if we actually have to do this, because the value may
6625         // already be zero-extended.
6626         //
6627         // We cannot just emit a (zext i8 (trunc (assert-zext i8)))
6628         // and rely on DAGCombiner to fold this, because the following
6629         // (anyext i32) is combined with (zext i8) in DAG.getNode:
6630         //
6631         //   (ext (zext x)) -> (zext x)
6632         //
6633         // This will give us (zext i32), which we cannot remove, so
6634         // try to check this beforehand.
6635         if (!checkZExtBool(Arg, DAG)) {
6636           Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
6637           Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i8, Arg);
6638         }
6639       }
6640       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
6641       break;
6642     case CCValAssign::AExtUpper:
6643       assert(VA.getValVT() == MVT::i32 && "only expect 32 -> 64 upper bits");
6644       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
6645       Arg = DAG.getNode(ISD::SHL, DL, VA.getLocVT(), Arg,
6646                         DAG.getConstant(32, DL, VA.getLocVT()));
6647       break;
6648     case CCValAssign::BCvt:
6649       Arg = DAG.getBitcast(VA.getLocVT(), Arg);
6650       break;
6651     case CCValAssign::Trunc:
6652       Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
6653       break;
6654     case CCValAssign::FPExt:
6655       Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
6656       break;
6657     case CCValAssign::Indirect:
6658       assert(VA.getValVT().isScalableVector() &&
6659              "Only scalable vectors can be passed indirectly");
6660 
6661       uint64_t StoreSize = VA.getValVT().getStoreSize().getKnownMinSize();
6662       uint64_t PartSize = StoreSize;
6663       unsigned NumParts = 1;
6664       if (Outs[i].Flags.isInConsecutiveRegs()) {
6665         assert(!Outs[i].Flags.isInConsecutiveRegsLast());
6666         while (!Outs[i + NumParts - 1].Flags.isInConsecutiveRegsLast())
6667           ++NumParts;
6668         StoreSize *= NumParts;
6669       }
6670 
6671       MachineFrameInfo &MFI = MF.getFrameInfo();
6672       Type *Ty = EVT(VA.getValVT()).getTypeForEVT(*DAG.getContext());
6673       Align Alignment = DAG.getDataLayout().getPrefTypeAlign(Ty);
6674       int FI = MFI.CreateStackObject(StoreSize, Alignment, false);
6675       MFI.setStackID(FI, TargetStackID::ScalableVector);
6676 
6677       MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6678       SDValue Ptr = DAG.getFrameIndex(
6679           FI, DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout()));
6680       SDValue SpillSlot = Ptr;
6681 
6682       // Ensure we generate all stores for each tuple part, whilst updating the
6683       // pointer after each store correctly using vscale.
6684       while (NumParts) {
6685         Chain = DAG.getStore(Chain, DL, OutVals[i], Ptr, MPI);
6686         NumParts--;
6687         if (NumParts > 0) {
6688           SDValue BytesIncrement = DAG.getVScale(
6689               DL, Ptr.getValueType(),
6690               APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize));
6691           SDNodeFlags Flags;
6692           Flags.setNoUnsignedWrap(true);
6693 
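               // The increment is scaled by vscale, so the offset from the
               // frame index is no longer a compile-time constant; keep only
               // the address space in the pointer info.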
6694           MPI = MachinePointerInfo(MPI.getAddrSpace());
6695           Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
6696                             BytesIncrement, Flags);
6697           ExtraArgLocs++;
6698           i++;
6699         }
6700       }
6701 
6702       Arg = SpillSlot;
6703       break;
6704     }
6705 
6706     if (VA.isRegLoc()) {
6707       if (i == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
6708           Outs[0].VT == MVT::i64) {
6709         assert(VA.getLocVT() == MVT::i64 &&
6710                "unexpected calling convention register assignment");
6711         assert(!Ins.empty() && Ins[0].VT == MVT::i64 &&
6712                "unexpected use of 'returned'");
6713         IsThisReturn = true;
6714       }
6715       if (RegsUsed.count(VA.getLocReg())) {
6716         // If this register has already been used then we're trying to pack
6717         // parts of an [N x i32] into an X-register. The extension type will
6718         // take care of putting the two halves in the right place but we have to
6719         // combine them.
6720         SDValue &Bits =
6721             llvm::find_if(RegsToPass,
6722                           [=](const std::pair<unsigned, SDValue> &Elt) {
6723                             return Elt.first == VA.getLocReg();
6724                           })
6725                 ->second;
6726         Bits = DAG.getNode(ISD::OR, DL, Bits.getValueType(), Bits, Arg);
6727         // Call site info is used for function's parameter entry value
6728         // tracking. For now we track only simple cases when parameter
6729         // is transferred through whole register.
6730         llvm::erase_if(CSInfo, [&VA](MachineFunction::ArgRegPair ArgReg) {
6731           return ArgReg.Reg == VA.getLocReg();
6732         });
6733       } else {
6734         RegsToPass.emplace_back(VA.getLocReg(), Arg);
6735         RegsUsed.insert(VA.getLocReg());
6736         const TargetOptions &Options = DAG.getTarget().Options;
6737         if (Options.EmitCallSiteInfo)
6738           CSInfo.emplace_back(VA.getLocReg(), i);
6739       }
6740     } else {
6741       assert(VA.isMemLoc());
6742 
6743       SDValue DstAddr;
6744       MachinePointerInfo DstInfo;
6745 
6746       // FIXME: This works on big-endian for composite byvals, which are the
6747       // common case. It should also work for fundamental types.
6748       uint32_t BEAlign = 0;
6749       unsigned OpSize;
6750       if (VA.getLocInfo() == CCValAssign::Indirect ||
6751           VA.getLocInfo() == CCValAssign::Trunc)
6752         OpSize = VA.getLocVT().getFixedSizeInBits();
6753       else
6754         OpSize = Flags.isByVal() ? Flags.getByValSize() * 8
6755                                  : VA.getValVT().getSizeInBits();
6756       OpSize = (OpSize + 7) / 8;
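           // On big-endian targets, values smaller than 8 bytes are stored
           // towards the higher-addressed end of their stack slot, so bump
           // the offset accordingly.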
6757       if (!Subtarget->isLittleEndian() && !Flags.isByVal() &&
6758           !Flags.isInConsecutiveRegs()) {
6759         if (OpSize < 8)
6760           BEAlign = 8 - OpSize;
6761       }
6762       unsigned LocMemOffset = VA.getLocMemOffset();
6763       int32_t Offset = LocMemOffset + BEAlign;
6764       SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);
6765       PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
6766 
6767       if (IsTailCall) {
6768         Offset = Offset + FPDiff;
6769         int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
6770 
6771         DstAddr = DAG.getFrameIndex(FI, PtrVT);
6772         DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
6773 
6774         // Make sure any stack arguments overlapping with where we're storing
6775         // are loaded before this eventual operation. Otherwise they'll be
6776         // clobbered.
6777         Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
6778       } else {
6779         SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);
6780 
6781         DstAddr = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
6782         DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
6783       }
6784 
6785       if (Outs[i].Flags.isByVal()) {
6786         SDValue SizeNode =
6787             DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i64);
6788         SDValue Cpy = DAG.getMemcpy(
6789             Chain, DL, DstAddr, Arg, SizeNode,
6790             Outs[i].Flags.getNonZeroByValAlign(),
6791             /*isVol = */ false, /*AlwaysInline = */ false,
6792             /*isTailCall = */ false, DstInfo, MachinePointerInfo());
6793 
6794         MemOpChains.push_back(Cpy);
6795       } else {
6796         // Since we pass i1/i8/i16 as i1/i8/i16 on stack and Arg is already
6797         // promoted to a legal register type i32, we should truncate Arg back to
6798         // i1/i8/i16.
6799         if (VA.getValVT() == MVT::i1 || VA.getValVT() == MVT::i8 ||
6800             VA.getValVT() == MVT::i16)
6801           Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
6802 
6803         SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo);
6804         MemOpChains.push_back(Store);
6805       }
6806     }
6807   }
6808 
6809   if (!MemOpChains.empty())
6810     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
6811 
6812   // Build a sequence of copy-to-reg nodes chained together with token chain
6813   // and flag operands which copy the outgoing args into the appropriate regs.
6814   SDValue InFlag;
6815   for (auto &RegToPass : RegsToPass) {
6816     Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
6817                              RegToPass.second, InFlag);
6818     InFlag = Chain.getValue(1);
6819   }
6820 
6821   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
6822   // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
6823   // node so that legalize doesn't hack it.
6824   if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
6825     auto GV = G->getGlobal();
6826     unsigned OpFlags =
6827         Subtarget->classifyGlobalFunctionReference(GV, getTargetMachine());
6828     if (OpFlags & AArch64II::MO_GOT) {
6829       Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
6830       Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
6831     } else {
6832       const GlobalValue *GV = G->getGlobal();
6833       Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0);
6834     }
6835   } else if (auto *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
6836     if (getTargetMachine().getCodeModel() == CodeModel::Large &&
6837         Subtarget->isTargetMachO()) {
6838       const char *Sym = S->getSymbol();
6839       Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, AArch64II::MO_GOT);
6840       Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
6841     } else {
6842       const char *Sym = S->getSymbol();
6843       Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0);
6844     }
6845   }
6846 
6847   // We don't usually want to end the call-sequence here because we would tidy
6848   // the frame up *after* the call, however in the ABI-changing tail-call case
6849   // we've carefully laid out the parameters so that when sp is reset they'll be
6850   // in the correct location.
6851   if (IsTailCall && !IsSibCall) {
6852     Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
6853                                DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
6854     InFlag = Chain.getValue(1);
6855   }
6856 
6857   std::vector<SDValue> Ops;
6858   Ops.push_back(Chain);
6859   Ops.push_back(Callee);
6860 
6861   if (IsTailCall) {
6862     // Each tail call may have to adjust the stack by a different amount, so
6863     // this information must travel along with the operation for eventual
6864     // consumption by emitEpilogue.
6865     Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
6866   }
6867 
6868   // Add argument registers to the end of the list so that they are known live
6869   // into the call.
6870   for (auto &RegToPass : RegsToPass)
6871     Ops.push_back(DAG.getRegister(RegToPass.first,
6872                                   RegToPass.second.getValueType()));
6873 
6874   // Add a register mask operand representing the call-preserved registers.
6875   const uint32_t *Mask;
6876   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
6877   if (IsThisReturn) {
6878     // For 'this' returns, use the X0-preserving mask if applicable
6879     Mask = TRI->getThisReturnPreservedMask(MF, CallConv);
6880     if (!Mask) {
6881       IsThisReturn = false;
6882       Mask = TRI->getCallPreservedMask(MF, CallConv);
6883     }
6884   } else
6885     Mask = TRI->getCallPreservedMask(MF, CallConv);
6886 
6887   if (Subtarget->hasCustomCallingConv())
6888     TRI->UpdateCustomCallPreservedMask(MF, &Mask);
6889 
6890   if (TRI->isAnyArgRegReserved(MF))
6891     TRI->emitReservedArgRegCallError(MF);
6892 
6893   assert(Mask && "Missing call preserved mask for calling convention");
6894   Ops.push_back(DAG.getRegisterMask(Mask));
6895 
6896   if (InFlag.getNode())
6897     Ops.push_back(InFlag);
6898 
6899   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6900 
6901   // If we're doing a tail call, use a TC_RETURN here rather than an
6902   // actual call instruction.
6903   if (IsTailCall) {
6904     MF.getFrameInfo().setHasTailCall();
6905     SDValue Ret = DAG.getNode(AArch64ISD::TC_RETURN, DL, NodeTys, Ops);
6906     DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
6907     return Ret;
6908   }
6909 
6910   unsigned CallOpc = AArch64ISD::CALL;
6911   // Calls with operand bundle "clang.arc.attachedcall" are special. They should
6912   // be expanded to the call, directly followed by a special marker sequence and
6913   // a call to an ObjC library function.  Use CALL_RVMARKER to do that.
6914   if (CLI.CB && objcarc::hasAttachedCallOpBundle(CLI.CB)) {
6915     assert(!IsTailCall &&
6916            "tail calls cannot be marked with clang.arc.attachedcall");
6917     CallOpc = AArch64ISD::CALL_RVMARKER;
6918 
6919     // Add a target global address for the retainRV/claimRV runtime function
6920     // just before the call target.
6921     Function *ARCFn = *objcarc::getAttachedARCFunction(CLI.CB);
6922     auto GA = DAG.getTargetGlobalAddress(ARCFn, DL, PtrVT);
6923     Ops.insert(Ops.begin() + 1, GA);
6924   } else if (GuardWithBTI)
6925     CallOpc = AArch64ISD::CALL_BTI;
6926 
6927   // Returns a chain and a flag for retval copy to use.
6928   Chain = DAG.getNode(CallOpc, DL, NodeTys, Ops);
6929   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
6930   InFlag = Chain.getValue(1);
6931   DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
6932 
6933   uint64_t CalleePopBytes =
6934       DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0;
6935 
6936   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
6937                              DAG.getIntPtrConstant(CalleePopBytes, DL, true),
6938                              InFlag, DL);
6939   if (!Ins.empty())
6940     InFlag = Chain.getValue(1);
6941 
6942   // Handle result values, copying them out of physregs into vregs that we
6943   // return.
6944   return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
6945                          InVals, IsThisReturn,
6946                          IsThisReturn ? OutVals[0] : SDValue());
6947 }
6948 
6949 bool AArch64TargetLowering::CanLowerReturn(
6950     CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
6951     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
6952   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
6953   SmallVector<CCValAssign, 16> RVLocs;
6954   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
6955   return CCInfo.CheckReturn(Outs, RetCC);
6956 }
6957 
6958 SDValue
6959 AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
6960                                    bool isVarArg,
6961                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
6962                                    const SmallVectorImpl<SDValue> &OutVals,
6963                                    const SDLoc &DL, SelectionDAG &DAG) const {
6964   auto &MF = DAG.getMachineFunction();
6965   auto *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
6966 
6967   CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
6968   SmallVector<CCValAssign, 16> RVLocs;
6969   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
6970   CCInfo.AnalyzeReturn(Outs, RetCC);
6971 
6972   // Copy the result values into the output registers.
6973   SDValue Flag;
6974   SmallVector<std::pair<unsigned, SDValue>, 4> RetVals;
6975   SmallSet<unsigned, 4> RegsUsed;
6976   for (unsigned i = 0, realRVLocIdx = 0; i != RVLocs.size();
6977        ++i, ++realRVLocIdx) {
6978     CCValAssign &VA = RVLocs[i];
6979     assert(VA.isRegLoc() && "Can only return in registers!");
6980     SDValue Arg = OutVals[realRVLocIdx];
6981 
6982     switch (VA.getLocInfo()) {
6983     default:
6984       llvm_unreachable("Unknown loc info!");
6985     case CCValAssign::Full:
6986       if (Outs[i].ArgVT == MVT::i1) {
6987         // AAPCS requires i1 to be zero-extended to i8 by the producer of the
6988         // value. This is strictly redundant on Darwin (which uses "zeroext
6989         // i1"), but will be optimised out before ISel.
6990         Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
6991         Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
6992       }
6993       break;
6994     case CCValAssign::BCvt:
6995       Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
6996       break;
6997     case CCValAssign::AExt:
6998     case CCValAssign::ZExt:
6999       Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
7000       break;
7001     case CCValAssign::AExtUpper:
7002       assert(VA.getValVT() == MVT::i32 && "only expect 32 -> 64 upper bits");
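           // Place the 32-bit value in the upper half of the 64-bit location
           // register.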
7003       Arg = DAG.getZExtOrTrunc(Arg, DL, VA.getLocVT());
7004       Arg = DAG.getNode(ISD::SHL, DL, VA.getLocVT(), Arg,
7005                         DAG.getConstant(32, DL, VA.getLocVT()));
7006       break;
7007     }
7008 
7009     if (RegsUsed.count(VA.getLocReg())) {
7010       SDValue &Bits =
7011           llvm::find_if(RetVals, [=](const std::pair<unsigned, SDValue> &Elt) {
7012             return Elt.first == VA.getLocReg();
7013           })->second;
7014       Bits = DAG.getNode(ISD::OR, DL, Bits.getValueType(), Bits, Arg);
7015     } else {
7016       RetVals.emplace_back(VA.getLocReg(), Arg);
7017       RegsUsed.insert(VA.getLocReg());
7018     }
7019   }
7020 
7021   SmallVector<SDValue, 4> RetOps(1, Chain);
7022   for (auto &RetVal : RetVals) {
7023     Chain = DAG.getCopyToReg(Chain, DL, RetVal.first, RetVal.second, Flag);
7024     Flag = Chain.getValue(1);
7025     RetOps.push_back(
7026         DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
7027   }
7028 
7029   // Windows AArch64 ABIs require that for returning structs by value we copy
7030   // the sret argument into X0 for the return.
7031   // We saved the argument into a virtual register in the entry block,
7032   // so now we copy the value out and into X0.
7033   if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
7034     SDValue Val = DAG.getCopyFromReg(RetOps[0], DL, SRetReg,
7035                                      getPointerTy(MF.getDataLayout()));
7036 
7037     unsigned RetValReg = AArch64::X0;
7038     Chain = DAG.getCopyToReg(Chain, DL, RetValReg, Val, Flag);
7039     Flag = Chain.getValue(1);
7040 
7041     RetOps.push_back(
7042       DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
7043   }
7044 
7045   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
7046   const MCPhysReg *I = TRI->getCalleeSavedRegsViaCopy(&MF);
7047   if (I) {
7048     for (; *I; ++I) {
7049       if (AArch64::GPR64RegClass.contains(*I))
7050         RetOps.push_back(DAG.getRegister(*I, MVT::i64));
7051       else if (AArch64::FPR64RegClass.contains(*I))
7052         RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
7053       else
7054         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
7055     }
7056   }
7057 
7058   RetOps[0] = Chain; // Update chain.
7059 
7060   // Add the flag if we have it.
7061   if (Flag.getNode())
7062     RetOps.push_back(Flag);
7063 
7064   return DAG.getNode(AArch64ISD::RET_FLAG, DL, MVT::Other, RetOps);
7065 }
7066 
7067 //===----------------------------------------------------------------------===//
7068 //  Other Lowering Code
7069 //===----------------------------------------------------------------------===//
7070 
7071 SDValue AArch64TargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
7072                                              SelectionDAG &DAG,
7073                                              unsigned Flag) const {
7074   return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty,
7075                                     N->getOffset(), Flag);
7076 }
7077 
7078 SDValue AArch64TargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
7079                                              SelectionDAG &DAG,
7080                                              unsigned Flag) const {
7081   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
7082 }
7083 
7084 SDValue AArch64TargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
7085                                              SelectionDAG &DAG,
7086                                              unsigned Flag) const {
7087   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
7088                                    N->getOffset(), Flag);
7089 }
7090 
7091 SDValue AArch64TargetLowering::getTargetNode(BlockAddressSDNode* N, EVT Ty,
7092                                              SelectionDAG &DAG,
7093                                              unsigned Flag) const {
7094   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
7095 }
7096 
7097 // (loadGOT sym)
7098 template <class NodeTy>
7099 SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG,
7100                                       unsigned Flags) const {
7101   LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getGOT\n");
7102   SDLoc DL(N);
7103   EVT Ty = getPointerTy(DAG.getDataLayout());
7104   SDValue GotAddr = getTargetNode(N, Ty, DAG, AArch64II::MO_GOT | Flags);
7105   // FIXME: Once remat is capable of dealing with instructions with register
7106   // operands, expand this into two nodes instead of using a wrapper node.
7107   return DAG.getNode(AArch64ISD::LOADgot, DL, Ty, GotAddr);
7108 }
7109 
7110 // (wrapper %highest(sym), %higher(sym), %hi(sym), %lo(sym))
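     // This is roughly materialized later as:
     //   movz xN, #:abs_g3:sym
     //   movk xN, #:abs_g2_nc:sym
     //   movk xN, #:abs_g1_nc:sym
     //   movk xN, #:abs_g0_nc:sym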
7111 template <class NodeTy>
7112 SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG,
7113                                             unsigned Flags) const {
7114   LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrLarge\n");
7115   SDLoc DL(N);
7116   EVT Ty = getPointerTy(DAG.getDataLayout());
7117   const unsigned char MO_NC = AArch64II::MO_NC;
7118   return DAG.getNode(
7119       AArch64ISD::WrapperLarge, DL, Ty,
7120       getTargetNode(N, Ty, DAG, AArch64II::MO_G3 | Flags),
7121       getTargetNode(N, Ty, DAG, AArch64II::MO_G2 | MO_NC | Flags),
7122       getTargetNode(N, Ty, DAG, AArch64II::MO_G1 | MO_NC | Flags),
7123       getTargetNode(N, Ty, DAG, AArch64II::MO_G0 | MO_NC | Flags));
7124 }
7125 
7126 // (addlow (adrp %hi(sym)) %lo(sym))
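     // i.e. adrp xN, sym followed by add xN, xN, :lo12:sym.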
7127 template <class NodeTy>
7128 SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
7129                                        unsigned Flags) const {
7130   LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddr\n");
7131   SDLoc DL(N);
7132   EVT Ty = getPointerTy(DAG.getDataLayout());
7133   SDValue Hi = getTargetNode(N, Ty, DAG, AArch64II::MO_PAGE | Flags);
7134   SDValue Lo = getTargetNode(N, Ty, DAG,
7135                              AArch64II::MO_PAGEOFF | AArch64II::MO_NC | Flags);
7136   SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, Ty, Hi);
7137   return DAG.getNode(AArch64ISD::ADDlow, DL, Ty, ADRP, Lo);
7138 }
7139 
7140 // (adr sym)
7141 template <class NodeTy>
7142 SDValue AArch64TargetLowering::getAddrTiny(NodeTy *N, SelectionDAG &DAG,
7143                                            unsigned Flags) const {
7144   LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrTiny\n");
7145   SDLoc DL(N);
7146   EVT Ty = getPointerTy(DAG.getDataLayout());
7147   SDValue Sym = getTargetNode(N, Ty, DAG, Flags);
7148   return DAG.getNode(AArch64ISD::ADR, DL, Ty, Sym);
7149 }
7150 
7151 SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
7152                                                   SelectionDAG &DAG) const {
7153   GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
7154   const GlobalValue *GV = GN->getGlobal();
7155   unsigned OpFlags = Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
7156 
7157   if (OpFlags != AArch64II::MO_NO_FLAG)
7158     assert(cast<GlobalAddressSDNode>(Op)->getOffset() == 0 &&
7159            "unexpected offset in global node");
7160 
7161   // This also catches the large code model case for Darwin, and tiny code
7162   // model with got relocations.
7163   if ((OpFlags & AArch64II::MO_GOT) != 0) {
7164     return getGOT(GN, DAG, OpFlags);
7165   }
7166 
7167   SDValue Result;
7168   if (getTargetMachine().getCodeModel() == CodeModel::Large) {
7169     Result = getAddrLarge(GN, DAG, OpFlags);
7170   } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
7171     Result = getAddrTiny(GN, DAG, OpFlags);
7172   } else {
7173     Result = getAddr(GN, DAG, OpFlags);
7174   }
7175   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7176   SDLoc DL(GN);
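       // dllimport'ed and COFF-stub symbols give us the address of a pointer
       // to the global, so an extra load is needed.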
7177   if (OpFlags & (AArch64II::MO_DLLIMPORT | AArch64II::MO_COFFSTUB))
7178     Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
7179                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
7180   return Result;
7181 }
7182 
7183 /// Convert a TLS address reference into the correct sequence of loads
7184 /// and calls to compute the variable's address (for Darwin, currently) and
7185 /// return an SDValue containing the final node.
7186 
7187 /// Darwin only has one TLS scheme which must be capable of dealing with the
7188 /// fully general situation, in the worst case. This means:
7189 ///     + "extern __thread" declaration.
7190 ///     + Defined in a possibly unknown dynamic library.
7191 ///
7192 /// The general system is that each __thread variable has a [3 x i64] descriptor
7193 /// which contains information used by the runtime to calculate the address. The
7194 /// only part of this the compiler needs to know about is the first xword, which
7195 /// contains a function pointer that must be called with the address of the
7196 /// entire descriptor in "x0".
7197 ///
7198 /// Since this descriptor may be in a different unit, in general even the
7199 /// descriptor must be accessed via an indirect load. The "ideal" code sequence
7200 /// is:
7201 ///     adrp x0, _var@TLVPPAGE
7202 ///     ldr x0, [x0, _var@TLVPPAGEOFF]   ; x0 now contains address of descriptor
7203 ///     ldr x1, [x0]                     ; x1 contains 1st entry of descriptor,
7204 ///                                      ; the function pointer
7205 ///     blr x1                           ; Uses descriptor address in x0
7206 ///     ; Address of _var is now in x0.
7207 ///
7208 /// If the address of _var's descriptor *is* known to the linker, then it can
7209 /// change the first "ldr" instruction to an appropriate "add x0, x0, #imm" for
7210 /// a slight efficiency gain.
7211 SDValue
7212 AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op,
7213                                                    SelectionDAG &DAG) const {
7214   assert(Subtarget->isTargetDarwin() &&
7215          "This function expects a Darwin target");
7216 
7217   SDLoc DL(Op);
7218   MVT PtrVT = getPointerTy(DAG.getDataLayout());
7219   MVT PtrMemVT = getPointerMemTy(DAG.getDataLayout());
7220   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
7221 
7222   SDValue TLVPAddr =
7223       DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
7224   SDValue DescAddr = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TLVPAddr);
7225 
7226   // The first entry in the descriptor is a function pointer that we must call
7227   // to obtain the address of the variable.
7228   SDValue Chain = DAG.getEntryNode();
7229   SDValue FuncTLVGet = DAG.getLoad(
7230       PtrMemVT, DL, Chain, DescAddr,
7231       MachinePointerInfo::getGOT(DAG.getMachineFunction()),
7232       Align(PtrMemVT.getSizeInBits() / 8),
7233       MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
7234   Chain = FuncTLVGet.getValue(1);
7235 
7236   // Extend loaded pointer if necessary (i.e. if ILP32) to DAG pointer.
7237   FuncTLVGet = DAG.getZExtOrTrunc(FuncTLVGet, DL, PtrVT);
7238 
7239   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7240   MFI.setAdjustsStack(true);
7241 
7242   // TLS calls preserve all registers except those that absolutely must be
7243   // trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be
7244   // silly).
7245   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
7246   const uint32_t *Mask = TRI->getTLSCallPreservedMask();
7247   if (Subtarget->hasCustomCallingConv())
7248     TRI->UpdateCustomCallPreservedMask(DAG.getMachineFunction(), &Mask);
7249 
7250   // Finally, we can make the call. This is just a degenerate version of a
7251   // normal AArch64 call node: x0 takes the address of the descriptor, and
7252   // returns the address of the variable in this thread.
7253   Chain = DAG.getCopyToReg(Chain, DL, AArch64::X0, DescAddr, SDValue());
7254   Chain =
7255       DAG.getNode(AArch64ISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
7256                   Chain, FuncTLVGet, DAG.getRegister(AArch64::X0, MVT::i64),
7257                   DAG.getRegisterMask(Mask), Chain.getValue(1));
7258   return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Chain.getValue(1));
7259 }
7260 
7261 /// Convert a thread-local variable reference into a sequence of instructions to
7262 /// compute the variable's address for the local exec TLS model of ELF targets.
7263 /// The sequence depends on the maximum TLS area size.
7264 SDValue AArch64TargetLowering::LowerELFTLSLocalExec(const GlobalValue *GV,
7265                                                     SDValue ThreadBase,
7266                                                     const SDLoc &DL,
7267                                                     SelectionDAG &DAG) const {
7268   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7269   SDValue TPOff, Addr;
7270 
7271   switch (DAG.getTarget().Options.TLSSize) {
7272   default:
7273     llvm_unreachable("Unexpected TLS size");
7274 
7275   case 12: {
7276     // mrs   x0, TPIDR_EL0
7277     // add   x0, x0, :tprel_lo12:a
7278     SDValue Var = DAG.getTargetGlobalAddress(
7279         GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
7280     return SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase,
7281                                       Var,
7282                                       DAG.getTargetConstant(0, DL, MVT::i32)),
7283                    0);
7284   }
7285 
7286   case 24: {
7287     // mrs   x0, TPIDR_EL0
7288     // add   x0, x0, :tprel_hi12:a
7289     // add   x0, x0, :tprel_lo12_nc:a
7290     SDValue HiVar = DAG.getTargetGlobalAddress(
7291         GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
7292     SDValue LoVar = DAG.getTargetGlobalAddress(
7293         GV, DL, PtrVT, 0,
7294         AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
7295     Addr = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase,
7296                                       HiVar,
7297                                       DAG.getTargetConstant(0, DL, MVT::i32)),
7298                    0);
7299     return SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, Addr,
7300                                       LoVar,
7301                                       DAG.getTargetConstant(0, DL, MVT::i32)),
7302                    0);
7303   }
7304 
7305   case 32: {
7306     // mrs   x1, TPIDR_EL0
7307     // movz  x0, #:tprel_g1:a
7308     // movk  x0, #:tprel_g0_nc:a
7309     // add   x0, x1, x0
7310     SDValue HiVar = DAG.getTargetGlobalAddress(
7311         GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G1);
7312     SDValue LoVar = DAG.getTargetGlobalAddress(
7313         GV, DL, PtrVT, 0,
7314         AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC);
7315     TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar,
7316                                        DAG.getTargetConstant(16, DL, MVT::i32)),
7317                     0);
7318     TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, LoVar,
7319                                        DAG.getTargetConstant(0, DL, MVT::i32)),
7320                     0);
7321     return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
7322   }
7323 
7324   case 48: {
7325     // mrs   x1, TPIDR_EL0
7326     // movz  x0, #:tprel_g2:a
7327     // movk  x0, #:tprel_g1_nc:a
7328     // movk  x0, #:tprel_g0_nc:a
7329     // add   x0, x1, x0
7330     SDValue HiVar = DAG.getTargetGlobalAddress(
7331         GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G2);
7332     SDValue MiVar = DAG.getTargetGlobalAddress(
7333         GV, DL, PtrVT, 0,
7334         AArch64II::MO_TLS | AArch64II::MO_G1 | AArch64II::MO_NC);
7335     SDValue LoVar = DAG.getTargetGlobalAddress(
7336         GV, DL, PtrVT, 0,
7337         AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC);
7338     TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar,
7339                                        DAG.getTargetConstant(32, DL, MVT::i32)),
7340                     0);
7341     TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, MiVar,
7342                                        DAG.getTargetConstant(16, DL, MVT::i32)),
7343                     0);
7344     TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, LoVar,
7345                                        DAG.getTargetConstant(0, DL, MVT::i32)),
7346                     0);
7347     return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
7348   }
7349   }
7350 }
7351 
7352 /// When accessing thread-local variables under either the general-dynamic or
7353 /// local-dynamic system, we make a "TLS-descriptor" call. The variable will
7354 /// have a descriptor, accessible via a PC-relative ADRP, and whose first entry
7355 /// is a function pointer to carry out the resolution.
7356 ///
7357 /// The sequence is:
7358 ///    adrp  x0, :tlsdesc:var
7359 ///    ldr   x1, [x0, #:tlsdesc_lo12:var]
7360 ///    add   x0, x0, #:tlsdesc_lo12:var
7361 ///    .tlsdesccall var
7362 ///    blr   x1
7363 ///    (TPIDR_EL0 offset now in x0)
7364 ///
7365 ///  The above sequence must be produced unscheduled so that the linker can
7366 ///  optimize/relax it.
7367 ///  Therefore, a pseudo-instruction (TLSDESC_CALLSEQ) is used to represent
7368 ///  the sequence and is expanded very late in the compilation flow, to ensure
7369 ///  the code is emitted exactly as shown above.
7370 SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr,
7371                                                       const SDLoc &DL,
7372                                                       SelectionDAG &DAG) const {
7373   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7374 
7375   SDValue Chain = DAG.getEntryNode();
7376   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7377 
7378   Chain =
7379       DAG.getNode(AArch64ISD::TLSDESC_CALLSEQ, DL, NodeTys, {Chain, SymAddr});
7380   SDValue Glue = Chain.getValue(1);
7381 
7382   return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
7383 }
7384 
7385 SDValue
7386 AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
7387                                                 SelectionDAG &DAG) const {
7388   assert(Subtarget->isTargetELF() && "This function expects an ELF target");
7389 
7390   const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
7391 
7392   TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());
7393 
7394   if (!EnableAArch64ELFLocalDynamicTLSGeneration) {
7395     if (Model == TLSModel::LocalDynamic)
7396       Model = TLSModel::GeneralDynamic;
7397   }
7398 
7399   if (getTargetMachine().getCodeModel() == CodeModel::Large &&
7400       Model != TLSModel::LocalExec)
7401     report_fatal_error("ELF TLS only supported in small memory model or "
7402                        "in local exec TLS model");
7403   // Different choices can be made for the maximum size of the TLS area for a
7404   // module. For the small address model, the default TLS size is 16MiB and the
7405   // maximum TLS size is 4GiB.
7406   // FIXME: add tiny and large code model support for TLS access models other
7407   // than local exec. We currently generate the same code as small for tiny,
7408   // which may be larger than needed.
7409 
7410   SDValue TPOff;
7411   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7412   SDLoc DL(Op);
7413   const GlobalValue *GV = GA->getGlobal();
7414 
7415   SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);
7416 
7417   if (Model == TLSModel::LocalExec) {
7418     return LowerELFTLSLocalExec(GV, ThreadBase, DL, DAG);
7419   } else if (Model == TLSModel::InitialExec) {
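         // Initial-exec: the variable's offset from the thread pointer is
         // loaded from the GOT, roughly:
         //   adrp x0, :gottprel:var
         //   ldr  x0, [x0, #:gottprel_lo12:var]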
7420     TPOff = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
7421     TPOff = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TPOff);
7422   } else if (Model == TLSModel::LocalDynamic) {
7423     // Local-dynamic accesses proceed in two phases. A general-dynamic TLS
7424     // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate
7425     // the beginning of the module's TLS region, followed by a DTPREL offset
7426     // calculation.
7427 
7428     // These accesses will need deduplicating if there's more than one.
7429     AArch64FunctionInfo *MFI =
7430         DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
7431     MFI->incNumLocalDynamicTLSAccesses();
7432 
7433     // The call needs a relocation too for linker relaxation. It doesn't make
7434     // sense to call it MO_PAGE or MO_PAGEOFF though so we need another copy of
7435     // the address.
7436     SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
7437                                                   AArch64II::MO_TLS);
7438 
7439     // Now we can calculate the offset from TPIDR_EL0 to this module's
7440     // thread-local area.
7441     TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);
7442 
7443     // Now use :dtprel_whatever: operations to calculate this variable's offset
7444     // in its thread-storage area.
7445     SDValue HiVar = DAG.getTargetGlobalAddress(
7446         GV, DL, MVT::i64, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
7447     SDValue LoVar = DAG.getTargetGlobalAddress(
7448         GV, DL, MVT::i64, 0,
7449         AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
7450 
7451     TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, HiVar,
7452                                        DAG.getTargetConstant(0, DL, MVT::i32)),
7453                     0);
7454     TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, LoVar,
7455                                        DAG.getTargetConstant(0, DL, MVT::i32)),
7456                     0);
7457   } else if (Model == TLSModel::GeneralDynamic) {
7458     // The call needs a relocation too for linker relaxation. It doesn't make
7459     // sense to call it MO_PAGE or MO_PAGEOFF though so we need another copy of
7460     // the address.
7461     SDValue SymAddr =
7462         DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
7463 
7464     // Finally we can make a call to calculate the offset from tpidr_el0.
7465     TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);
7466   } else
7467     llvm_unreachable("Unsupported ELF TLS access model");
7468 
7469   return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
7470 }
7471 
7472 SDValue
7473 AArch64TargetLowering::LowerWindowsGlobalTLSAddress(SDValue Op,
7474                                                     SelectionDAG &DAG) const {
7475   assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
7476 
7477   SDValue Chain = DAG.getEntryNode();
7478   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7479   SDLoc DL(Op);
7480 
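  // On Windows on ARM64, the TEB is always reachable through the platform
  // register x18.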
7481   SDValue TEB = DAG.getRegister(AArch64::X18, MVT::i64);
7482 
7483   // Load the ThreadLocalStoragePointer from the TEB
7484   // A pointer to the TLS array is located at offset 0x58 from the TEB.
7485   SDValue TLSArray =
7486       DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x58, DL));
7487   TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
7488   Chain = TLSArray.getValue(1);
7489 
7490   // Load the TLS index from the C runtime.
7491   // This does the same as getAddr(), but without having a GlobalAddressSDNode.
7492   // This also does the same as LOADgot, but using a generic i32 load,
7493   // while LOADgot only loads i64.
7494   SDValue TLSIndexHi =
7495       DAG.getTargetExternalSymbol("_tls_index", PtrVT, AArch64II::MO_PAGE);
7496   SDValue TLSIndexLo = DAG.getTargetExternalSymbol(
7497       "_tls_index", PtrVT, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
7498   SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, TLSIndexHi);
7499   SDValue TLSIndex =
7500       DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, TLSIndexLo);
7501   TLSIndex = DAG.getLoad(MVT::i32, DL, Chain, TLSIndex, MachinePointerInfo());
7502   Chain = TLSIndex.getValue(1);
7503 
7504   // The pointer to the thread's TLS data area lives in the TLS array at the
7505   // slot given by the TLS index scaled by 8 (the pointer size).
7506   TLSIndex = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TLSIndex);
7507   SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
7508                              DAG.getConstant(3, DL, PtrVT));
7509   SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
7510                             DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
7511                             MachinePointerInfo());
7512   Chain = TLS.getValue(1);
7513 
7514   const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
7515   const GlobalValue *GV = GA->getGlobal();
7516   SDValue TGAHi = DAG.getTargetGlobalAddress(
7517       GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
7518   SDValue TGALo = DAG.getTargetGlobalAddress(
7519       GV, DL, PtrVT, 0,
7520       AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
7521 
7522   // Add the offset from the start of the .tls section (section base).
7523   SDValue Addr =
7524       SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TLS, TGAHi,
7525                                  DAG.getTargetConstant(0, DL, MVT::i32)),
7526               0);
7527   Addr = DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, Addr, TGALo);
7528   return Addr;
7529 }
7530 
7531 SDValue AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
7532                                                      SelectionDAG &DAG) const {
7533   const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
7534   if (DAG.getTarget().useEmulatedTLS())
7535     return LowerToTLSEmulatedModel(GA, DAG);
7536 
7537   if (Subtarget->isTargetDarwin())
7538     return LowerDarwinGlobalTLSAddress(Op, DAG);
7539   if (Subtarget->isTargetELF())
7540     return LowerELFGlobalTLSAddress(Op, DAG);
7541   if (Subtarget->isTargetWindows())
7542     return LowerWindowsGlobalTLSAddress(Op, DAG);
7543 
7544   llvm_unreachable("Unexpected platform trying to use TLS");
7545 }
7546 
7547 // Looks through \param Val to determine the bit that can be used to
7548 // check the sign of the value. It returns the unextended value and
7549 // the sign bit position.
7550 std::pair<SDValue, uint64_t> lookThroughSignExtension(SDValue Val) {
7551   if (Val.getOpcode() == ISD::SIGN_EXTEND_INREG)
7552     return {Val.getOperand(0),
7553             cast<VTSDNode>(Val.getOperand(1))->getVT().getFixedSizeInBits() -
7554                 1};
7555 
7556   if (Val.getOpcode() == ISD::SIGN_EXTEND)
7557     return {Val.getOperand(0),
7558             Val.getOperand(0)->getValueType(0).getFixedSizeInBits() - 1};
7559 
7560   return {Val, Val.getValueSizeInBits() - 1};
7561 }
7562 
7563 SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
7564   SDValue Chain = Op.getOperand(0);
7565   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
7566   SDValue LHS = Op.getOperand(2);
7567   SDValue RHS = Op.getOperand(3);
7568   SDValue Dest = Op.getOperand(4);
7569   SDLoc dl(Op);
7570 
7571   MachineFunction &MF = DAG.getMachineFunction();
7572   // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
7573   // will not be produced, as they are conditional branch instructions that do
7574   // not set flags.
7575   bool ProduceNonFlagSettingCondBr =
7576       !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);
7577 
7578   // Handle f128 first, since lowering it will result in comparing the return
7579   // value of a libcall against zero, which is just what the rest of LowerBR_CC
7580   // is expecting to deal with.
7581   if (LHS.getValueType() == MVT::f128) {
7582     softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS);
7583 
7584     // If softenSetCCOperands returned a scalar, we need to compare the result
7585     // against zero to select between true and false values.
7586     if (!RHS.getNode()) {
7587       RHS = DAG.getConstant(0, dl, LHS.getValueType());
7588       CC = ISD::SETNE;
7589     }
7590   }
7591 
7592   // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
7593   // instruction.
7594   if (ISD::isOverflowIntrOpRes(LHS) && isOneConstant(RHS) &&
7595       (CC == ISD::SETEQ || CC == ISD::SETNE)) {
7596     // Only lower legal XALUO ops.
7597     if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0)))
7598       return SDValue();
7599 
7600     // The actual operation with overflow check.
7601     AArch64CC::CondCode OFCC;
7602     SDValue Value, Overflow;
7603     std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, LHS.getValue(0), DAG);
7604 
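    // We branch directly on the overflow flag: for SETEQ (overflow == 1) use
    // OFCC as-is; for SETNE we branch when no overflow occurred, so invert it.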
7605     if (CC == ISD::SETNE)
7606       OFCC = getInvertedCondCode(OFCC);
7607     SDValue CCVal = DAG.getConstant(OFCC, dl, MVT::i32);
7608 
7609     return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
7610                        Overflow);
7611   }
7612 
7613   if (LHS.getValueType().isInteger()) {
7614     assert((LHS.getValueType() == RHS.getValueType()) &&
7615            (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));
7616 
7617     // If the RHS of the comparison is zero, we can potentially fold this
7618     // to a specialized branch.
7619     const ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
7620     if (RHSC && RHSC->getZExtValue() == 0 && ProduceNonFlagSettingCondBr) {
7621       if (CC == ISD::SETEQ) {
7622         // See if we can use a TBZ to fold in an AND as well.
7623         // TBZ has a smaller branch displacement than CBZ.  If the offset is
7624         // out of bounds, a late MI-layer pass rewrites branches.
7625         // 403.gcc is an example that hits this case.
7626         if (LHS.getOpcode() == ISD::AND &&
7627             isa<ConstantSDNode>(LHS.getOperand(1)) &&
7628             isPowerOf2_64(LHS.getConstantOperandVal(1))) {
7629           SDValue Test = LHS.getOperand(0);
7630           uint64_t Mask = LHS.getConstantOperandVal(1);
7631           return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, Test,
7632                              DAG.getConstant(Log2_64(Mask), dl, MVT::i64),
7633                              Dest);
7634         }
7635 
7636         return DAG.getNode(AArch64ISD::CBZ, dl, MVT::Other, Chain, LHS, Dest);
7637       } else if (CC == ISD::SETNE) {
7638         // See if we can use a TBZ to fold in an AND as well.
7639         // TBZ has a smaller branch displacement than CBZ.  If the offset is
7640         // out of bounds, a late MI-layer pass rewrites branches.
7641         // 403.gcc is an example that hits this case.
7642         if (LHS.getOpcode() == ISD::AND &&
7643             isa<ConstantSDNode>(LHS.getOperand(1)) &&
7644             isPowerOf2_64(LHS.getConstantOperandVal(1))) {
7645           SDValue Test = LHS.getOperand(0);
7646           uint64_t Mask = LHS.getConstantOperandVal(1);
7647           return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, Test,
7648                              DAG.getConstant(Log2_64(Mask), dl, MVT::i64),
7649                              Dest);
7650         }
7651 
7652         return DAG.getNode(AArch64ISD::CBNZ, dl, MVT::Other, Chain, LHS, Dest);
7653       } else if (CC == ISD::SETLT && LHS.getOpcode() != ISD::AND) {
7654         // Don't combine AND since emitComparison converts the AND to an ANDS
7655         // (a.k.a. TST) and the test in the test bit and branch instruction
7656         // becomes redundant.  This would also increase register pressure.
7657         uint64_t SignBitPos;
7658         std::tie(LHS, SignBitPos) = lookThroughSignExtension(LHS);
7659         return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, LHS,
7660                            DAG.getConstant(SignBitPos, dl, MVT::i64), Dest);
7661       }
7662     }
7663     if (RHSC && RHSC->getSExtValue() == -1 && CC == ISD::SETGT &&
7664         LHS.getOpcode() != ISD::AND && ProduceNonFlagSettingCondBr) {
7665       // Don't combine AND since emitComparison converts the AND to an ANDS
7666       // (a.k.a. TST) and the test in the test bit and branch instruction
7667       // becomes redundant.  This would also increase register pressure.
7668       uint64_t SignBitPos;
7669       std::tie(LHS, SignBitPos) = lookThroughSignExtension(LHS);
7670       return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, LHS,
7671                          DAG.getConstant(SignBitPos, dl, MVT::i64), Dest);
7672     }
7673 
7674     SDValue CCVal;
7675     SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
7676     return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
7677                        Cmp);
7678   }
7679 
7680   assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::bf16 ||
7681          LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
7682 
7683   // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
7684   // clean.  Some of them require two branches to implement.
7685   SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
7686   AArch64CC::CondCode CC1, CC2;
7687   changeFPCCToAArch64CC(CC, CC1, CC2);
7688   SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
7689   SDValue BR1 =
7690       DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CC1Val, Cmp);
7691   if (CC2 != AArch64CC::AL) {
7692     SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
7693     return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, BR1, Dest, CC2Val,
7694                        Cmp);
7695   }
7696 
7697   return BR1;
7698 }
7699 
7700 SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
7701                                               SelectionDAG &DAG) const {
7702   if (!Subtarget->hasNEON())
7703     return SDValue();
7704 
7705   EVT VT = Op.getValueType();
7706   EVT IntVT = VT.changeTypeToInteger();
7707   SDLoc DL(Op);
7708 
7709   SDValue In1 = Op.getOperand(0);
7710   SDValue In2 = Op.getOperand(1);
7711   EVT SrcVT = In2.getValueType();
7712 
7713   if (SrcVT.bitsLT(VT))
7714     In2 = DAG.getNode(ISD::FP_EXTEND, DL, VT, In2);
7715   else if (SrcVT.bitsGT(VT))
7716     In2 = DAG.getNode(ISD::FP_ROUND, DL, VT, In2, DAG.getIntPtrConstant(0, DL));
7717 
7718   if (VT.isScalableVector())
7719     IntVT =
7720         getPackedSVEVectorVT(VT.getVectorElementType().changeTypeToInteger());
7721 
7722   if (VT != In2.getValueType())
7723     return SDValue();
7724 
7725   auto BitCast = [this](EVT VT, SDValue Op, SelectionDAG &DAG) {
7726     if (VT.isScalableVector())
7727       return getSVESafeBitCast(VT, Op, DAG);
7728 
7729     return DAG.getBitcast(VT, Op);
7730   };
7731 
7732   SDValue VecVal1, VecVal2;
7733   EVT VecVT;
7734   auto SetVecVal = [&](int Idx = -1) {
7735     if (!VT.isVector()) {
7736       VecVal1 =
7737           DAG.getTargetInsertSubreg(Idx, DL, VecVT, DAG.getUNDEF(VecVT), In1);
7738       VecVal2 =
7739           DAG.getTargetInsertSubreg(Idx, DL, VecVT, DAG.getUNDEF(VecVT), In2);
7740     } else {
7741       VecVal1 = BitCast(VecVT, In1, DAG);
7742       VecVal2 = BitCast(VecVT, In2, DAG);
7743     }
7744   };
7745   if (VT.isVector()) {
7746     VecVT = IntVT;
7747     SetVecVal();
7748   } else if (VT == MVT::f64) {
7749     VecVT = MVT::v2i64;
7750     SetVecVal(AArch64::dsub);
7751   } else if (VT == MVT::f32) {
7752     VecVT = MVT::v4i32;
7753     SetVecVal(AArch64::ssub);
7754   } else if (VT == MVT::f16) {
7755     VecVT = MVT::v8i16;
7756     SetVecVal(AArch64::hsub);
7757   } else {
7758     llvm_unreachable("Invalid type for copysign!");
7759   }
7760 
7761   unsigned BitWidth = In1.getScalarValueSizeInBits();
7762   SDValue SignMaskV = DAG.getConstant(~APInt::getSignMask(BitWidth), DL, VecVT);
7763 
7764   // We want to materialize a mask with every bit but the high bit set, but the
7765   // AdvSIMD immediate moves cannot materialize that in a single instruction for
7766   // 64-bit elements. Instead, materialize all bits set and then negate that.
7767   if (VT == MVT::f64 || VT == MVT::v2f64) {
7768     SignMaskV = DAG.getConstant(APInt::getAllOnes(BitWidth), DL, VecVT);
7769     SignMaskV = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, SignMaskV);
7770     SignMaskV = DAG.getNode(ISD::FNEG, DL, MVT::v2f64, SignMaskV);
7771     SignMaskV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, SignMaskV);
7772   }
7773 
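  // The bitwise select keeps the sign bit from In2 and every other bit from
  // In1, which is exactly copysign(In1, In2).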
7774   SDValue BSP =
7775       DAG.getNode(AArch64ISD::BSP, DL, VecVT, SignMaskV, VecVal1, VecVal2);
7776   if (VT == MVT::f16)
7777     return DAG.getTargetExtractSubreg(AArch64::hsub, DL, VT, BSP);
7778   if (VT == MVT::f32)
7779     return DAG.getTargetExtractSubreg(AArch64::ssub, DL, VT, BSP);
7780   if (VT == MVT::f64)
7781     return DAG.getTargetExtractSubreg(AArch64::dsub, DL, VT, BSP);
7782 
7783   return BitCast(VT, BSP, DAG);
7784 }
7785 
7786 SDValue AArch64TargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const {
7787   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
7788           Attribute::NoImplicitFloat))
7789     return SDValue();
7790 
7791   if (!Subtarget->hasNEON())
7792     return SDValue();
7793 
7794   // While there is no dedicated integer popcount instruction, CTPOP can
7795   // be lowered efficiently to the following sequence that uses
7796   // AdvSIMD registers/instructions, as long as the copies to/from
7797   // the AdvSIMD registers are cheap.
7798   //  FMOV    D0, X0        // copy 64-bit int to vector, high bits zero'd
7799   //  CNT     V0.8B, V0.8B  // 8xbyte pop-counts
7800   //  ADDV    B0, V0.8B     // sum 8xbyte pop-counts
7801   //  UMOV    X0, V0.B[0]   // copy byte result back to integer reg
7802   SDValue Val = Op.getOperand(0);
7803   SDLoc DL(Op);
7804   EVT VT = Op.getValueType();
7805 
7806   if (VT == MVT::i32 || VT == MVT::i64) {
7807     if (VT == MVT::i32)
7808       Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val);
7809     Val = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Val);
7810 
7811     SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v8i8, Val);
7812     SDValue UaddLV = DAG.getNode(
7813         ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
7814         DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop);
7815 
7816     if (VT == MVT::i64)
7817       UaddLV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, UaddLV);
7818     return UaddLV;
7819   } else if (VT == MVT::i128) {
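    // For i128, pop-count all 16 bytes with CNT and sum them with UADDLV; the
    // result fits comfortably in 32 bits, so zero-extend it back to i128.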
7820     Val = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Val);
7821 
7822     SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v16i8, Val);
7823     SDValue UaddLV = DAG.getNode(
7824         ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
7825         DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop);
7826 
7827     return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i128, UaddLV);
7828   }
7829 
7830   if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT))
7831     return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTPOP_MERGE_PASSTHRU);
7832 
7833   assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
7834           VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
7835          "Unexpected type for custom ctpop lowering");
7836 
7837   EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
7838   Val = DAG.getBitcast(VT8Bit, Val);
7839   Val = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Val);
7840 
7841   // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds.
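  // For example, a v4i32 CTPOP becomes roughly:
  //  CNT    V0.16B, V0.16B   // per-byte pop-counts
  //  UADDLP V0.8H, V0.16B    // pairwise widen to 16-bit counts
  //  UADDLP V0.4S, V0.8H     // pairwise widen to 32-bit counts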
7842   unsigned EltSize = 8;
7843   unsigned NumElts = VT.is64BitVector() ? 8 : 16;
7844   while (EltSize != VT.getScalarSizeInBits()) {
7845     EltSize *= 2;
7846     NumElts /= 2;
7847     MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);
7848     Val = DAG.getNode(
7849         ISD::INTRINSIC_WO_CHAIN, DL, WidenVT,
7850         DAG.getConstant(Intrinsic::aarch64_neon_uaddlp, DL, MVT::i32), Val);
7851   }
7852 
7853   return Val;
7854 }
7855 
7856 SDValue AArch64TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const {
7857   EVT VT = Op.getValueType();
7858   assert(VT.isScalableVector() ||
7859          useSVEForFixedLengthVectorVT(
7860              VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()));
7861 
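  // Lower CTTZ as CTLZ(BITREVERSE(x)): reversing the bits turns trailing zeros
  // into leading zeros.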
7862   SDLoc DL(Op);
7863   SDValue RBIT = DAG.getNode(ISD::BITREVERSE, DL, VT, Op.getOperand(0));
7864   return DAG.getNode(ISD::CTLZ, DL, VT, RBIT);
7865 }
7866 
7867 SDValue AArch64TargetLowering::LowerMinMax(SDValue Op,
7868                                            SelectionDAG &DAG) const {
7869 
7870   EVT VT = Op.getValueType();
7871   SDLoc DL(Op);
7872   unsigned Opcode = Op.getOpcode();
7873   ISD::CondCode CC;
7874   switch (Opcode) {
7875   default:
7876     llvm_unreachable("Wrong instruction");
7877   case ISD::SMAX:
7878     CC = ISD::SETGT;
7879     break;
7880   case ISD::SMIN:
7881     CC = ISD::SETLT;
7882     break;
7883   case ISD::UMAX:
7884     CC = ISD::SETUGT;
7885     break;
7886   case ISD::UMIN:
7887     CC = ISD::SETULT;
7888     break;
7889   }
7890 
7891   if (VT.isScalableVector() ||
7892       useSVEForFixedLengthVectorVT(
7893           VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors())) {
7894     switch (Opcode) {
7895     default:
7896       llvm_unreachable("Wrong instruction");
7897     case ISD::SMAX:
7898       return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMAX_PRED);
7899     case ISD::SMIN:
7900       return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMIN_PRED);
7901     case ISD::UMAX:
7902       return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMAX_PRED);
7903     case ISD::UMIN:
7904       return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMIN_PRED);
7905     }
7906   }
7907 
7908   SDValue Op0 = Op.getOperand(0);
7909   SDValue Op1 = Op.getOperand(1);
7910   SDValue Cond = DAG.getSetCC(DL, VT, Op0, Op1, CC);
7911   return DAG.getSelect(DL, VT, Cond, Op0, Op1);
7912 }
7913 
7914 SDValue AArch64TargetLowering::LowerBitreverse(SDValue Op,
7915                                                SelectionDAG &DAG) const {
7916   EVT VT = Op.getValueType();
7917 
7918   if (VT.isScalableVector() ||
7919       useSVEForFixedLengthVectorVT(
7920           VT, /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors()))
7921     return LowerToPredicatedOp(Op, DAG, AArch64ISD::BITREVERSE_MERGE_PASSTHRU);
7922 
7923   SDLoc DL(Op);
7924   SDValue REVB;
7925   MVT VST;
7926 
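  // NEON can only bit-reverse within 8-bit elements (RBIT), so first reverse
  // the bytes within each 32/64-bit element (REV32/REV64) and then bit-reverse
  // each byte via the v8i8/v16i8 BITREVERSE below.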
7927   switch (VT.getSimpleVT().SimpleTy) {
7928   default:
7929     llvm_unreachable("Invalid type for bitreverse!");
7930 
7931   case MVT::v2i32: {
7932     VST = MVT::v8i8;
7933     REVB = DAG.getNode(AArch64ISD::REV32, DL, VST, Op.getOperand(0));
7934 
7935     break;
7936   }
7937 
7938   case MVT::v4i32: {
7939     VST = MVT::v16i8;
7940     REVB = DAG.getNode(AArch64ISD::REV32, DL, VST, Op.getOperand(0));
7941 
7942     break;
7943   }
7944 
7945   case MVT::v1i64: {
7946     VST = MVT::v8i8;
7947     REVB = DAG.getNode(AArch64ISD::REV64, DL, VST, Op.getOperand(0));
7948 
7949     break;
7950   }
7951 
7952   case MVT::v2i64: {
7953     VST = MVT::v16i8;
7954     REVB = DAG.getNode(AArch64ISD::REV64, DL, VST, Op.getOperand(0));
7955 
7956     break;
7957   }
7958   }
7959 
7960   return DAG.getNode(AArch64ISD::NVCAST, DL, VT,
7961                      DAG.getNode(ISD::BITREVERSE, DL, VST, REVB));
7962 }
7963 
7964 SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
7965 
7966   if (Op.getValueType().isVector())
7967     return LowerVSETCC(Op, DAG);
7968 
7969   bool IsStrict = Op->isStrictFPOpcode();
7970   bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
7971   unsigned OpNo = IsStrict ? 1 : 0;
7972   SDValue Chain;
7973   if (IsStrict)
7974     Chain = Op.getOperand(0);
7975   SDValue LHS = Op.getOperand(OpNo + 0);
7976   SDValue RHS = Op.getOperand(OpNo + 1);
7977   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(OpNo + 2))->get();
7978   SDLoc dl(Op);
7979 
7980   // We chose ZeroOrOneBooleanContents, so use zero and one.
7981   EVT VT = Op.getValueType();
7982   SDValue TVal = DAG.getConstant(1, dl, VT);
7983   SDValue FVal = DAG.getConstant(0, dl, VT);
7984 
7985   // Handle f128 first, since one possible outcome is a normal integer
7986   // comparison which gets picked up by the next if statement.
7987   if (LHS.getValueType() == MVT::f128) {
7988     softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS, Chain,
7989                         IsSignaling);
7990 
7991     // If softenSetCCOperands returned a scalar, use it.
7992     if (!RHS.getNode()) {
7993       assert(LHS.getValueType() == Op.getValueType() &&
7994              "Unexpected setcc expansion!");
7995       return IsStrict ? DAG.getMergeValues({LHS, Chain}, dl) : LHS;
7996     }
7997   }
7998 
7999   if (LHS.getValueType().isInteger()) {
8000     SDValue CCVal;
8001     SDValue Cmp = getAArch64Cmp(
8002         LHS, RHS, ISD::getSetCCInverse(CC, LHS.getValueType()), CCVal, DAG, dl);
8003 
8004     // Note that we inverted the condition above, so we reverse the order of
8005     // the true and false operands here.  This will allow the setcc to be
8006     // matched to a single CSINC instruction.
8007     SDValue Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CCVal, Cmp);
8008     return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
8009   }
8010 
8011   // Now we know we're dealing with FP values.
8012   assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 ||
8013          LHS.getValueType() == MVT::f64);
8014 
8015   // Lower to an FCMP followed by one or two CSELs.  Go ahead
8016   // and do the comparison.
8017   SDValue Cmp;
8018   if (IsStrict)
8019     Cmp = emitStrictFPComparison(LHS, RHS, dl, DAG, Chain, IsSignaling);
8020   else
8021     Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
8022 
8023   AArch64CC::CondCode CC1, CC2;
8024   changeFPCCToAArch64CC(CC, CC1, CC2);
8025   SDValue Res;
8026   if (CC2 == AArch64CC::AL) {
8027     changeFPCCToAArch64CC(ISD::getSetCCInverse(CC, LHS.getValueType()), CC1,
8028                           CC2);
8029     SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
8030 
8031     // Note that we inverted the condition above, so we reverse the order of
8032     // the true and false operands here.  This will allow the setcc to be
8033     // matched to a single CSINC instruction.
8034     Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CC1Val, Cmp);
8035   } else {
8036     // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't
8037     // totally clean.  Some of them require two CSELs to implement.  As is in
8038     // this case, we emit the first CSEL and then emit a second using the output
8039     // of the first as the RHS.  We're effectively OR'ing the two CC's together.
8040 
8041     // FIXME: It would be nice if we could match the two CSELs to two CSINCs.
8042     SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
8043     SDValue CS1 =
8044         DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);
8045 
8046     SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
8047     Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
8048   }
8049   return IsStrict ? DAG.getMergeValues({Res, Cmp.getValue(1)}, dl) : Res;
8050 }
8051 
8052 SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS,
8053                                               SDValue RHS, SDValue TVal,
8054                                               SDValue FVal, const SDLoc &dl,
8055                                               SelectionDAG &DAG) const {
8056   // Handle f128 first, because it will result in a comparison of some RTLIB
8057   // call result against zero.
8058   if (LHS.getValueType() == MVT::f128) {
8059     softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl, LHS, RHS);
8060 
8061     // If softenSetCCOperands returned a scalar, we need to compare the result
8062     // against zero to select between true and false values.
8063     if (!RHS.getNode()) {
8064       RHS = DAG.getConstant(0, dl, LHS.getValueType());
8065       CC = ISD::SETNE;
8066     }
8067   }
8068 
8069   // Also handle f16, for which we need to do an f32 comparison.
8070   if (LHS.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
8071     LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS);
8072     RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS);
8073   }
8074 
8075   // Next, handle integers.
8076   if (LHS.getValueType().isInteger()) {
8077     assert((LHS.getValueType() == RHS.getValueType()) &&
8078            (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));
8079 
8080     ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
8081     ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
8082     ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
8083     // Check for sign pattern (SELECT_CC setgt, iN lhs, -1, 1, -1) and transform
8084     // into (OR (ASR lhs, N-1), 1), which requires fewer instructions for the
8085     // supported types.
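    // For example, with i32 operands (x > -1 ? 1 : -1) becomes (x >>s 31) | 1.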
8086     if (CC == ISD::SETGT && RHSC && RHSC->isAllOnes() && CTVal && CFVal &&
8087         CTVal->isOne() && CFVal->isAllOnes() &&
8088         LHS.getValueType() == TVal.getValueType()) {
8089       EVT VT = LHS.getValueType();
8090       SDValue Shift =
8091           DAG.getNode(ISD::SRA, dl, VT, LHS,
8092                       DAG.getConstant(VT.getSizeInBits() - 1, dl, VT));
8093       return DAG.getNode(ISD::OR, dl, VT, Shift, DAG.getConstant(1, dl, VT));
8094     }
8095 
8096     unsigned Opcode = AArch64ISD::CSEL;
8097 
8098     // If both the TVal and the FVal are constants, see if we can swap them in
8099     // order to form a CSINV or CSINC out of them.
8100     if (CTVal && CFVal && CTVal->isAllOnes() && CFVal->isZero()) {
8101       std::swap(TVal, FVal);
8102       std::swap(CTVal, CFVal);
8103       CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8104     } else if (CTVal && CFVal && CTVal->isOne() && CFVal->isZero()) {
8105       std::swap(TVal, FVal);
8106       std::swap(CTVal, CFVal);
8107       CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8108     } else if (TVal.getOpcode() == ISD::XOR) {
8109       // If TVal is a NOT we want to swap TVal and FVal so that we can match
8110       // with a CSINV rather than a CSEL.
8111       if (isAllOnesConstant(TVal.getOperand(1))) {
8112         std::swap(TVal, FVal);
8113         std::swap(CTVal, CFVal);
8114         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8115       }
8116     } else if (TVal.getOpcode() == ISD::SUB) {
8117       // If TVal is a negation (SUB from 0) we want to swap TVal and FVal so
8118       // that we can match with a CSNEG rather than a CSEL.
8119       if (isNullConstant(TVal.getOperand(0))) {
8120         std::swap(TVal, FVal);
8121         std::swap(CTVal, CFVal);
8122         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8123       }
8124     } else if (CTVal && CFVal) {
8125       const int64_t TrueVal = CTVal->getSExtValue();
8126       const int64_t FalseVal = CFVal->getSExtValue();
8127       bool Swap = false;
8128 
8129       // If both TVal and FVal are constants, see if FVal is the
8130       // inverse/negation/increment of TVal and generate a CSINV/CSNEG/CSINC
8131       // instead of a CSEL in that case.
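      // (When the condition is false, CSINV/CSNEG/CSINC return the bitwise NOT,
      // the negation, or the increment of the false operand, so FVal never has
      // to be materialized separately.)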
8132       if (TrueVal == ~FalseVal) {
8133         Opcode = AArch64ISD::CSINV;
8134       } else if (FalseVal > std::numeric_limits<int64_t>::min() &&
8135                  TrueVal == -FalseVal) {
8136         Opcode = AArch64ISD::CSNEG;
8137       } else if (TVal.getValueType() == MVT::i32) {
8138         // If our operands are only 32-bit wide, make sure we use 32-bit
8139         // arithmetic for the check whether we can use CSINC. This ensures that
8140         // the addition in the check will wrap around properly in case there is
8141         // an overflow (which would not be the case if we do the check with
8142         // 64-bit arithmetic).
8143         const uint32_t TrueVal32 = CTVal->getZExtValue();
8144         const uint32_t FalseVal32 = CFVal->getZExtValue();
8145 
8146         if ((TrueVal32 == FalseVal32 + 1) || (TrueVal32 + 1 == FalseVal32)) {
8147           Opcode = AArch64ISD::CSINC;
8148 
8149           if (TrueVal32 > FalseVal32) {
8150             Swap = true;
8151           }
8152         }
8153         // 64-bit check whether we can use CSINC.
8154       } else if ((TrueVal == FalseVal + 1) || (TrueVal + 1 == FalseVal)) {
8155         Opcode = AArch64ISD::CSINC;
8156 
8157         if (TrueVal > FalseVal) {
8158           Swap = true;
8159         }
8160       }
8161 
8162       // Swap TVal and FVal if necessary.
8163       if (Swap) {
8164         std::swap(TVal, FVal);
8165         std::swap(CTVal, CFVal);
8166         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
8167       }
8168 
8169       if (Opcode != AArch64ISD::CSEL) {
8170         // Drop FVal since we can get its value by simply inverting/negating
8171         // TVal.
8172         FVal = TVal;
8173       }
8174     }
8175 
8176     // Avoid materializing a constant when possible by reusing a known value in
8177     // a register.  However, don't perform this optimization if the known value
8178     // is one, zero or negative one in the case of a CSEL.  We can always
8179     // materialize these values using CSINC, CSEL and CSINV with wzr/xzr as the
8180     // FVal, respectively.
8181     ConstantSDNode *RHSVal = dyn_cast<ConstantSDNode>(RHS);
8182     if (Opcode == AArch64ISD::CSEL && RHSVal && !RHSVal->isOne() &&
8183         !RHSVal->isZero() && !RHSVal->isAllOnes()) {
8184       AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
8185       // Transform "a == C ? C : x" to "a == C ? a : x" and "a != C ? x : C" to
8186       // "a != C ? x : a" to avoid materializing C.
8187       if (CTVal && CTVal == RHSVal && AArch64CC == AArch64CC::EQ)
8188         TVal = LHS;
8189       else if (CFVal && CFVal == RHSVal && AArch64CC == AArch64CC::NE)
8190         FVal = LHS;
8191     } else if (Opcode == AArch64ISD::CSNEG && RHSVal && RHSVal->isOne()) {
8192       assert(CTVal && CFVal && "Expected constant operands for CSNEG.");
8193       // Use a CSINV to transform "a == C ? 1 : -1" to "a == C ? a : -1" to
8194       // avoid materializing C.
8195       AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
8196       if (CTVal == RHSVal && AArch64CC == AArch64CC::EQ) {
8197         Opcode = AArch64ISD::CSINV;
8198         TVal = LHS;
8199         FVal = DAG.getConstant(0, dl, FVal.getValueType());
8200       }
8201     }
8202 
8203     SDValue CCVal;
8204     SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
8205     EVT VT = TVal.getValueType();
8206     return DAG.getNode(Opcode, dl, VT, TVal, FVal, CCVal, Cmp);
8207   }
8208 
8209   // Now we know we're dealing with FP values.
8210   assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 ||
8211          LHS.getValueType() == MVT::f64);
8212   assert(LHS.getValueType() == RHS.getValueType());
8213   EVT VT = TVal.getValueType();
8214   SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
8215 
8216   // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
8217   // clean.  Some of them require two CSELs to implement.
8218   AArch64CC::CondCode CC1, CC2;
8219   changeFPCCToAArch64CC(CC, CC1, CC2);
8220 
8221   if (DAG.getTarget().Options.UnsafeFPMath) {
8222     // Transform "a == 0.0 ? 0.0 : x" to "a == 0.0 ? a : x" and
8223     // "a != 0.0 ? x : 0.0" to "a != 0.0 ? x : a" to avoid materializing 0.0.
8224     ConstantFPSDNode *RHSVal = dyn_cast<ConstantFPSDNode>(RHS);
8225     if (RHSVal && RHSVal->isZero()) {
8226       ConstantFPSDNode *CFVal = dyn_cast<ConstantFPSDNode>(FVal);
8227       ConstantFPSDNode *CTVal = dyn_cast<ConstantFPSDNode>(TVal);
8228 
8229       if ((CC == ISD::SETEQ || CC == ISD::SETOEQ || CC == ISD::SETUEQ) &&
8230           CTVal && CTVal->isZero() && TVal.getValueType() == LHS.getValueType())
8231         TVal = LHS;
8232       else if ((CC == ISD::SETNE || CC == ISD::SETONE || CC == ISD::SETUNE) &&
8233                CFVal && CFVal->isZero() &&
8234                FVal.getValueType() == LHS.getValueType())
8235         FVal = LHS;
8236     }
8237   }
8238 
8239   // Emit first, and possibly only, CSEL.
8240   SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
8241   SDValue CS1 = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);
8242 
8243   // If we need a second CSEL, emit it, using the output of the first as the
8244   // RHS.  We're effectively OR'ing the two CC's together.
8245   if (CC2 != AArch64CC::AL) {
8246     SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
8247     return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
8248   }
8249 
8250   // Otherwise, return the output of the first CSEL.
8251   return CS1;
8252 }
8253 
8254 SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
8255                                                   SelectionDAG &DAG) const {
8256   EVT Ty = Op.getValueType();
8257   auto Idx = Op.getConstantOperandAPInt(2);
8258   int64_t IdxVal = Idx.getSExtValue();
8259   assert(Ty.isScalableVector() &&
8260          "Only expect scalable vectors for custom lowering of VECTOR_SPLICE");
8261 
8262   // We can use the splice instruction for certain index values where we are
8263   // able to efficiently generate the correct predicate. The index will be
8264   // inverted and used directly as the input to the ptrue instruction, i.e.
8265   // -1 -> vl1, -2 -> vl2, etc. The predicate will then be reversed to get the
8266   // splice predicate. However, we can only do this if we can guarantee that
8267   // there are enough elements in the vector, hence we check the index <= min
8268   // number of elements.
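  // For example, IdxVal == -2 on nxv4i32 becomes roughly:
  //  ptrue  p0.s, vl2            // first two lanes active
  //  rev    p0.s, p0.s           // ...now the last two lanes
  //  splice z0.s, p0, z0.s, z1.s // last two elements of z0, then z1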
8269   Optional<unsigned> PredPattern;
8270   if (Ty.isScalableVector() && IdxVal < 0 &&
8271       (PredPattern = getSVEPredPatternFromNumElements(std::abs(IdxVal))) !=
8272           None) {
8273     SDLoc DL(Op);
8274 
8275     // Create a predicate where all but the last -IdxVal elements are false.
8276     EVT PredVT = Ty.changeVectorElementType(MVT::i1);
8277     SDValue Pred = getPTrue(DAG, DL, PredVT, *PredPattern);
8278     Pred = DAG.getNode(ISD::VECTOR_REVERSE, DL, PredVT, Pred);
8279 
8280     // Now splice the two inputs together using the predicate.
8281     return DAG.getNode(AArch64ISD::SPLICE, DL, Ty, Pred, Op.getOperand(0),
8282                        Op.getOperand(1));
8283   }
8284 
8285   // This will select to an EXT instruction, whose immediate is a byte offset
8286   // of at most 255; hence the splice offset must be below 2048 bits.
8287   if (IdxVal >= 0 &&
8288       IdxVal < int64_t(2048 / Ty.getVectorElementType().getSizeInBits()))
8289     return Op;
8290 
8291   return SDValue();
8292 }
8293 
8294 SDValue AArch64TargetLowering::LowerSELECT_CC(SDValue Op,
8295                                               SelectionDAG &DAG) const {
8296   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
8297   SDValue LHS = Op.getOperand(0);
8298   SDValue RHS = Op.getOperand(1);
8299   SDValue TVal = Op.getOperand(2);
8300   SDValue FVal = Op.getOperand(3);
8301   SDLoc DL(Op);
8302   return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
8303 }
8304 
8305 SDValue AArch64TargetLowering::LowerSELECT(SDValue Op,
8306                                            SelectionDAG &DAG) const {
8307   SDValue CCVal = Op->getOperand(0);
8308   SDValue TVal = Op->getOperand(1);
8309   SDValue FVal = Op->getOperand(2);
8310   SDLoc DL(Op);
8311 
8312   EVT Ty = Op.getValueType();
8313   if (Ty.isScalableVector()) {
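    // Splat the (truncated) scalar condition into an i1 predicate vector and
    // select with a VSELECT.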
8314     SDValue TruncCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, CCVal);
8315     MVT PredVT = MVT::getVectorVT(MVT::i1, Ty.getVectorElementCount());
8316     SDValue SplatPred = DAG.getNode(ISD::SPLAT_VECTOR, DL, PredVT, TruncCC);
8317     return DAG.getNode(ISD::VSELECT, DL, Ty, SplatPred, TVal, FVal);
8318   }
8319 
8320   if (useSVEForFixedLengthVectorVT(Ty)) {
8321     // FIXME: Ideally this would be the same as above using i1 types, however
8322     // for the moment we can't deal with fixed i1 vector types properly, so
8323     // instead extend the predicate to a result type sized integer vector.
8324     MVT SplatValVT = MVT::getIntegerVT(Ty.getScalarSizeInBits());
8325     MVT PredVT = MVT::getVectorVT(SplatValVT, Ty.getVectorElementCount());
8326     SDValue SplatVal = DAG.getSExtOrTrunc(CCVal, DL, SplatValVT);
8327     SDValue SplatPred = DAG.getNode(ISD::SPLAT_VECTOR, DL, PredVT, SplatVal);
8328     return DAG.getNode(ISD::VSELECT, DL, Ty, SplatPred, TVal, FVal);
8329   }
8330 
8331   // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a select
8332   // instruction.
8333   if (ISD::isOverflowIntrOpRes(CCVal)) {
8334     // Only lower legal XALUO ops.
8335     if (!DAG.getTargetLoweringInfo().isTypeLegal(CCVal->getValueType(0)))
8336       return SDValue();
8337 
8338     AArch64CC::CondCode OFCC;
8339     SDValue Value, Overflow;
8340     std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, CCVal.getValue(0), DAG);
8341     SDValue CCVal = DAG.getConstant(OFCC, DL, MVT::i32);
8342 
8343     return DAG.getNode(AArch64ISD::CSEL, DL, Op.getValueType(), TVal, FVal,
8344                        CCVal, Overflow);
8345   }
8346 
8347   // Lower it the same way as we would lower a SELECT_CC node.
8348   ISD::CondCode CC;
8349   SDValue LHS, RHS;
8350   if (CCVal.getOpcode() == ISD::SETCC) {
8351     LHS = CCVal.getOperand(0);
8352     RHS = CCVal.getOperand(1);
8353     CC = cast<CondCodeSDNode>(CCVal.getOperand(2))->get();
8354   } else {
8355     LHS = CCVal;
8356     RHS = DAG.getConstant(0, DL, CCVal.getValueType());
8357     CC = ISD::SETNE;
8358   }
8359   return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
8360 }
8361 
8362 SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op,
8363                                               SelectionDAG &DAG) const {
8364   // Jump table entries as PC relative offsets. No additional tweaking
8365   // is necessary here. Just get the address of the jump table.
8366   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
8367 
8368   if (getTargetMachine().getCodeModel() == CodeModel::Large &&
8369       !Subtarget->isTargetMachO()) {
8370     return getAddrLarge(JT, DAG);
8371   } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
8372     return getAddrTiny(JT, DAG);
8373   }
8374   return getAddr(JT, DAG);
8375 }
8376 
8377 SDValue AArch64TargetLowering::LowerBR_JT(SDValue Op,
8378                                           SelectionDAG &DAG) const {
8379   // Jump table entries as PC relative offsets. No additional tweaking
8380   // is necessary here. Just get the address of the jump table.
8381   SDLoc DL(Op);
8382   SDValue JT = Op.getOperand(1);
8383   SDValue Entry = Op.getOperand(2);
8384   int JTI = cast<JumpTableSDNode>(JT.getNode())->getIndex();
8385 
8386   auto *AFI = DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
8387   AFI->setJumpTableEntryInfo(JTI, 4, nullptr);
8388 
8389   SDNode *Dest =
8390       DAG.getMachineNode(AArch64::JumpTableDest32, DL, MVT::i64, MVT::i64, JT,
8391                          Entry, DAG.getTargetJumpTable(JTI, MVT::i32));
8392   return DAG.getNode(ISD::BRIND, DL, MVT::Other, Op.getOperand(0),
8393                      SDValue(Dest, 0));
8394 }
8395 
8396 SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op,
8397                                                  SelectionDAG &DAG) const {
8398   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
8399 
8400   if (getTargetMachine().getCodeModel() == CodeModel::Large) {
8401     // Use the GOT for the large code model on iOS.
8402     if (Subtarget->isTargetMachO()) {
8403       return getGOT(CP, DAG);
8404     }
8405     return getAddrLarge(CP, DAG);
8406   } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
8407     return getAddrTiny(CP, DAG);
8408   } else {
8409     return getAddr(CP, DAG);
8410   }
8411 }
8412 
8413 SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op,
8414                                                SelectionDAG &DAG) const {
8415   BlockAddressSDNode *BA = cast<BlockAddressSDNode>(Op);
8416   if (getTargetMachine().getCodeModel() == CodeModel::Large &&
8417       !Subtarget->isTargetMachO()) {
8418     return getAddrLarge(BA, DAG);
8419   } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
8420     return getAddrTiny(BA, DAG);
8421   }
8422   return getAddr(BA, DAG);
8423 }
8424 
8425 SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op,
8426                                                  SelectionDAG &DAG) const {
8427   AArch64FunctionInfo *FuncInfo =
8428       DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
8429 
8430   SDLoc DL(Op);
8431   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(),
8432                                  getPointerTy(DAG.getDataLayout()));
8433   FR = DAG.getZExtOrTrunc(FR, DL, getPointerMemTy(DAG.getDataLayout()));
8434   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8435   return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
8436                       MachinePointerInfo(SV));
8437 }
8438 
8439 SDValue AArch64TargetLowering::LowerWin64_VASTART(SDValue Op,
8440                                                   SelectionDAG &DAG) const {
8441   AArch64FunctionInfo *FuncInfo =
8442       DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
8443 
8444   SDLoc DL(Op);
8445   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsGPRSize() > 0
8446                                      ? FuncInfo->getVarArgsGPRIndex()
8447                                      : FuncInfo->getVarArgsStackIndex(),
8448                                  getPointerTy(DAG.getDataLayout()));
8449   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8450   return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
8451                       MachinePointerInfo(SV));
8452 }
8453 
8454 SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
8455                                                   SelectionDAG &DAG) const {
8456   // The layout of the va_list struct is specified in the AArch64 Procedure Call
8457   // Standard, section B.3.
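  //   struct va_list {
  //     void *__stack;   // next stack argument
  //     void *__gr_top;  // end of the GP register save area
  //     void *__vr_top;  // end of the FP/SIMD register save area
  //     int   __gr_offs; // negative offset from __gr_top to the next GP arg
  //     int   __vr_offs; // negative offset from __vr_top to the next FP arg
  //   };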
8458   MachineFunction &MF = DAG.getMachineFunction();
8459   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
8460   unsigned PtrSize = Subtarget->isTargetILP32() ? 4 : 8;
8461   auto PtrMemVT = getPointerMemTy(DAG.getDataLayout());
8462   auto PtrVT = getPointerTy(DAG.getDataLayout());
8463   SDLoc DL(Op);
8464 
8465   SDValue Chain = Op.getOperand(0);
8466   SDValue VAList = Op.getOperand(1);
8467   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8468   SmallVector<SDValue, 4> MemOps;
8469 
8470   // void *__stack at offset 0
8471   unsigned Offset = 0;
8472   SDValue Stack = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(), PtrVT);
8473   Stack = DAG.getZExtOrTrunc(Stack, DL, PtrMemVT);
8474   MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList,
8475                                 MachinePointerInfo(SV), Align(PtrSize)));
8476 
8477   // void *__gr_top at offset 8 (4 on ILP32)
8478   Offset += PtrSize;
8479   int GPRSize = FuncInfo->getVarArgsGPRSize();
8480   if (GPRSize > 0) {
8481     SDValue GRTop, GRTopAddr;
8482 
8483     GRTopAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8484                             DAG.getConstant(Offset, DL, PtrVT));
8485 
8486     GRTop = DAG.getFrameIndex(FuncInfo->getVarArgsGPRIndex(), PtrVT);
8487     GRTop = DAG.getNode(ISD::ADD, DL, PtrVT, GRTop,
8488                         DAG.getConstant(GPRSize, DL, PtrVT));
8489     GRTop = DAG.getZExtOrTrunc(GRTop, DL, PtrMemVT);
8490 
8491     MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
8492                                   MachinePointerInfo(SV, Offset),
8493                                   Align(PtrSize)));
8494   }
8495 
8496   // void *__vr_top at offset 16 (8 on ILP32)
8497   Offset += PtrSize;
8498   int FPRSize = FuncInfo->getVarArgsFPRSize();
8499   if (FPRSize > 0) {
8500     SDValue VRTop, VRTopAddr;
8501     VRTopAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8502                             DAG.getConstant(Offset, DL, PtrVT));
8503 
8504     VRTop = DAG.getFrameIndex(FuncInfo->getVarArgsFPRIndex(), PtrVT);
8505     VRTop = DAG.getNode(ISD::ADD, DL, PtrVT, VRTop,
8506                         DAG.getConstant(FPRSize, DL, PtrVT));
8507     VRTop = DAG.getZExtOrTrunc(VRTop, DL, PtrMemVT);
8508 
8509     MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
8510                                   MachinePointerInfo(SV, Offset),
8511                                   Align(PtrSize)));
8512   }
8513 
8514   // int __gr_offs at offset 24 (12 on ILP32)
8515   Offset += PtrSize;
8516   SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8517                                    DAG.getConstant(Offset, DL, PtrVT));
8518   MemOps.push_back(
8519       DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, DL, MVT::i32),
8520                    GROffsAddr, MachinePointerInfo(SV, Offset), Align(4)));
8521 
8522   // int __vr_offs at offset 28 (16 on ILP32)
8523   Offset += 4;
8524   SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8525                                    DAG.getConstant(Offset, DL, PtrVT));
8526   MemOps.push_back(
8527       DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, DL, MVT::i32),
8528                    VROffsAddr, MachinePointerInfo(SV, Offset), Align(4)));
8529 
8530   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
8531 }
8532 
8533 SDValue AArch64TargetLowering::LowerVASTART(SDValue Op,
8534                                             SelectionDAG &DAG) const {
8535   MachineFunction &MF = DAG.getMachineFunction();
8536 
8537   if (Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()))
8538     return LowerWin64_VASTART(Op, DAG);
8539   else if (Subtarget->isTargetDarwin())
8540     return LowerDarwin_VASTART(Op, DAG);
8541   else
8542     return LowerAAPCS_VASTART(Op, DAG);
8543 }
8544 
8545 SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op,
8546                                            SelectionDAG &DAG) const {
8547   // AAPCS has three pointers and two ints (= 32 bytes), while Darwin and
8548   // Windows use a single pointer.
8549   SDLoc DL(Op);
8550   unsigned PtrSize = Subtarget->isTargetILP32() ? 4 : 8;
8551   unsigned VaListSize =
8552       (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())
8553           ? PtrSize
8554           : Subtarget->isTargetILP32() ? 20 : 32;
8555   const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
8556   const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
8557 
8558   return DAG.getMemcpy(Op.getOperand(0), DL, Op.getOperand(1), Op.getOperand(2),
8559                        DAG.getConstant(VaListSize, DL, MVT::i32),
8560                        Align(PtrSize), false, false, false,
8561                        MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
8562 }
8563 
8564 SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
8565   assert(Subtarget->isTargetDarwin() &&
8566          "automatic va_arg instruction only works on Darwin");
8567 
8568   const Value *V = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
8569   EVT VT = Op.getValueType();
8570   SDLoc DL(Op);
8571   SDValue Chain = Op.getOperand(0);
8572   SDValue Addr = Op.getOperand(1);
8573   MaybeAlign Align(Op.getConstantOperandVal(3));
8574   unsigned MinSlotSize = Subtarget->isTargetILP32() ? 4 : 8;
8575   auto PtrVT = getPointerTy(DAG.getDataLayout());
8576   auto PtrMemVT = getPointerMemTy(DAG.getDataLayout());
8577   SDValue VAList =
8578       DAG.getLoad(PtrMemVT, DL, Chain, Addr, MachinePointerInfo(V));
8579   Chain = VAList.getValue(1);
8580   VAList = DAG.getZExtOrTrunc(VAList, DL, PtrVT);
8581 
8582   if (VT.isScalableVector())
8583     report_fatal_error("Passing SVE types to variadic functions is "
8584                        "currently not supported");
8585 
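  // Round the va_list pointer up to the argument's alignment:
  //   VAList = (VAList + Align - 1) & -Align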
8586   if (Align && *Align > MinSlotSize) {
8587     VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8588                          DAG.getConstant(Align->value() - 1, DL, PtrVT));
8589     VAList = DAG.getNode(ISD::AND, DL, PtrVT, VAList,
8590                          DAG.getConstant(-(int64_t)Align->value(), DL, PtrVT));
8591   }
8592 
8593   Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
8594   unsigned ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
8595 
8596   // Scalar integer and FP values smaller than 64 bits are implicitly extended
8597   // up to 64 bits.  At the very least, we have to increase the striding of the
8598   // vaargs list to match this, and for FP values we need to introduce
8599   // FP_ROUND nodes as well.
8600   if (VT.isInteger() && !VT.isVector())
8601     ArgSize = std::max(ArgSize, MinSlotSize);
8602   bool NeedFPTrunc = false;
8603   if (VT.isFloatingPoint() && !VT.isVector() && VT != MVT::f64) {
8604     ArgSize = 8;
8605     NeedFPTrunc = true;
8606   }
8607 
8608   // Increment the pointer, VAList, to the next vaarg
8609   SDValue VANext = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
8610                                DAG.getConstant(ArgSize, DL, PtrVT));
8611   VANext = DAG.getZExtOrTrunc(VANext, DL, PtrMemVT);
8612 
8613   // Store the incremented VAList to the legalized pointer
8614   SDValue APStore =
8615       DAG.getStore(Chain, DL, VANext, Addr, MachinePointerInfo(V));
8616 
8617   // Load the actual argument out of the pointer VAList
8618   if (NeedFPTrunc) {
8619     // Load the value as an f64.
8620     SDValue WideFP =
8621         DAG.getLoad(MVT::f64, DL, APStore, VAList, MachinePointerInfo());
8622     // Round the value down to an f32.
8623     SDValue NarrowFP = DAG.getNode(ISD::FP_ROUND, DL, VT, WideFP.getValue(0),
8624                                    DAG.getIntPtrConstant(1, DL));
8625     SDValue Ops[] = { NarrowFP, WideFP.getValue(1) };
8626     // Merge the rounded value with the chain output of the load.
8627     return DAG.getMergeValues(Ops, DL);
8628   }
8629 
8630   return DAG.getLoad(VT, DL, APStore, VAList, MachinePointerInfo());
8631 }
8632 
8633 SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
8634                                               SelectionDAG &DAG) const {
8635   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8636   MFI.setFrameAddressIsTaken(true);
8637 
8638   EVT VT = Op.getValueType();
8639   SDLoc DL(Op);
8640   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8641   SDValue FrameAddr =
8642       DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, MVT::i64);
8643   while (Depth--)
8644     FrameAddr = DAG.getLoad(VT, DL, DAG.getEntryNode(), FrameAddr,
8645                             MachinePointerInfo());
8646 
8647   if (Subtarget->isTargetILP32())
8648     FrameAddr = DAG.getNode(ISD::AssertZext, DL, MVT::i64, FrameAddr,
8649                             DAG.getValueType(VT));
8650 
8651   return FrameAddr;
8652 }
8653 
8654 SDValue AArch64TargetLowering::LowerSPONENTRY(SDValue Op,
8655                                               SelectionDAG &DAG) const {
8656   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8657 
8658   EVT VT = getPointerTy(DAG.getDataLayout());
8659   SDLoc DL(Op);
8660   int FI = MFI.CreateFixedObject(4, 0, false);
8661   return DAG.getFrameIndex(FI, VT);
8662 }
8663 
8664 #define GET_REGISTER_MATCHER
8665 #include "AArch64GenAsmMatcher.inc"
8666 
8667 // FIXME? Maybe this could be a TableGen attribute on some registers and
8668 // this table could be generated automatically from RegInfo.
8669 Register AArch64TargetLowering::
8670 getRegisterByName(const char* RegName, LLT VT, const MachineFunction &MF) const {
8671   Register Reg = MatchRegisterName(RegName);
8672   if (AArch64::X1 <= Reg && Reg <= AArch64::X28) {
8673     const MCRegisterInfo *MRI = Subtarget->getRegisterInfo();
8674     unsigned DwarfRegNum = MRI->getDwarfRegNum(Reg, false);
8675     if (!Subtarget->isXRegisterReserved(DwarfRegNum))
8676       Reg = 0;
8677   }
8678   if (Reg)
8679     return Reg;
8680   report_fatal_error(
8681       Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
8682 }
8683 
8684 SDValue AArch64TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
8685                                                      SelectionDAG &DAG) const {
8686   DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
8687 
8688   EVT VT = Op.getValueType();
8689   SDLoc DL(Op);
8690 
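  // In a standard AArch64 frame record the saved LR sits immediately above the
  // saved FP, so the return address slot is at FP + 8.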
8691   SDValue FrameAddr =
8692       DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, VT);
8693   SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout()));
8694 
8695   return DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset);
8696 }
8697 
8698 SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
8699                                                SelectionDAG &DAG) const {
8700   MachineFunction &MF = DAG.getMachineFunction();
8701   MachineFrameInfo &MFI = MF.getFrameInfo();
8702   MFI.setReturnAddressIsTaken(true);
8703 
8704   EVT VT = Op.getValueType();
8705   SDLoc DL(Op);
8706   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8707   SDValue ReturnAddress;
8708   if (Depth) {
8709     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
8710     SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout()));
8711     ReturnAddress = DAG.getLoad(
8712         VT, DL, DAG.getEntryNode(),
8713         DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset), MachinePointerInfo());
8714   } else {
8715     // Return LR, which contains the return address. Mark it an implicit
8716     // live-in.
8717     Register Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass);
8718     ReturnAddress = DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
8719   }
8720 
8721   // The XPACLRI instruction assembles to a hint-space instruction before
8722   // Armv8.3-A, and can therefore be safely used on any pre-Armv8.3-A
8723   // architecture. On Armv8.3-A and onwards XPACI is available, so use that
8724   // instead.
8725   SDNode *St;
8726   if (Subtarget->hasPAuth()) {
8727     St = DAG.getMachineNode(AArch64::XPACI, DL, VT, ReturnAddress);
8728   } else {
8729     // XPACLRI operates on LR therefore we must move the operand accordingly.
8730     SDValue Chain =
8731         DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::LR, ReturnAddress);
8732     St = DAG.getMachineNode(AArch64::XPACLRI, DL, VT, Chain);
8733   }
8734   return SDValue(St, 0);
8735 }
8736 
/// LowerShiftParts - Lower SHL_PARTS/SRA_PARTS/SRL_PARTS, which return two
/// i64 values and take a 2 x i64 value to shift plus a shift amount.
8739 SDValue AArch64TargetLowering::LowerShiftParts(SDValue Op,
8740                                                SelectionDAG &DAG) const {
8741   SDValue Lo, Hi;
8742   expandShiftParts(Op.getNode(), Lo, Hi, DAG);
8743   return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
8744 }
8745 
8746 bool AArch64TargetLowering::isOffsetFoldingLegal(
8747     const GlobalAddressSDNode *GA) const {
8748   // Offsets are folded in the DAG combine rather than here so that we can
8749   // intelligently choose an offset based on the uses.
8750   return false;
8751 }
8752 
8753 bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
8754                                          bool OptForSize) const {
8755   bool IsLegal = false;
  // We can materialize #0.0 as fmov $Rd, XZR for the 64-bit and 32-bit cases,
  // and for the 16-bit case when the target has full fp16 support.
8758   // FIXME: We should be able to handle f128 as well with a clever lowering.
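  // For reference, the 8-bit fmov immediate encodes values of the form
  // (-1)^s * (n/16) * 2^r with n in [16, 31] and r in [-3, 4] (e.g. 1.0, 0.5,
  // 3.875 or 31.0); the AArch64_AM::getFP*Imm helpers below return -1 for
  // anything not representable.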
8759   const APInt ImmInt = Imm.bitcastToAPInt();
8760   if (VT == MVT::f64)
8761     IsLegal = AArch64_AM::getFP64Imm(ImmInt) != -1 || Imm.isPosZero();
8762   else if (VT == MVT::f32)
8763     IsLegal = AArch64_AM::getFP32Imm(ImmInt) != -1 || Imm.isPosZero();
8764   else if (VT == MVT::f16 && Subtarget->hasFullFP16())
8765     IsLegal = AArch64_AM::getFP16Imm(ImmInt) != -1 || Imm.isPosZero();
  // TODO: fmov h0, w0 is also legal, however we don't have an isel pattern to
  //       generate that fmov.
8768 
  // If we cannot materialize the immediate in the fmov immediate field, check
  // whether the value can be encoded as the immediate operand of a logical
  // instruction. The immediate value will be created with either MOVZ, MOVN,
  // or ORR.
8772   if (!IsLegal && (VT == MVT::f64 || VT == MVT::f32)) {
    // The cost is actually exactly the same for mov+fmov vs. adrp+ldr;
    // however the mov+fmov sequence is always better because of the reduced
    // cache pressure. The timings are still the same if you consider
    // movw+movk+fmov vs. adrp+ldr (it's one instruction longer, but the
    // movw+movk is fused). So we limit ourselves to at most 2 instructions.
8778     SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
    AArch64_IMM::expandMOVImm(ImmInt.getZExtValue(), VT.getSizeInBits(),
                              Insn);
8781     unsigned Limit = (OptForSize ? 1 : (Subtarget->hasFuseLiterals() ? 5 : 2));
8782     IsLegal = Insn.size() <= Limit;
8783   }
8784 
8785   LLVM_DEBUG(dbgs() << (IsLegal ? "Legal " : "Illegal ") << VT.getEVTString()
8786                     << " imm value: "; Imm.dump(););
8787   return IsLegal;
8788 }
8789 
8790 //===----------------------------------------------------------------------===//
8791 //                          AArch64 Optimization Hooks
8792 //===----------------------------------------------------------------------===//
8793 
8794 static SDValue getEstimate(const AArch64Subtarget *ST, unsigned Opcode,
8795                            SDValue Operand, SelectionDAG &DAG,
8796                            int &ExtraSteps) {
8797   EVT VT = Operand.getValueType();
8798   if ((ST->hasNEON() &&
8799        (VT == MVT::f64 || VT == MVT::v1f64 || VT == MVT::v2f64 ||
8800         VT == MVT::f32 || VT == MVT::v1f32 || VT == MVT::v2f32 ||
8801         VT == MVT::v4f32)) ||
8802       (ST->hasSVE() &&
8803        (VT == MVT::nxv8f16 || VT == MVT::nxv4f32 || VT == MVT::nxv2f64))) {
8804     if (ExtraSteps == TargetLoweringBase::ReciprocalEstimate::Unspecified)
8805       // For the reciprocal estimates, convergence is quadratic, so the number
8806       // of digits is doubled after each iteration.  In ARMv8, the accuracy of
8807       // the initial estimate is 2^-8.  Thus the number of extra steps to refine
8808       // the result for float (23 mantissa bits) is 2 and for double (52
8809       // mantissa bits) is 3.
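      // For example: one refinement step gives ~2^-16, two give ~2^-32
      // (enough for f32's 24 significand bits), and three give ~2^-64
      // (enough for f64's 53 significand bits).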
8810       ExtraSteps = VT.getScalarType() == MVT::f64 ? 3 : 2;
8811 
8812     return DAG.getNode(Opcode, SDLoc(Operand), VT, Operand);
8813   }
8814 
8815   return SDValue();
8816 }
8817 
8818 SDValue
8819 AArch64TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
8820                                         const DenormalMode &Mode) const {
8821   SDLoc DL(Op);
8822   EVT VT = Op.getValueType();
8823   EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
8824   SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
8825   return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
8826 }
8827 
8828 SDValue
8829 AArch64TargetLowering::getSqrtResultForDenormInput(SDValue Op,
8830                                                    SelectionDAG &DAG) const {
8831   return Op;
8832 }
8833 
8834 SDValue AArch64TargetLowering::getSqrtEstimate(SDValue Operand,
8835                                                SelectionDAG &DAG, int Enabled,
8836                                                int &ExtraSteps,
8837                                                bool &UseOneConst,
8838                                                bool Reciprocal) const {
8839   if (Enabled == ReciprocalEstimate::Enabled ||
8840       (Enabled == ReciprocalEstimate::Unspecified && Subtarget->useRSqrt()))
8841     if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRSQRTE, Operand,
8842                                        DAG, ExtraSteps)) {
8843       SDLoc DL(Operand);
8844       EVT VT = Operand.getValueType();
8845 
8846       SDNodeFlags Flags;
8847       Flags.setAllowReassociation(true);
8848 
8849       // Newton reciprocal square root iteration: E * 0.5 * (3 - X * E^2)
8850       // AArch64 reciprocal square root iteration instruction: 0.5 * (3 - M * N)
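      // Each loop iteration below computes
      //   Estimate = Estimate * FRSQRTS(Operand, Estimate * Estimate)
      //            = Estimate * (3 - Operand * Estimate^2) / 2,
      // i.e. one Newton-Raphson step for 1/sqrt(Operand).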
8851       for (int i = ExtraSteps; i > 0; --i) {
8852         SDValue Step = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Estimate,
8853                                    Flags);
8854         Step = DAG.getNode(AArch64ISD::FRSQRTS, DL, VT, Operand, Step, Flags);
8855         Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags);
8856       }
8857       if (!Reciprocal)
8858         Estimate = DAG.getNode(ISD::FMUL, DL, VT, Operand, Estimate, Flags);
8859 
8860       ExtraSteps = 0;
8861       return Estimate;
8862     }
8863 
8864   return SDValue();
8865 }
8866 
8867 SDValue AArch64TargetLowering::getRecipEstimate(SDValue Operand,
8868                                                 SelectionDAG &DAG, int Enabled,
8869                                                 int &ExtraSteps) const {
8870   if (Enabled == ReciprocalEstimate::Enabled)
8871     if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRECPE, Operand,
8872                                        DAG, ExtraSteps)) {
8873       SDLoc DL(Operand);
8874       EVT VT = Operand.getValueType();
8875 
8876       SDNodeFlags Flags;
8877       Flags.setAllowReassociation(true);
8878 
8879       // Newton reciprocal iteration: E * (2 - X * E)
8880       // AArch64 reciprocal iteration instruction: (2 - M * N)
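      // Each loop iteration below computes
      //   Estimate = Estimate * FRECPS(Operand, Estimate)
      //            = Estimate * (2 - Operand * Estimate),
      // i.e. one Newton-Raphson step for 1/Operand.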
8881       for (int i = ExtraSteps; i > 0; --i) {
8882         SDValue Step = DAG.getNode(AArch64ISD::FRECPS, DL, VT, Operand,
8883                                    Estimate, Flags);
8884         Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags);
8885       }
8886 
8887       ExtraSteps = 0;
8888       return Estimate;
8889     }
8890 
8891   return SDValue();
8892 }
8893 
8894 //===----------------------------------------------------------------------===//
8895 //                          AArch64 Inline Assembly Support
8896 //===----------------------------------------------------------------------===//
8897 
8898 // Table of Constraints
// TODO: This is the current set of constraints supported by ARM for the
// compiler; not all of them may make sense.
8901 //
8902 // r - A general register
8903 // w - An FP/SIMD register of some size in the range v0-v31
8904 // x - An FP/SIMD register of some size in the range v0-v15
8905 // I - Constant that can be used with an ADD instruction
8906 // J - Constant that can be used with a SUB instruction
8907 // K - Constant that can be used with a 32-bit logical instruction
8908 // L - Constant that can be used with a 64-bit logical instruction
8909 // M - Constant that can be used as a 32-bit MOV immediate
8910 // N - Constant that can be used as a 64-bit MOV immediate
8911 // Q - A memory reference with base register and no offset
8912 // S - A symbolic address
8913 // Y - Floating point constant zero
8914 // Z - Integer constant zero
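//
//   An illustrative (hypothetical) use of the 'r' and 'I' constraints, where
//   Out and In are 64-bit integer variables:
//     asm("add %0, %1, %2" : "=r"(Out) : "r"(In), "I"(4095));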
8915 //
8916 //   Note that general register operands will be output using their 64-bit x
8917 // register name, whatever the size of the variable, unless the asm operand
8918 // is prefixed by the %w modifier. Floating-point and SIMD register operands
8919 // will be output with the v prefix unless prefixed by the %b, %h, %s, %d or
8920 // %q modifier.
8921 const char *AArch64TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
  // At this point, we have to lower this constraint to something else, so we
  // lower it to an "r" or "w". However, by doing this we will force the result
  // to be in a register, while the X constraint is much more permissive.
8925   //
8926   // Although we are correct (we are free to emit anything, without
8927   // constraints), we might break use cases that would expect us to be more
8928   // efficient and emit something else.
8929   if (!Subtarget->hasFPARMv8())
8930     return "r";
8931 
8932   if (ConstraintVT.isFloatingPoint())
8933     return "w";
8934 
8935   if (ConstraintVT.isVector() &&
8936      (ConstraintVT.getSizeInBits() == 64 ||
8937       ConstraintVT.getSizeInBits() == 128))
8938     return "w";
8939 
8940   return "r";
8941 }
8942 
8943 enum PredicateConstraint {
8944   Upl,
8945   Upa,
8946   Invalid
8947 };
8948 
8949 static PredicateConstraint parsePredicateConstraint(StringRef Constraint) {
8950   PredicateConstraint P = PredicateConstraint::Invalid;
8951   if (Constraint == "Upa")
8952     P = PredicateConstraint::Upa;
8953   if (Constraint == "Upl")
8954     P = PredicateConstraint::Upl;
8955   return P;
8956 }
8957 
8958 /// getConstraintType - Given a constraint letter, return the type of
8959 /// constraint it is for this target.
8960 AArch64TargetLowering::ConstraintType
8961 AArch64TargetLowering::getConstraintType(StringRef Constraint) const {
8962   if (Constraint.size() == 1) {
8963     switch (Constraint[0]) {
8964     default:
8965       break;
8966     case 'x':
8967     case 'w':
8968     case 'y':
8969       return C_RegisterClass;
8970     // An address with a single base register. Due to the way we
8971     // currently handle addresses it is the same as 'r'.
8972     case 'Q':
8973       return C_Memory;
8974     case 'I':
8975     case 'J':
8976     case 'K':
8977     case 'L':
8978     case 'M':
8979     case 'N':
8980     case 'Y':
8981     case 'Z':
8982       return C_Immediate;
8983     case 'z':
8984     case 'S': // A symbolic address
8985       return C_Other;
8986     }
8987   } else if (parsePredicateConstraint(Constraint) !=
8988              PredicateConstraint::Invalid)
8989       return C_RegisterClass;
8990   return TargetLowering::getConstraintType(Constraint);
8991 }
8992 
8993 /// Examine constraint type and operand type and determine a weight value.
8994 /// This object must already have been set up with the operand type
8995 /// and the current alternative constraint selected.
8996 TargetLowering::ConstraintWeight
8997 AArch64TargetLowering::getSingleConstraintMatchWeight(
8998     AsmOperandInfo &info, const char *constraint) const {
8999   ConstraintWeight weight = CW_Invalid;
9000   Value *CallOperandVal = info.CallOperandVal;
9001   // If we don't have a value, we can't do a match,
9002   // but allow it at the lowest weight.
9003   if (!CallOperandVal)
9004     return CW_Default;
9005   Type *type = CallOperandVal->getType();
9006   // Look at the constraint type.
9007   switch (*constraint) {
9008   default:
9009     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
9010     break;
9011   case 'x':
9012   case 'w':
9013   case 'y':
9014     if (type->isFloatingPointTy() || type->isVectorTy())
9015       weight = CW_Register;
9016     break;
9017   case 'z':
9018     weight = CW_Constant;
9019     break;
9020   case 'U':
9021     if (parsePredicateConstraint(constraint) != PredicateConstraint::Invalid)
9022       weight = CW_Register;
9023     break;
9024   }
9025   return weight;
9026 }
9027 
9028 std::pair<unsigned, const TargetRegisterClass *>
9029 AArch64TargetLowering::getRegForInlineAsmConstraint(
9030     const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
9031   if (Constraint.size() == 1) {
9032     switch (Constraint[0]) {
9033     case 'r':
9034       if (VT.isScalableVector())
9035         return std::make_pair(0U, nullptr);
9036       if (Subtarget->hasLS64() && VT.getSizeInBits() == 512)
9037         return std::make_pair(0U, &AArch64::GPR64x8ClassRegClass);
9038       if (VT.getFixedSizeInBits() == 64)
9039         return std::make_pair(0U, &AArch64::GPR64commonRegClass);
9040       return std::make_pair(0U, &AArch64::GPR32commonRegClass);
9041     case 'w': {
9042       if (!Subtarget->hasFPARMv8())
9043         break;
9044       if (VT.isScalableVector()) {
9045         if (VT.getVectorElementType() != MVT::i1)
9046           return std::make_pair(0U, &AArch64::ZPRRegClass);
9047         return std::make_pair(0U, nullptr);
9048       }
9049       uint64_t VTSize = VT.getFixedSizeInBits();
9050       if (VTSize == 16)
9051         return std::make_pair(0U, &AArch64::FPR16RegClass);
9052       if (VTSize == 32)
9053         return std::make_pair(0U, &AArch64::FPR32RegClass);
9054       if (VTSize == 64)
9055         return std::make_pair(0U, &AArch64::FPR64RegClass);
9056       if (VTSize == 128)
9057         return std::make_pair(0U, &AArch64::FPR128RegClass);
9058       break;
9059     }
9060     // The instructions that this constraint is designed for can
9061     // only take 128-bit registers so just use that regclass.
9062     case 'x':
9063       if (!Subtarget->hasFPARMv8())
9064         break;
9065       if (VT.isScalableVector())
9066         return std::make_pair(0U, &AArch64::ZPR_4bRegClass);
9067       if (VT.getSizeInBits() == 128)
9068         return std::make_pair(0U, &AArch64::FPR128_loRegClass);
9069       break;
9070     case 'y':
9071       if (!Subtarget->hasFPARMv8())
9072         break;
9073       if (VT.isScalableVector())
9074         return std::make_pair(0U, &AArch64::ZPR_3bRegClass);
9075       break;
9076     }
9077   } else {
9078     PredicateConstraint PC = parsePredicateConstraint(Constraint);
9079     if (PC != PredicateConstraint::Invalid) {
9080       if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1)
9081         return std::make_pair(0U, nullptr);
9082       bool restricted = (PC == PredicateConstraint::Upl);
9083       return restricted ? std::make_pair(0U, &AArch64::PPR_3bRegClass)
9084                         : std::make_pair(0U, &AArch64::PPRRegClass);
9085     }
9086   }
9087   if (StringRef("{cc}").equals_insensitive(Constraint))
9088     return std::make_pair(unsigned(AArch64::NZCV), &AArch64::CCRRegClass);
9089 
9090   // Use the default implementation in TargetLowering to convert the register
9091   // constraint into a member of a register class.
9092   std::pair<unsigned, const TargetRegisterClass *> Res;
9093   Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
9094 
9095   // Not found as a standard register?
9096   if (!Res.second) {
9097     unsigned Size = Constraint.size();
9098     if ((Size == 4 || Size == 5) && Constraint[0] == '{' &&
9099         tolower(Constraint[1]) == 'v' && Constraint[Size - 1] == '}') {
9100       int RegNo;
9101       bool Failed = Constraint.slice(2, Size - 1).getAsInteger(10, RegNo);
9102       if (!Failed && RegNo >= 0 && RegNo <= 31) {
9103         // v0 - v31 are aliases of q0 - q31 or d0 - d31 depending on size.
        // By default we'll emit v0-v31 for this unless there's a modifier, in
        // which case we'll emit the appropriately sized register.
9106         if (VT != MVT::Other && VT.getSizeInBits() == 64) {
9107           Res.first = AArch64::FPR64RegClass.getRegister(RegNo);
9108           Res.second = &AArch64::FPR64RegClass;
9109         } else {
9110           Res.first = AArch64::FPR128RegClass.getRegister(RegNo);
9111           Res.second = &AArch64::FPR128RegClass;
9112         }
9113       }
9114     }
9115   }
9116 
9117   if (Res.second && !Subtarget->hasFPARMv8() &&
9118       !AArch64::GPR32allRegClass.hasSubClassEq(Res.second) &&
9119       !AArch64::GPR64allRegClass.hasSubClassEq(Res.second))
9120     return std::make_pair(0U, nullptr);
9121 
9122   return Res;
9123 }
9124 
9125 EVT AArch64TargetLowering::getAsmOperandValueType(const DataLayout &DL,
9126                                                   llvm::Type *Ty,
9127                                                   bool AllowUnknown) const {
9128   if (Subtarget->hasLS64() && Ty->isIntegerTy(512))
9129     return EVT(MVT::i64x8);
9130 
9131   return TargetLowering::getAsmOperandValueType(DL, Ty, AllowUnknown);
9132 }
9133 
9134 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
9135 /// vector.  If it is invalid, don't add anything to Ops.
9136 void AArch64TargetLowering::LowerAsmOperandForConstraint(
9137     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
9138     SelectionDAG &DAG) const {
9139   SDValue Result;
9140 
9141   // Currently only support length 1 constraints.
9142   if (Constraint.length() != 1)
9143     return;
9144 
9145   char ConstraintLetter = Constraint[0];
9146   switch (ConstraintLetter) {
9147   default:
9148     break;
9149 
  // This set of constraints deals with valid constants for various
  // instructions. Validate and return a target constant for them if we can.
9152   case 'z': {
9153     // 'z' maps to xzr or wzr so it needs an input of 0.
9154     if (!isNullConstant(Op))
9155       return;
9156 
9157     if (Op.getValueType() == MVT::i64)
9158       Result = DAG.getRegister(AArch64::XZR, MVT::i64);
9159     else
9160       Result = DAG.getRegister(AArch64::WZR, MVT::i32);
9161     break;
9162   }
9163   case 'S': {
9164     // An absolute symbolic address or label reference.
9165     if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9166       Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
9167                                           GA->getValueType(0));
9168     } else if (const BlockAddressSDNode *BA =
9169                    dyn_cast<BlockAddressSDNode>(Op)) {
9170       Result =
9171           DAG.getTargetBlockAddress(BA->getBlockAddress(), BA->getValueType(0));
9172     } else
9173       return;
9174     break;
9175   }
9176 
9177   case 'I':
9178   case 'J':
9179   case 'K':
9180   case 'L':
9181   case 'M':
9182   case 'N':
9183     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
9184     if (!C)
9185       return;
9186 
9187     // Grab the value and do some validation.
9188     uint64_t CVal = C->getZExtValue();
9189     switch (ConstraintLetter) {
9190     // The I constraint applies only to simple ADD or SUB immediate operands:
9191     // i.e. 0 to 4095 with optional shift by 12
9192     // The J constraint applies only to ADD or SUB immediates that would be
9193     // valid when negated, i.e. if [an add pattern] were to be output as a SUB
9194     // instruction [or vice versa], in other words -1 to -4095 with optional
9195     // left shift by 12.
9196     case 'I':
9197       if (isUInt<12>(CVal) || isShiftedUInt<12, 12>(CVal))
9198         break;
9199       return;
9200     case 'J': {
9201       uint64_t NVal = -C->getSExtValue();
9202       if (isUInt<12>(NVal) || isShiftedUInt<12, 12>(NVal)) {
9203         CVal = C->getSExtValue();
9204         break;
9205       }
9206       return;
9207     }
9208     // The K and L constraints apply *only* to logical immediates, including
9209     // what used to be the MOVI alias for ORR (though the MOVI alias has now
9210     // been removed and MOV should be used). So these constraints have to
9211     // distinguish between bit patterns that are valid 32-bit or 64-bit
9212     // "bitmask immediates": for example 0xaaaaaaaa is a valid bimm32 (K), but
9213     // not a valid bimm64 (L) where 0xaaaaaaaaaaaaaaaa would be valid, and vice
9214     // versa.
9215     case 'K':
9216       if (AArch64_AM::isLogicalImmediate(CVal, 32))
9217         break;
9218       return;
9219     case 'L':
9220       if (AArch64_AM::isLogicalImmediate(CVal, 64))
9221         break;
9222       return;
9223     // The M and N constraints are a superset of K and L respectively, for use
9224     // with the MOV (immediate) alias. As well as the logical immediates they
9225     // also match 32 or 64-bit immediates that can be loaded either using a
    // *single* MOVZ or MOVN, such as 32-bit 0x12340000, 0x00001234, 0xffffedca
9227     // (M) or 64-bit 0x1234000000000000 (N) etc.
9228     // As a note some of this code is liberally stolen from the asm parser.
9229     case 'M': {
9230       if (!isUInt<32>(CVal))
9231         return;
9232       if (AArch64_AM::isLogicalImmediate(CVal, 32))
9233         break;
9234       if ((CVal & 0xFFFF) == CVal)
9235         break;
9236       if ((CVal & 0xFFFF0000ULL) == CVal)
9237         break;
9238       uint64_t NCVal = ~(uint32_t)CVal;
9239       if ((NCVal & 0xFFFFULL) == NCVal)
9240         break;
9241       if ((NCVal & 0xFFFF0000ULL) == NCVal)
9242         break;
9243       return;
9244     }
9245     case 'N': {
9246       if (AArch64_AM::isLogicalImmediate(CVal, 64))
9247         break;
9248       if ((CVal & 0xFFFFULL) == CVal)
9249         break;
9250       if ((CVal & 0xFFFF0000ULL) == CVal)
9251         break;
9252       if ((CVal & 0xFFFF00000000ULL) == CVal)
9253         break;
9254       if ((CVal & 0xFFFF000000000000ULL) == CVal)
9255         break;
9256       uint64_t NCVal = ~CVal;
9257       if ((NCVal & 0xFFFFULL) == NCVal)
9258         break;
9259       if ((NCVal & 0xFFFF0000ULL) == NCVal)
9260         break;
9261       if ((NCVal & 0xFFFF00000000ULL) == NCVal)
9262         break;
9263       if ((NCVal & 0xFFFF000000000000ULL) == NCVal)
9264         break;
9265       return;
9266     }
9267     default:
9268       return;
9269     }
9270 
9271     // All assembler immediates are 64-bit integers.
9272     Result = DAG.getTargetConstant(CVal, SDLoc(Op), MVT::i64);
9273     break;
9274   }
9275 
9276   if (Result.getNode()) {
9277     Ops.push_back(Result);
9278     return;
9279   }
9280 
9281   return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
9282 }
9283 
9284 //===----------------------------------------------------------------------===//
9285 //                     AArch64 Advanced SIMD Support
9286 //===----------------------------------------------------------------------===//
9287 
9288 /// WidenVector - Given a value in the V64 register class, produce the
9289 /// equivalent value in the V128 register class.
9290 static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG) {
9291   EVT VT = V64Reg.getValueType();
9292   unsigned NarrowSize = VT.getVectorNumElements();
9293   MVT EltTy = VT.getVectorElementType().getSimpleVT();
9294   MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
9295   SDLoc DL(V64Reg);
9296 
9297   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideTy, DAG.getUNDEF(WideTy),
9298                      V64Reg, DAG.getConstant(0, DL, MVT::i64));
9299 }
9300 
9301 /// getExtFactor - Determine the adjustment factor for the position when
9302 /// generating an "extract from vector registers" instruction.
9303 static unsigned getExtFactor(SDValue &V) {
9304   EVT EltType = V.getValueType().getVectorElementType();
9305   return EltType.getSizeInBits() / 8;
9306 }
9307 
9308 /// NarrowVector - Given a value in the V128 register class, produce the
9309 /// equivalent value in the V64 register class.
9310 static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
9311   EVT VT = V128Reg.getValueType();
9312   unsigned WideSize = VT.getVectorNumElements();
9313   MVT EltTy = VT.getVectorElementType().getSimpleVT();
9314   MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
9315   SDLoc DL(V128Reg);
9316 
9317   return DAG.getTargetExtractSubreg(AArch64::dsub, DL, NarrowTy, V128Reg);
9318 }
9319 
9320 // Gather data to see if the operation can be modelled as a
9321 // shuffle in combination with VEXTs.
9322 SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
9323                                                   SelectionDAG &DAG) const {
9324   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
9325   LLVM_DEBUG(dbgs() << "AArch64TargetLowering::ReconstructShuffle\n");
9326   SDLoc dl(Op);
9327   EVT VT = Op.getValueType();
9328   assert(!VT.isScalableVector() &&
9329          "Scalable vectors cannot be used with ISD::BUILD_VECTOR");
9330   unsigned NumElts = VT.getVectorNumElements();
9331 
9332   struct ShuffleSourceInfo {
9333     SDValue Vec;
9334     unsigned MinElt;
9335     unsigned MaxElt;
9336 
9337     // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
9338     // be compatible with the shuffle we intend to construct. As a result
9339     // ShuffleVec will be some sliding window into the original Vec.
9340     SDValue ShuffleVec;
9341 
    // Code should guarantee that element i in Vec starts at element
    // "WindowBase + i * WindowScale" in ShuffleVec.
9344     int WindowBase;
9345     int WindowScale;
9346 
9347     ShuffleSourceInfo(SDValue Vec)
9348       : Vec(Vec), MinElt(std::numeric_limits<unsigned>::max()), MaxElt(0),
9349           ShuffleVec(Vec), WindowBase(0), WindowScale(1) {}
9350 
9351     bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
9352   };
9353 
9354   // First gather all vectors used as an immediate source for this BUILD_VECTOR
9355   // node.
9356   SmallVector<ShuffleSourceInfo, 2> Sources;
9357   for (unsigned i = 0; i < NumElts; ++i) {
9358     SDValue V = Op.getOperand(i);
9359     if (V.isUndef())
9360       continue;
9361     else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9362              !isa<ConstantSDNode>(V.getOperand(1)) ||
9363              V.getOperand(0).getValueType().isScalableVector()) {
9364       LLVM_DEBUG(
9365           dbgs() << "Reshuffle failed: "
9366                     "a shuffle can only come from building a vector from "
9367                     "various elements of other fixed-width vectors, provided "
9368                     "their indices are constant\n");
9369       return SDValue();
9370     }
9371 
9372     // Add this element source to the list if it's not already there.
9373     SDValue SourceVec = V.getOperand(0);
9374     auto Source = find(Sources, SourceVec);
9375     if (Source == Sources.end())
9376       Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));
9377 
9378     // Update the minimum and maximum lane number seen.
9379     unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
9380     Source->MinElt = std::min(Source->MinElt, EltNo);
9381     Source->MaxElt = std::max(Source->MaxElt, EltNo);
9382   }
9383 
9384   // If we have 3 or 4 sources, try to generate a TBL, which will at least be
9385   // better than moving to/from gpr registers for larger vectors.
9386   if ((Sources.size() == 3 || Sources.size() == 4) && NumElts > 4) {
9387     // Construct a mask for the tbl. We may need to adjust the index for types
9388     // larger than i8.
9389     SmallVector<unsigned, 16> Mask;
9390     unsigned OutputFactor = VT.getScalarSizeInBits() / 8;
9391     for (unsigned I = 0; I < NumElts; ++I) {
9392       SDValue V = Op.getOperand(I);
9393       if (V.isUndef()) {
9394         for (unsigned OF = 0; OF < OutputFactor; OF++)
9395           Mask.push_back(-1);
9396         continue;
9397       }
9398       // Set the Mask lanes adjusted for the size of the input and output
9399       // lanes. The Mask is always i8, so it will set OutputFactor lanes per
9400       // output element, adjusted in their positions per input and output types.
9401       unsigned Lane = V.getConstantOperandVal(1);
9402       for (unsigned S = 0; S < Sources.size(); S++) {
9403         if (V.getOperand(0) == Sources[S].Vec) {
9404           unsigned InputSize = Sources[S].Vec.getScalarValueSizeInBits();
9405           unsigned InputBase = 16 * S + Lane * InputSize / 8;
9406           for (unsigned OF = 0; OF < OutputFactor; OF++)
9407             Mask.push_back(InputBase + OF);
9408           break;
9409         }
9410       }
9411     }
9412 
    // Construct the tbl3/tbl4 out of an intrinsic, the sources converted to
    // v16i8, and the TBLMask.
9415     SmallVector<SDValue, 16> TBLOperands;
9416     TBLOperands.push_back(DAG.getConstant(Sources.size() == 3
9417                                               ? Intrinsic::aarch64_neon_tbl3
9418                                               : Intrinsic::aarch64_neon_tbl4,
9419                                           dl, MVT::i32));
9420     for (unsigned i = 0; i < Sources.size(); i++) {
9421       SDValue Src = Sources[i].Vec;
9422       EVT SrcVT = Src.getValueType();
9423       Src = DAG.getBitcast(SrcVT.is64BitVector() ? MVT::v8i8 : MVT::v16i8, Src);
9424       assert((SrcVT.is64BitVector() || SrcVT.is128BitVector()) &&
9425              "Expected a legally typed vector");
9426       if (SrcVT.is64BitVector())
9427         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, Src,
9428                           DAG.getUNDEF(MVT::v8i8));
9429       TBLOperands.push_back(Src);
9430     }
9431 
9432     SmallVector<SDValue, 16> TBLMask;
9433     for (unsigned i = 0; i < Mask.size(); i++)
9434       TBLMask.push_back(DAG.getConstant(Mask[i], dl, MVT::i32));
9435     assert((Mask.size() == 8 || Mask.size() == 16) &&
9436            "Expected a v8i8 or v16i8 Mask");
    TBLOperands.push_back(DAG.getBuildVector(
        Mask.size() == 8 ? MVT::v8i8 : MVT::v16i8, dl, TBLMask));
9439 
9440     SDValue Shuffle =
9441         DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
9442                     Mask.size() == 8 ? MVT::v8i8 : MVT::v16i8, TBLOperands);
9443     return DAG.getBitcast(VT, Shuffle);
9444   }
9445 
9446   if (Sources.size() > 2) {
9447     LLVM_DEBUG(dbgs() << "Reshuffle failed: currently only do something "
9448                       << "sensible when at most two source vectors are "
9449                       << "involved\n");
9450     return SDValue();
9451   }
9452 
9453   // Find out the smallest element size among result and two sources, and use
9454   // it as element size to build the shuffle_vector.
9455   EVT SmallestEltTy = VT.getVectorElementType();
9456   for (auto &Source : Sources) {
9457     EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
9458     if (SrcEltTy.bitsLT(SmallestEltTy)) {
9459       SmallestEltTy = SrcEltTy;
9460     }
9461   }
9462   unsigned ResMultiplier =
9463       VT.getScalarSizeInBits() / SmallestEltTy.getFixedSizeInBits();
9464   uint64_t VTSize = VT.getFixedSizeInBits();
9465   NumElts = VTSize / SmallestEltTy.getFixedSizeInBits();
9466   EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
9467 
9468   // If the source vector is too wide or too narrow, we may nevertheless be able
9469   // to construct a compatible shuffle either by concatenating it with UNDEF or
9470   // extracting a suitable range of elements.
9471   for (auto &Src : Sources) {
9472     EVT SrcVT = Src.ShuffleVec.getValueType();
9473 
9474     TypeSize SrcVTSize = SrcVT.getSizeInBits();
9475     if (SrcVTSize == TypeSize::Fixed(VTSize))
9476       continue;
9477 
9478     // This stage of the search produces a source with the same element type as
9479     // the original, but with a total width matching the BUILD_VECTOR output.
9480     EVT EltVT = SrcVT.getVectorElementType();
9481     unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits();
9482     EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
9483 
9484     if (SrcVTSize.getFixedValue() < VTSize) {
9485       assert(2 * SrcVTSize == VTSize);
9486       // We can pad out the smaller vector for free, so if it's part of a
9487       // shuffle...
9488       Src.ShuffleVec =
9489           DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
9490                       DAG.getUNDEF(Src.ShuffleVec.getValueType()));
9491       continue;
9492     }
9493 
9494     if (SrcVTSize.getFixedValue() != 2 * VTSize) {
9495       LLVM_DEBUG(
9496           dbgs() << "Reshuffle failed: result vector too small to extract\n");
9497       return SDValue();
9498     }
9499 
9500     if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
9501       LLVM_DEBUG(
9502           dbgs() << "Reshuffle failed: span too large for a VEXT to cope\n");
9503       return SDValue();
9504     }
9505 
9506     if (Src.MinElt >= NumSrcElts) {
9507       // The extraction can just take the second half
9508       Src.ShuffleVec =
9509           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
9510                       DAG.getConstant(NumSrcElts, dl, MVT::i64));
9511       Src.WindowBase = -NumSrcElts;
9512     } else if (Src.MaxElt < NumSrcElts) {
9513       // The extraction can just take the first half
9514       Src.ShuffleVec =
9515           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
9516                       DAG.getConstant(0, dl, MVT::i64));
9517     } else {
9518       // An actual VEXT is needed
9519       SDValue VEXTSrc1 =
9520           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
9521                       DAG.getConstant(0, dl, MVT::i64));
9522       SDValue VEXTSrc2 =
9523           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
9524                       DAG.getConstant(NumSrcElts, dl, MVT::i64));
9525       unsigned Imm = Src.MinElt * getExtFactor(VEXTSrc1);
9526 
9527       if (!SrcVT.is64BitVector()) {
9528         LLVM_DEBUG(
9529           dbgs() << "Reshuffle failed: don't know how to lower AArch64ISD::EXT "
9530                     "for SVE vectors.");
9531         return SDValue();
9532       }
9533 
9534       Src.ShuffleVec = DAG.getNode(AArch64ISD::EXT, dl, DestVT, VEXTSrc1,
9535                                    VEXTSrc2,
9536                                    DAG.getConstant(Imm, dl, MVT::i32));
9537       Src.WindowBase = -Src.MinElt;
9538     }
9539   }
9540 
9541   // Another possible incompatibility occurs from the vector element types. We
9542   // can fix this by bitcasting the source vectors to the same type we intend
9543   // for the shuffle.
9544   for (auto &Src : Sources) {
9545     EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
9546     if (SrcEltTy == SmallestEltTy)
9547       continue;
9548     assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
9549     Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec);
9550     Src.WindowScale =
9551         SrcEltTy.getFixedSizeInBits() / SmallestEltTy.getFixedSizeInBits();
9552     Src.WindowBase *= Src.WindowScale;
9553   }
9554 
9555   // Final check before we try to actually produce a shuffle.
9556   LLVM_DEBUG(for (auto Src
9557                   : Sources)
9558                  assert(Src.ShuffleVec.getValueType() == ShuffleVT););
9559 
9560   // The stars all align, our next step is to produce the mask for the shuffle.
9561   SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
9562   int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
9563   for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
9564     SDValue Entry = Op.getOperand(i);
9565     if (Entry.isUndef())
9566       continue;
9567 
9568     auto Src = find(Sources, Entry.getOperand(0));
9569     int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();
9570 
9571     // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
9572     // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
9573     // segment.
9574     EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
9575     int BitsDefined = std::min(OrigEltTy.getScalarSizeInBits(),
9576                                VT.getScalarSizeInBits());
9577     int LanesDefined = BitsDefined / BitsPerShuffleLane;
9578 
9579     // This source is expected to fill ResMultiplier lanes of the final shuffle,
9580     // starting at the appropriate offset.
9581     int *LaneMask = &Mask[i * ResMultiplier];
9582 
9583     int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
9584     ExtractBase += NumElts * (Src - Sources.begin());
9585     for (int j = 0; j < LanesDefined; ++j)
9586       LaneMask[j] = ExtractBase + j;
9587   }
9588 
9589   // Final check before we try to produce nonsense...
9590   if (!isShuffleMaskLegal(Mask, ShuffleVT)) {
9591     LLVM_DEBUG(dbgs() << "Reshuffle failed: illegal shuffle mask\n");
9592     return SDValue();
9593   }
9594 
9595   SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
9596   for (unsigned i = 0; i < Sources.size(); ++i)
9597     ShuffleOps[i] = Sources[i].ShuffleVec;
9598 
9599   SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
9600                                          ShuffleOps[1], Mask);
9601   SDValue V = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
9602 
9603   LLVM_DEBUG(dbgs() << "Reshuffle, creating node: "; Shuffle.dump();
9604              dbgs() << "Reshuffle, creating node: "; V.dump(););
9605 
9606   return V;
9607 }
9608 
9609 // check if an EXT instruction can handle the shuffle mask when the
9610 // vector sources of the shuffle are the same.
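// For example, for <8 x i8> the mask <3, 4, 5, 6, 7, 0, 1, 2> is a
// single-source EXT with Imm = 3.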
9611 static bool isSingletonEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
9612   unsigned NumElts = VT.getVectorNumElements();
9613 
9614   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
9615   if (M[0] < 0)
9616     return false;
9617 
9618   Imm = M[0];
9619 
9620   // If this is a VEXT shuffle, the immediate value is the index of the first
9621   // element.  The other shuffle indices must be the successive elements after
9622   // the first one.
9623   unsigned ExpectedElt = Imm;
9624   for (unsigned i = 1; i < NumElts; ++i) {
9625     // Increment the expected index.  If it wraps around, just follow it
9626     // back to index zero and keep going.
9627     ++ExpectedElt;
9628     if (ExpectedElt == NumElts)
9629       ExpectedElt = 0;
9630 
9631     if (M[i] < 0)
9632       continue; // ignore UNDEF indices
9633     if (ExpectedElt != static_cast<unsigned>(M[i]))
9634       return false;
9635   }
9636 
9637   return true;
9638 }
9639 
// Detect patterns of a0,a1,a2,a3,b0,b1,b2,b3,c0,c1,c2,c3,d0,d1,d2,d3 from
// v4i16 or v4i32 sources. This is really a truncate, which we can construct
// out of (legal) concats and truncate nodes.
static SDValue ReconstructTruncateFromBuildVector(SDValue V,
                                                  SelectionDAG &DAG) {
9644   if (V.getValueType() != MVT::v16i8)
9645     return SDValue();
9646   assert(V.getNumOperands() == 16 && "Expected 16 operands on the BUILDVECTOR");
9647 
9648   for (unsigned X = 0; X < 4; X++) {
9649     // Check the first item in each group is an extract from lane 0 of a v4i32
9650     // or v4i16.
9651     SDValue BaseExt = V.getOperand(X * 4);
9652     if (BaseExt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9653         (BaseExt.getOperand(0).getValueType() != MVT::v4i16 &&
9654          BaseExt.getOperand(0).getValueType() != MVT::v4i32) ||
9655         !isa<ConstantSDNode>(BaseExt.getOperand(1)) ||
9656         BaseExt.getConstantOperandVal(1) != 0)
9657       return SDValue();
9658     SDValue Base = BaseExt.getOperand(0);
9659     // And check the other items are extracts from the same vector.
9660     for (unsigned Y = 1; Y < 4; Y++) {
9661       SDValue Ext = V.getOperand(X * 4 + Y);
9662       if (Ext.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9663           Ext.getOperand(0) != Base ||
9664           !isa<ConstantSDNode>(Ext.getOperand(1)) ||
9665           Ext.getConstantOperandVal(1) != Y)
9666         return SDValue();
9667     }
9668   }
9669 
  // Turn the buildvector into a series of truncates and concats, which will
  // become uzip1's. Any v4i32s we found get truncated to v4i16, which are
  // concatenated together to produce 2 v8i16. These are both truncated and
  // concatenated together.
9674   SDLoc DL(V);
9675   SDValue Trunc[4] = {
9676       V.getOperand(0).getOperand(0), V.getOperand(4).getOperand(0),
9677       V.getOperand(8).getOperand(0), V.getOperand(12).getOperand(0)};
9678   for (int I = 0; I < 4; I++)
9679     if (Trunc[I].getValueType() == MVT::v4i32)
9680       Trunc[I] = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i16, Trunc[I]);
9681   SDValue Concat0 =
9682       DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16, Trunc[0], Trunc[1]);
9683   SDValue Concat1 =
9684       DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16, Trunc[2], Trunc[3]);
9685   SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, Concat0);
9686   SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, Concat1);
9687   return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, Trunc0, Trunc1);
9688 }
9689 
9690 /// Check if a vector shuffle corresponds to a DUP instructions with a larger
9691 /// element width than the vector lane type. If that is the case the function
9692 /// returns true and writes the value of the DUP instruction lane operand into
9693 /// DupLaneOp
9694 static bool isWideDUPMask(ArrayRef<int> M, EVT VT, unsigned BlockSize,
9695                           unsigned &DupLaneOp) {
9696   assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
9697          "Only possible block sizes for wide DUP are: 16, 32, 64");
9698 
9699   if (BlockSize <= VT.getScalarSizeInBits())
9700     return false;
9701   if (BlockSize % VT.getScalarSizeInBits() != 0)
9702     return false;
9703   if (VT.getSizeInBits() % BlockSize != 0)
9704     return false;
9705 
9706   size_t SingleVecNumElements = VT.getVectorNumElements();
9707   size_t NumEltsPerBlock = BlockSize / VT.getScalarSizeInBits();
9708   size_t NumBlocks = VT.getSizeInBits() / BlockSize;
9709 
9710   // We are looking for masks like
9711   // [0, 1, 0, 1] or [2, 3, 2, 3] or [4, 5, 6, 7, 4, 5, 6, 7] where any element
  // might be replaced by 'undefined'. BlockElts will eventually contain
  // lane indices of the duplicated block (i.e. [0, 1], [2, 3] and [4, 5, 6, 7]
  // for the above examples).
9715   SmallVector<int, 8> BlockElts(NumEltsPerBlock, -1);
9716   for (size_t BlockIndex = 0; BlockIndex < NumBlocks; BlockIndex++)
9717     for (size_t I = 0; I < NumEltsPerBlock; I++) {
9718       int Elt = M[BlockIndex * NumEltsPerBlock + I];
9719       if (Elt < 0)
9720         continue;
9721       // For now we don't support shuffles that use the second operand
9722       if ((unsigned)Elt >= SingleVecNumElements)
9723         return false;
9724       if (BlockElts[I] < 0)
9725         BlockElts[I] = Elt;
9726       else if (BlockElts[I] != Elt)
9727         return false;
9728     }
9729 
9730   // We found a candidate block (possibly with some undefs). It must be a
9731   // sequence of consecutive integers starting with a value divisible by
9732   // NumEltsPerBlock with some values possibly replaced by undef-s.
9733 
9734   // Find first non-undef element
9735   auto FirstRealEltIter = find_if(BlockElts, [](int Elt) { return Elt >= 0; });
9736   assert(FirstRealEltIter != BlockElts.end() &&
9737          "Shuffle with all-undefs must have been caught by previous cases, "
9738          "e.g. isSplat()");
9739   if (FirstRealEltIter == BlockElts.end()) {
9740     DupLaneOp = 0;
9741     return true;
9742   }
9743 
9744   // Index of FirstRealElt in BlockElts
9745   size_t FirstRealIndex = FirstRealEltIter - BlockElts.begin();
9746 
9747   if ((unsigned)*FirstRealEltIter < FirstRealIndex)
9748     return false;
9749   // BlockElts[0] must have the following value if it isn't undef:
9750   size_t Elt0 = *FirstRealEltIter - FirstRealIndex;
9751 
9752   // Check the first element
9753   if (Elt0 % NumEltsPerBlock != 0)
9754     return false;
9755   // Check that the sequence indeed consists of consecutive integers (modulo
9756   // undefs)
9757   for (size_t I = 0; I < NumEltsPerBlock; I++)
9758     if (BlockElts[I] >= 0 && (unsigned)BlockElts[I] != Elt0 + I)
9759       return false;
9760 
9761   DupLaneOp = Elt0 / NumEltsPerBlock;
9762   return true;
9763 }
9764 
9765 // check if an EXT instruction can handle the shuffle mask when the
9766 // vector sources of the shuffle are different.
9767 static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
9768                       unsigned &Imm) {
9769   // Look for the first non-undef element.
9770   const int *FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });
9771 
  // Benefit from APInt to handle overflow when calculating expected element.
9773   unsigned NumElts = VT.getVectorNumElements();
9774   unsigned MaskBits = APInt(32, NumElts * 2).logBase2();
9775   APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);
9776   // The following shuffle indices must be the successive elements after the
9777   // first real element.
9778   const int *FirstWrongElt = std::find_if(FirstRealElt + 1, M.end(),
9779       [&](int Elt) {return Elt != ExpectedElt++ && Elt != -1;});
9780   if (FirstWrongElt != M.end())
9781     return false;
9782 
9783   // The index of an EXT is the first element if it is not UNDEF.
9784   // Watch out for the beginning UNDEFs. The EXT index should be the expected
9785   // value of the first element.  E.g.
9786   // <-1, -1, 3, ...> is treated as <1, 2, 3, ...>.
9787   // <-1, -1, 0, 1, ...> is treated as <2*NumElts-2, 2*NumElts-1, 0, 1, ...>.
9788   // ExpectedElt is the last mask index plus 1.
9789   Imm = ExpectedElt.getZExtValue();
9790 
  // There are two different cases that require reversing the input vectors.
  // For example, for vector <4 x i32> we have the following cases,
  // Case 1: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, -1, 0>)
  // Case 2: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, 7, 0>)
  // For both cases, we finally use mask <5, 6, 7, 0>, which requires
  // reversing the two input vectors.
9797   if (Imm < NumElts)
9798     ReverseEXT = true;
9799   else
9800     Imm -= NumElts;
9801 
9802   return true;
9803 }
9804 
9805 /// isREVMask - Check if a vector shuffle corresponds to a REV
9806 /// instruction with the specified blocksize.  (The order of the elements
9807 /// within each block of the vector is reversed.)
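/// For example, a REV32 of <8 x i16> swaps the two 16-bit lanes within each
/// 32-bit block, i.e. the mask <1, 0, 3, 2, 5, 4, 7, 6>.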
9808 static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
9809   assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
9810          "Only possible block sizes for REV are: 16, 32, 64");
9811 
9812   unsigned EltSz = VT.getScalarSizeInBits();
9813   if (EltSz == 64)
9814     return false;
9815 
9816   unsigned NumElts = VT.getVectorNumElements();
9817   unsigned BlockElts = M[0] + 1;
9818   // If the first shuffle index is UNDEF, be optimistic.
9819   if (M[0] < 0)
9820     BlockElts = BlockSize / EltSz;
9821 
9822   if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
9823     return false;
9824 
9825   for (unsigned i = 0; i < NumElts; ++i) {
9826     if (M[i] < 0)
9827       continue; // ignore UNDEF indices
9828     if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
9829       return false;
9830   }
9831 
9832   return true;
9833 }
9834 
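/// isZIPMask - Check whether a shuffle mask interleaves corresponding lanes of
/// the two operands, taking either their low halves (ZIP1, WhichResult = 0,
/// e.g. <0, 4, 1, 5> for <4 x i32>) or their high halves (ZIP2,
/// WhichResult = 1, e.g. <2, 6, 3, 7>).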
9835 static bool isZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9836   unsigned NumElts = VT.getVectorNumElements();
9837   if (NumElts % 2 != 0)
9838     return false;
9839   WhichResult = (M[0] == 0 ? 0 : 1);
9840   unsigned Idx = WhichResult * NumElts / 2;
9841   for (unsigned i = 0; i != NumElts; i += 2) {
9842     if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
9843         (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx + NumElts))
9844       return false;
9845     Idx += 1;
9846   }
9847 
9848   return true;
9849 }
9850 
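/// isUZPMask - Check whether a shuffle mask takes the even-numbered lanes
/// (UZP1, WhichResult = 0, e.g. <0, 2, 4, 6> for <4 x i32>) or the
/// odd-numbered lanes (UZP2, WhichResult = 1, e.g. <1, 3, 5, 7>) of the
/// concatenated operands.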
9851 static bool isUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9852   unsigned NumElts = VT.getVectorNumElements();
9853   WhichResult = (M[0] == 0 ? 0 : 1);
9854   for (unsigned i = 0; i != NumElts; ++i) {
9855     if (M[i] < 0)
9856       continue; // ignore UNDEF indices
9857     if ((unsigned)M[i] != 2 * i + WhichResult)
9858       return false;
9859   }
9860 
9861   return true;
9862 }
9863 
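/// isTRNMask - Check whether a shuffle mask transposes the two operands,
/// taking either the even lanes (TRN1, WhichResult = 0, e.g. <0, 4, 2, 6> for
/// <4 x i32>) or the odd lanes (TRN2, WhichResult = 1, e.g. <1, 5, 3, 7>).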
9864 static bool isTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9865   unsigned NumElts = VT.getVectorNumElements();
9866   if (NumElts % 2 != 0)
9867     return false;
9868   WhichResult = (M[0] == 0 ? 0 : 1);
9869   for (unsigned i = 0; i < NumElts; i += 2) {
9870     if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) ||
9871         (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + NumElts + WhichResult))
9872       return false;
9873   }
9874   return true;
9875 }
9876 
9877 /// isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of
9878 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
9879 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
9880 static bool isZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9881   unsigned NumElts = VT.getVectorNumElements();
9882   if (NumElts % 2 != 0)
9883     return false;
9884   WhichResult = (M[0] == 0 ? 0 : 1);
9885   unsigned Idx = WhichResult * NumElts / 2;
9886   for (unsigned i = 0; i != NumElts; i += 2) {
9887     if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
9888         (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx))
9889       return false;
9890     Idx += 1;
9891   }
9892 
9893   return true;
9894 }
9895 
9896 /// isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of
9897 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
9899 static bool isUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9900   unsigned Half = VT.getVectorNumElements() / 2;
9901   WhichResult = (M[0] == 0 ? 0 : 1);
9902   for (unsigned j = 0; j != 2; ++j) {
9903     unsigned Idx = WhichResult;
9904     for (unsigned i = 0; i != Half; ++i) {
9905       int MIdx = M[i + j * Half];
9906       if (MIdx >= 0 && (unsigned)MIdx != Idx)
9907         return false;
9908       Idx += 2;
9909     }
9910   }
9911 
9912   return true;
9913 }
9914 
9915 /// isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of
9916 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
9917 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
9918 static bool isTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
9919   unsigned NumElts = VT.getVectorNumElements();
9920   if (NumElts % 2 != 0)
9921     return false;
9922   WhichResult = (M[0] == 0 ? 0 : 1);
9923   for (unsigned i = 0; i < NumElts; i += 2) {
9924     if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) ||
9925         (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + WhichResult))
9926       return false;
9927   }
9928   return true;
9929 }
9930 
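/// isINSMask - Check for a mask that leaves all but one lane of one input
/// ("the destination") unchanged and takes the remaining lane from either
/// source, e.g. <0, 5, 2, 3> for 4 elements gives DstIsLeft = true and
/// Anomaly = 1.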
9931 static bool isINSMask(ArrayRef<int> M, int NumInputElements,
9932                       bool &DstIsLeft, int &Anomaly) {
9933   if (M.size() != static_cast<size_t>(NumInputElements))
9934     return false;
9935 
9936   int NumLHSMatch = 0, NumRHSMatch = 0;
9937   int LastLHSMismatch = -1, LastRHSMismatch = -1;
9938 
9939   for (int i = 0; i < NumInputElements; ++i) {
9940     if (M[i] == -1) {
9941       ++NumLHSMatch;
9942       ++NumRHSMatch;
9943       continue;
9944     }
9945 
9946     if (M[i] == i)
9947       ++NumLHSMatch;
9948     else
9949       LastLHSMismatch = i;
9950 
9951     if (M[i] == i + NumInputElements)
9952       ++NumRHSMatch;
9953     else
9954       LastRHSMismatch = i;
9955   }
9956 
9957   if (NumLHSMatch == NumInputElements - 1) {
9958     DstIsLeft = true;
9959     Anomaly = LastLHSMismatch;
9960     return true;
9961   } else if (NumRHSMatch == NumInputElements - 1) {
9962     DstIsLeft = false;
9963     Anomaly = LastRHSMismatch;
9964     return true;
9965   }
9966 
9967   return false;
9968 }
9969 
9970 static bool isConcatMask(ArrayRef<int> Mask, EVT VT, bool SplitLHS) {
9971   if (VT.getSizeInBits() != 128)
9972     return false;
9973 
9974   unsigned NumElts = VT.getVectorNumElements();
9975 
9976   for (int I = 0, E = NumElts / 2; I != E; I++) {
9977     if (Mask[I] != I)
9978       return false;
9979   }
9980 
9981   int Offset = NumElts / 2;
9982   for (int I = NumElts / 2, E = NumElts; I != E; I++) {
9983     if (Mask[I] != I + SplitLHS * Offset)
9984       return false;
9985   }
9986 
9987   return true;
9988 }
9989 
9990 static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) {
9991   SDLoc DL(Op);
9992   EVT VT = Op.getValueType();
9993   SDValue V0 = Op.getOperand(0);
9994   SDValue V1 = Op.getOperand(1);
9995   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
9996 
9997   if (VT.getVectorElementType() != V0.getValueType().getVectorElementType() ||
9998       VT.getVectorElementType() != V1.getValueType().getVectorElementType())
9999     return SDValue();
10000 
10001   bool SplitV0 = V0.getValueSizeInBits() == 128;
10002 
10003   if (!isConcatMask(Mask, VT, SplitV0))
10004     return SDValue();
10005 
10006   EVT CastVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
10007   if (SplitV0) {
10008     V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0,
10009                      DAG.getConstant(0, DL, MVT::i64));
10010   }
10011   if (V1.getValueSizeInBits() == 128) {
10012     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1,
10013                      DAG.getConstant(0, DL, MVT::i64));
10014   }
10015   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, V0, V1);
10016 }
10017 
/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle. ID is the perfect-shuffle
/// ID, V1 and V2 are the original shuffle inputs. PFEntry is the
/// perfect-shuffle table entry and LHS/RHS are the immediate inputs for this
/// stage of the shuffle.
10023 static SDValue GeneratePerfectShuffle(unsigned ID, SDValue V1,
10024                                       SDValue V2, unsigned PFEntry, SDValue LHS,
10025                                       SDValue RHS, SelectionDAG &DAG,
10026                                       const SDLoc &dl) {
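  // Each perfect-shuffle table entry packs an operation (OpNum, bits [29:26])
  // and two 13-bit operand shuffle IDs (LHSID in bits [25:13], RHSID in
  // bits [12:0]).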
10027   unsigned OpNum = (PFEntry >> 26) & 0x0F;
10028   unsigned LHSID = (PFEntry >> 13) & ((1 << 13) - 1);
10029   unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1);
10030 
10031   enum {
10032     OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
10033     OP_VREV,
10034     OP_VDUP0,
10035     OP_VDUP1,
10036     OP_VDUP2,
10037     OP_VDUP3,
10038     OP_VEXT1,
10039     OP_VEXT2,
10040     OP_VEXT3,
10041     OP_VUZPL,  // VUZP, left result
10042     OP_VUZPR,  // VUZP, right result
10043     OP_VZIPL,  // VZIP, left result
10044     OP_VZIPR,  // VZIP, right result
10045     OP_VTRNL,  // VTRN, left result
10046     OP_VTRNR,  // VTRN, right result
10047     OP_MOVLANE // Move lane. RHSID is the lane to move into
10048   };
10049 
10050   if (OpNum == OP_COPY) {
10051     if (LHSID == (1 * 9 + 2) * 9 + 3)
10052       return LHS;
10053     assert(LHSID == ((4 * 9 + 5) * 9 + 6) * 9 + 7 && "Illegal OP_COPY!");
10054     return RHS;
10055   }
10056 
10057   if (OpNum == OP_MOVLANE) {
10058     // Decompose a PerfectShuffle ID to get the Mask for lane Elt
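    // (A shuffle ID encodes the four output lanes as base-9 digits, most
    // significant lane first, with the digit 8 standing for an undef lane;
    // see the OP_COPY handling above.)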
10059     auto getPFIDLane = [](unsigned ID, int Elt) -> int {
10060       assert(Elt < 4 && "Expected Perfect Lanes to be less than 4");
10061       Elt = 3 - Elt;
10062       while (Elt > 0) {
10063         ID /= 9;
10064         Elt--;
10065       }
10066       return (ID % 9 == 8) ? -1 : ID % 9;
10067     };
10068 
    // For OP_MOVLANE shuffles, the RHSID represents the lane to move into. We
    // get the lane to move from the PFID, which is always from the original
    // vectors (V1 or V2).
10072     SDValue OpLHS = GeneratePerfectShuffle(
10073         LHSID, V1, V2, PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
10074     EVT VT = OpLHS.getValueType();
10075     assert(RHSID < 8 && "Expected a lane index for RHSID!");
10076     unsigned ExtLane = 0;
10077     SDValue Input;
10078 
    // An OP_MOVLANE is either a D mov (if bit 0x4 is set) or an S mov. D movs
    // move a pair of lanes at once by bitcasting to a wider element type.
10081     if (RHSID & 0x4) {
10082       int MaskElt = getPFIDLane(ID, (RHSID & 0x01) << 1) >> 1;
10083       if (MaskElt == -1)
10084         MaskElt = (getPFIDLane(ID, ((RHSID & 0x01) << 1) + 1) - 1) >> 1;
10085       assert(MaskElt >= 0 && "Didn't expect an undef movlane index!");
10086       ExtLane = MaskElt < 2 ? MaskElt : (MaskElt - 2);
10087       Input = MaskElt < 2 ? V1 : V2;
10088       if (VT.getScalarSizeInBits() == 16) {
10089         Input = DAG.getBitcast(MVT::v2f32, Input);
10090         OpLHS = DAG.getBitcast(MVT::v2f32, OpLHS);
10091       } else {
10092         assert(VT.getScalarSizeInBits() == 32 &&
               "Expected 16 or 32 bit shuffle elements");
10094         Input = DAG.getBitcast(MVT::v2f64, Input);
10095         OpLHS = DAG.getBitcast(MVT::v2f64, OpLHS);
10096       }
10097     } else {
10098       int MaskElt = getPFIDLane(ID, RHSID);
10099       assert(MaskElt >= 0 && "Didn't expect an undef movlane index!");
10100       ExtLane = MaskElt < 4 ? MaskElt : (MaskElt - 4);
10101       Input = MaskElt < 4 ? V1 : V2;
10102       // Be careful about creating illegal types. Use f16 instead of i16.
10103       if (VT == MVT::v4i16) {
10104         Input = DAG.getBitcast(MVT::v4f16, Input);
10105         OpLHS = DAG.getBitcast(MVT::v4f16, OpLHS);
10106       }
10107     }
10108     SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
10109                               Input.getValueType().getVectorElementType(),
10110                               Input, DAG.getVectorIdxConstant(ExtLane, dl));
10111     SDValue Ins =
10112         DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, Input.getValueType(), OpLHS,
10113                     Ext, DAG.getVectorIdxConstant(RHSID & 0x3, dl));
10114     return DAG.getBitcast(VT, Ins);
10115   }
10116 
10117   SDValue OpLHS, OpRHS;
10118   OpLHS = GeneratePerfectShuffle(LHSID, V1, V2, PerfectShuffleTable[LHSID], LHS,
10119                                  RHS, DAG, dl);
10120   OpRHS = GeneratePerfectShuffle(RHSID, V1, V2, PerfectShuffleTable[RHSID], LHS,
10121                                  RHS, DAG, dl);
10122   EVT VT = OpLHS.getValueType();
10123 
10124   switch (OpNum) {
10125   default:
10126     llvm_unreachable("Unknown shuffle opcode!");
10127   case OP_VREV:
10128     // VREV divides the vector in half and swaps within the half.
10129     if (VT.getVectorElementType() == MVT::i32 ||
10130         VT.getVectorElementType() == MVT::f32)
10131       return DAG.getNode(AArch64ISD::REV64, dl, VT, OpLHS);
10132     // vrev <4 x i16> -> REV32
10133     if (VT.getVectorElementType() == MVT::i16 ||
10134         VT.getVectorElementType() == MVT::f16 ||
10135         VT.getVectorElementType() == MVT::bf16)
10136       return DAG.getNode(AArch64ISD::REV32, dl, VT, OpLHS);
10137     // vrev <4 x i8> -> REV16
10138     assert(VT.getVectorElementType() == MVT::i8);
10139     return DAG.getNode(AArch64ISD::REV16, dl, VT, OpLHS);
10140   case OP_VDUP0:
10141   case OP_VDUP1:
10142   case OP_VDUP2:
10143   case OP_VDUP3: {
10144     EVT EltTy = VT.getVectorElementType();
10145     unsigned Opcode;
10146     if (EltTy == MVT::i8)
10147       Opcode = AArch64ISD::DUPLANE8;
10148     else if (EltTy == MVT::i16 || EltTy == MVT::f16 || EltTy == MVT::bf16)
10149       Opcode = AArch64ISD::DUPLANE16;
10150     else if (EltTy == MVT::i32 || EltTy == MVT::f32)
10151       Opcode = AArch64ISD::DUPLANE32;
10152     else if (EltTy == MVT::i64 || EltTy == MVT::f64)
10153       Opcode = AArch64ISD::DUPLANE64;
10154     else
10155       llvm_unreachable("Invalid vector element type?");
10156 
10157     if (VT.getSizeInBits() == 64)
10158       OpLHS = WidenVector(OpLHS, DAG);
10159     SDValue Lane = DAG.getConstant(OpNum - OP_VDUP0, dl, MVT::i64);
10160     return DAG.getNode(Opcode, dl, VT, OpLHS, Lane);
10161   }
10162   case OP_VEXT1:
10163   case OP_VEXT2:
10164   case OP_VEXT3: {
10165     unsigned Imm = (OpNum - OP_VEXT1 + 1) * getExtFactor(OpLHS);
10166     return DAG.getNode(AArch64ISD::EXT, dl, VT, OpLHS, OpRHS,
10167                        DAG.getConstant(Imm, dl, MVT::i32));
10168   }
10169   case OP_VUZPL:
10170     return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), OpLHS,
10171                        OpRHS);
10172   case OP_VUZPR:
10173     return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), OpLHS,
10174                        OpRHS);
10175   case OP_VZIPL:
10176     return DAG.getNode(AArch64ISD::ZIP1, dl, DAG.getVTList(VT, VT), OpLHS,
10177                        OpRHS);
10178   case OP_VZIPR:
10179     return DAG.getNode(AArch64ISD::ZIP2, dl, DAG.getVTList(VT, VT), OpLHS,
10180                        OpRHS);
10181   case OP_VTRNL:
10182     return DAG.getNode(AArch64ISD::TRN1, dl, DAG.getVTList(VT, VT), OpLHS,
10183                        OpRHS);
10184   case OP_VTRNR:
10185     return DAG.getNode(AArch64ISD::TRN2, dl, DAG.getVTList(VT, VT), OpLHS,
10186                        OpRHS);
10187   }
10188 }
10189 
10190 static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
10191                            SelectionDAG &DAG) {
10192   // Check to see if we can use the TBL instruction.
10193   SDValue V1 = Op.getOperand(0);
10194   SDValue V2 = Op.getOperand(1);
10195   SDLoc DL(Op);
10196 
10197   EVT EltVT = Op.getValueType().getVectorElementType();
10198   unsigned BytesPerElt = EltVT.getSizeInBits() / 8;
10199 
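  // If the first source is undef or all zeros, swap the sources so that the
  // live data is in V1; the byte indices built below are rotated by IndexLen
  // to compensate.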
10200   bool Swap = false;
10201   if (V1.isUndef() || isZerosVector(V1.getNode())) {
10202     std::swap(V1, V2);
10203     Swap = true;
10204   }
10205 
10206   // If the V2 source is undef or zero then we can use a tbl1, as tbl1 will fill
10207   // out of range values with 0s. We do need to make sure that any out-of-range
10208   // values are really out-of-range for a v16i8 vector.
10209   bool IsUndefOrZero = V2.isUndef() || isZerosVector(V2.getNode());
10210   MVT IndexVT = MVT::v8i8;
10211   unsigned IndexLen = 8;
10212   if (Op.getValueSizeInBits() == 128) {
10213     IndexVT = MVT::v16i8;
10214     IndexLen = 16;
10215   }
10216 
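  // Expand the shuffle mask into per-byte indices into the concatenated
  // sources. For example, a v4i16 mask of <0, 4, 1, 5> with two live 64-bit
  // inputs becomes the byte mask <0, 1, 8, 9, 2, 3, 10, 11>, where indices
  // 8..15 address the bytes of V2 in the 128-bit table.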
10217   SmallVector<SDValue, 8> TBLMask;
10218   for (int Val : ShuffleMask) {
10219     for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) {
10220       unsigned Offset = Byte + Val * BytesPerElt;
10221       if (Swap)
10222         Offset = Offset < IndexLen ? Offset + IndexLen : Offset - IndexLen;
10223       if (IsUndefOrZero && Offset >= IndexLen)
10224         Offset = 255;
10225       TBLMask.push_back(DAG.getConstant(Offset, DL, MVT::i32));
10226     }
10227   }
10228 
10229   SDValue V1Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V1);
10230   SDValue V2Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V2);
10231 
10232   SDValue Shuffle;
10233   if (IsUndefOrZero) {
10234     if (IndexLen == 8)
10235       V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V1Cst);
10236     Shuffle = DAG.getNode(
10237         ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
10238         DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
10239         DAG.getBuildVector(IndexVT, DL,
10240                            makeArrayRef(TBLMask.data(), IndexLen)));
10241   } else {
10242     if (IndexLen == 8) {
10243       V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V2Cst);
10244       Shuffle = DAG.getNode(
10245           ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
10246           DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
10247           DAG.getBuildVector(IndexVT, DL,
10248                              makeArrayRef(TBLMask.data(), IndexLen)));
10249     } else {
10250       // FIXME: We cannot, for the moment, emit a TBL2 instruction because we
10251       // cannot currently represent the register constraints on the input
10252       // table registers.
10253       //  Shuffle = DAG.getNode(AArch64ISD::TBL2, DL, IndexVT, V1Cst, V2Cst,
10254       //                   DAG.getBuildVector(IndexVT, DL, &TBLMask[0],
10255       //                   IndexLen));
10256       Shuffle = DAG.getNode(
10257           ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
10258           DAG.getConstant(Intrinsic::aarch64_neon_tbl2, DL, MVT::i32), V1Cst,
10259           V2Cst, DAG.getBuildVector(IndexVT, DL,
10260                                     makeArrayRef(TBLMask.data(), IndexLen)));
10261     }
10262   }
10263   return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Shuffle);
10264 }
10265 
10266 static unsigned getDUPLANEOp(EVT EltType) {
10267   if (EltType == MVT::i8)
10268     return AArch64ISD::DUPLANE8;
10269   if (EltType == MVT::i16 || EltType == MVT::f16 || EltType == MVT::bf16)
10270     return AArch64ISD::DUPLANE16;
10271   if (EltType == MVT::i32 || EltType == MVT::f32)
10272     return AArch64ISD::DUPLANE32;
10273   if (EltType == MVT::i64 || EltType == MVT::f64)
10274     return AArch64ISD::DUPLANE64;
10275 
10276   llvm_unreachable("Invalid vector element type?");
10277 }
10278 
10279 static SDValue constructDup(SDValue V, int Lane, SDLoc dl, EVT VT,
10280                             unsigned Opcode, SelectionDAG &DAG) {
10281   // Try to eliminate a bitcasted extract subvector before a DUPLANE.
10282   auto getScaledOffsetDup = [](SDValue BitCast, int &LaneC, MVT &CastVT) {
10283     // Match: dup (bitcast (extract_subv X, C)), LaneC
10284     if (BitCast.getOpcode() != ISD::BITCAST ||
10285         BitCast.getOperand(0).getOpcode() != ISD::EXTRACT_SUBVECTOR)
10286       return false;
10287 
10288     // The extract index must align in the destination type. That may not
10289     // happen if the bitcast is from narrow to wide type.
10290     SDValue Extract = BitCast.getOperand(0);
10291     unsigned ExtIdx = Extract.getConstantOperandVal(1);
10292     unsigned SrcEltBitWidth = Extract.getScalarValueSizeInBits();
10293     unsigned ExtIdxInBits = ExtIdx * SrcEltBitWidth;
10294     unsigned CastedEltBitWidth = BitCast.getScalarValueSizeInBits();
10295     if (ExtIdxInBits % CastedEltBitWidth != 0)
10296       return false;
10297 
10298     // Can't handle cases where vector size is not 128-bit
10299     if (!Extract.getOperand(0).getValueType().is128BitVector())
10300       return false;
10301 
10302     // Update the lane value by offsetting with the scaled extract index.
10303     LaneC += ExtIdxInBits / CastedEltBitWidth;
10304 
10305     // Determine the casted vector type of the wide vector input.
10306     // dup (bitcast (extract_subv X, C)), LaneC --> dup (bitcast X), LaneC'
10307     // Examples:
10308     // dup (bitcast (extract_subv v2f64 X, 1) to v2f32), 1 --> dup v4f32 X, 3
10309     // dup (bitcast (extract_subv v16i8 X, 8) to v4i16), 1 --> dup v8i16 X, 5
10310     unsigned SrcVecNumElts =
10311         Extract.getOperand(0).getValueSizeInBits() / CastedEltBitWidth;
10312     CastVT = MVT::getVectorVT(BitCast.getSimpleValueType().getScalarType(),
10313                               SrcVecNumElts);
10314     return true;
10315   };
10316   MVT CastVT;
10317   if (getScaledOffsetDup(V, Lane, CastVT)) {
10318     V = DAG.getBitcast(CastVT, V.getOperand(0).getOperand(0));
10319   } else if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
10320              V.getOperand(0).getValueType().is128BitVector()) {
10321     // The lane is incremented by the index of the extract.
10322     // Example: dup v2f32 (extract v4f32 X, 2), 1 --> dup v4f32 X, 3
10323     Lane += V.getConstantOperandVal(1);
10324     V = V.getOperand(0);
10325   } else if (V.getOpcode() == ISD::CONCAT_VECTORS) {
10326     // The lane is decremented if we are splatting from the 2nd operand.
10327     // Example: dup v4i32 (concat v2i32 X, v2i32 Y), 3 --> dup v4i32 Y, 1
10328     unsigned Idx = Lane >= (int)VT.getVectorNumElements() / 2;
10329     Lane -= Idx * VT.getVectorNumElements() / 2;
10330     V = WidenVector(V.getOperand(Idx), DAG);
10331   } else if (VT.getSizeInBits() == 64) {
10332     // Widen the operand to 128-bit register with undef.
10333     V = WidenVector(V, DAG);
10334   }
10335   return DAG.getNode(Opcode, dl, VT, V, DAG.getConstant(Lane, dl, MVT::i64));
10336 }
10337 
// Return true if we can derive, from the given mask, a new half-length shuffle
// mask for a vector with twice-as-wide elements: each pair of adjacent mask
// values must map onto a single wider element (undef entries are allowed).
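// For example, on v4i32 the mask <0, 1, 6, 7> widens to <0, 3> on v2i64,
// whereas <1, 2, 5, 6> cannot be widened because its pairs do not start at
// even indices.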
10341 static bool isWideTypeMask(ArrayRef<int> M, EVT VT,
10342                            SmallVectorImpl<int> &NewMask) {
10343   unsigned NumElts = VT.getVectorNumElements();
10344   if (NumElts % 2 != 0)
10345     return false;
10346 
10347   NewMask.clear();
10348   for (unsigned i = 0; i < NumElts; i += 2) {
10349     int M0 = M[i];
10350     int M1 = M[i + 1];
10351 
10352     // If both elements are undef, new mask is undef too.
10353     if (M0 == -1 && M1 == -1) {
10354       NewMask.push_back(-1);
10355       continue;
10356     }
10357 
10358     if (M0 == -1 && M1 != -1 && (M1 % 2) == 1) {
10359       NewMask.push_back(M1 / 2);
10360       continue;
10361     }
10362 
10363     if (M0 != -1 && (M0 % 2) == 0 && ((M0 + 1) == M1 || M1 == -1)) {
10364       NewMask.push_back(M0 / 2);
10365       continue;
10366     }
10367 
10368     NewMask.clear();
10369     return false;
10370   }
10371 
10372   assert(NewMask.size() == NumElts / 2 && "Incorrect size for mask!");
10373   return true;
10374 }
10375 
10376 // Try to widen element type to get a new mask value for a better permutation
10377 // sequence, so that we can use NEON shuffle instructions, such as zip1/2,
10378 // UZP1/2, TRN1/2, REV, INS, etc.
10379 // For example:
10380 //  shufflevector <4 x i32> %a, <4 x i32> %b,
10381 //                <4 x i32> <i32 6, i32 7, i32 2, i32 3>
10382 // is equivalent to:
10383 //  shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1>
10384 // Finally, we can get:
10385 //  mov     v0.d[0], v1.d[1]
10386 static SDValue tryWidenMaskForShuffle(SDValue Op, SelectionDAG &DAG) {
10387   SDLoc DL(Op);
10388   EVT VT = Op.getValueType();
10389   EVT ScalarVT = VT.getVectorElementType();
10390   unsigned ElementSize = ScalarVT.getFixedSizeInBits();
10391   SDValue V0 = Op.getOperand(0);
10392   SDValue V1 = Op.getOperand(1);
10393   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
10394 
  // When combining adjacent elements, e.g. two i16's -> i32 or two i32's ->
  // i64, we need to make sure the wider element type is legal. Thus,
  // ElementSize should not be larger than 32 bits, and the i1 type is also
  // excluded.
10398   if (ElementSize > 32 || ElementSize == 1)
10399     return SDValue();
10400 
10401   SmallVector<int, 8> NewMask;
10402   if (isWideTypeMask(Mask, VT, NewMask)) {
10403     MVT NewEltVT = VT.isFloatingPoint()
10404                        ? MVT::getFloatingPointVT(ElementSize * 2)
10405                        : MVT::getIntegerVT(ElementSize * 2);
10406     MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
10407     if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
10408       V0 = DAG.getBitcast(NewVT, V0);
10409       V1 = DAG.getBitcast(NewVT, V1);
10410       return DAG.getBitcast(VT,
10411                             DAG.getVectorShuffle(NewVT, DL, V0, V1, NewMask));
10412     }
10413   }
10414 
10415   return SDValue();
10416 }
10417 
10418 SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
10419                                                    SelectionDAG &DAG) const {
10420   SDLoc dl(Op);
10421   EVT VT = Op.getValueType();
10422 
10423   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
10424 
10425   if (useSVEForFixedLengthVectorVT(VT))
10426     return LowerFixedLengthVECTOR_SHUFFLEToSVE(Op, DAG);
10427 
10428   // Convert shuffles that are directly supported on NEON to target-specific
10429   // DAG nodes, instead of keeping them as shuffles and matching them again
10430   // during code selection.  This is more efficient and avoids the possibility
10431   // of inconsistencies between legalization and selection.
10432   ArrayRef<int> ShuffleMask = SVN->getMask();
10433 
10434   SDValue V1 = Op.getOperand(0);
10435   SDValue V2 = Op.getOperand(1);
10436 
10437   assert(V1.getValueType() == VT && "Unexpected VECTOR_SHUFFLE type!");
10438   assert(ShuffleMask.size() == VT.getVectorNumElements() &&
10439          "Unexpected VECTOR_SHUFFLE mask size!");
10440 
10441   if (SVN->isSplat()) {
10442     int Lane = SVN->getSplatIndex();
    // If this is an undef splat, generate it via "just" vdup, if possible.
10444     if (Lane == -1)
10445       Lane = 0;
10446 
10447     if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR)
10448       return DAG.getNode(AArch64ISD::DUP, dl, V1.getValueType(),
10449                          V1.getOperand(0));
10450     // Test if V1 is a BUILD_VECTOR and the lane being referenced is a non-
10451     // constant. If so, we can just reference the lane's definition directly.
10452     if (V1.getOpcode() == ISD::BUILD_VECTOR &&
10453         !isa<ConstantSDNode>(V1.getOperand(Lane)))
10454       return DAG.getNode(AArch64ISD::DUP, dl, VT, V1.getOperand(Lane));
10455 
10456     // Otherwise, duplicate from the lane of the input vector.
10457     unsigned Opcode = getDUPLANEOp(V1.getValueType().getVectorElementType());
10458     return constructDup(V1, Lane, dl, VT, Opcode, DAG);
10459   }
10460 
10461   // Check if the mask matches a DUP for a wider element
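  // For example, a v8i8 shuffle with mask <0,1,2,3,0,1,2,3> splats the first
  // 32-bit lane and is lowered as DUPLANE32 on a v2i32 bitcast of the input.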
10462   for (unsigned LaneSize : {64U, 32U, 16U}) {
10463     unsigned Lane = 0;
10464     if (isWideDUPMask(ShuffleMask, VT, LaneSize, Lane)) {
10465       unsigned Opcode = LaneSize == 64 ? AArch64ISD::DUPLANE64
10466                                        : LaneSize == 32 ? AArch64ISD::DUPLANE32
10467                                                         : AArch64ISD::DUPLANE16;
10468       // Cast V1 to an integer vector with required lane size
10469       MVT NewEltTy = MVT::getIntegerVT(LaneSize);
10470       unsigned NewEltCount = VT.getSizeInBits() / LaneSize;
10471       MVT NewVecTy = MVT::getVectorVT(NewEltTy, NewEltCount);
10472       V1 = DAG.getBitcast(NewVecTy, V1);
      // Construct the DUP instruction
10474       V1 = constructDup(V1, Lane, dl, NewVecTy, Opcode, DAG);
10475       // Cast back to the original type
10476       return DAG.getBitcast(VT, V1);
10477     }
10478   }
10479 
10480   if (isREVMask(ShuffleMask, VT, 64))
10481     return DAG.getNode(AArch64ISD::REV64, dl, V1.getValueType(), V1, V2);
10482   if (isREVMask(ShuffleMask, VT, 32))
10483     return DAG.getNode(AArch64ISD::REV32, dl, V1.getValueType(), V1, V2);
10484   if (isREVMask(ShuffleMask, VT, 16))
10485     return DAG.getNode(AArch64ISD::REV16, dl, V1.getValueType(), V1, V2);
10486 
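  // A whole-vector reverse of v8i16 or v16i8 can be done as a REV64 (reverse
  // within each 64-bit half) followed by an EXT #8 that swaps the two halves.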
10487   if (((VT.getVectorNumElements() == 8 && VT.getScalarSizeInBits() == 16) ||
10488        (VT.getVectorNumElements() == 16 && VT.getScalarSizeInBits() == 8)) &&
10489       ShuffleVectorInst::isReverseMask(ShuffleMask)) {
10490     SDValue Rev = DAG.getNode(AArch64ISD::REV64, dl, VT, V1);
10491     return DAG.getNode(AArch64ISD::EXT, dl, VT, Rev, Rev,
10492                        DAG.getConstant(8, dl, MVT::i32));
10493   }
10494 
10495   bool ReverseEXT = false;
10496   unsigned Imm;
10497   if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm)) {
10498     if (ReverseEXT)
10499       std::swap(V1, V2);
10500     Imm *= getExtFactor(V1);
10501     return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V2,
10502                        DAG.getConstant(Imm, dl, MVT::i32));
10503   } else if (V2->isUndef() && isSingletonEXTMask(ShuffleMask, VT, Imm)) {
10504     Imm *= getExtFactor(V1);
10505     return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V1,
10506                        DAG.getConstant(Imm, dl, MVT::i32));
10507   }
10508 
10509   unsigned WhichResult;
10510   if (isZIPMask(ShuffleMask, VT, WhichResult)) {
10511     unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2;
10512     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
10513   }
10514   if (isUZPMask(ShuffleMask, VT, WhichResult)) {
10515     unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
10516     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
10517   }
10518   if (isTRNMask(ShuffleMask, VT, WhichResult)) {
10519     unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
10520     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
10521   }
10522 
10523   if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
10524     unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2;
10525     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
10526   }
10527   if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
10528     unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
10529     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
10530   }
10531   if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
10532     unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
10533     return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
10534   }
10535 
10536   if (SDValue Concat = tryFormConcatFromShuffle(Op, DAG))
10537     return Concat;
10538 
10539   bool DstIsLeft;
10540   int Anomaly;
10541   int NumInputElements = V1.getValueType().getVectorNumElements();
10542   if (isINSMask(ShuffleMask, NumInputElements, DstIsLeft, Anomaly)) {
10543     SDValue DstVec = DstIsLeft ? V1 : V2;
10544     SDValue DstLaneV = DAG.getConstant(Anomaly, dl, MVT::i64);
10545 
10546     SDValue SrcVec = V1;
10547     int SrcLane = ShuffleMask[Anomaly];
10548     if (SrcLane >= NumInputElements) {
10549       SrcVec = V2;
10550       SrcLane -= VT.getVectorNumElements();
10551     }
10552     SDValue SrcLaneV = DAG.getConstant(SrcLane, dl, MVT::i64);
10553 
10554     EVT ScalarVT = VT.getVectorElementType();
10555 
10556     if (ScalarVT.getFixedSizeInBits() < 32 && ScalarVT.isInteger())
10557       ScalarVT = MVT::i32;
10558 
10559     return DAG.getNode(
10560         ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
10561         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, SrcVec, SrcLaneV),
10562         DstLaneV);
10563   }
10564 
10565   if (SDValue NewSD = tryWidenMaskForShuffle(Op, DAG))
10566     return NewSD;
10567 
10568   // If the shuffle is not directly supported and it has 4 elements, use
10569   // the PerfectShuffle-generated table to synthesize it from other shuffles.
10570   unsigned NumElts = VT.getVectorNumElements();
10571   if (NumElts == 4) {
10572     unsigned PFIndexes[4];
10573     for (unsigned i = 0; i != 4; ++i) {
10574       if (ShuffleMask[i] < 0)
10575         PFIndexes[i] = 8;
10576       else
10577         PFIndexes[i] = ShuffleMask[i];
10578     }
10579 
10580     // Compute the index in the perfect shuffle table.
10581     unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
10582                             PFIndexes[2] * 9 + PFIndexes[3];
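    // For example, the mask <1, 1, 3, 3> maps to the table index
    // 1*729 + 1*81 + 3*9 + 3 == 840.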
10583     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
10584     return GeneratePerfectShuffle(PFTableIndex, V1, V2, PFEntry, V1, V2, DAG,
10585                                   dl);
10586   }
10587 
10588   return GenerateTBL(Op, ShuffleMask, DAG);
10589 }
10590 
10591 SDValue AArch64TargetLowering::LowerSPLAT_VECTOR(SDValue Op,
10592                                                  SelectionDAG &DAG) const {
10593   EVT VT = Op.getValueType();
10594 
10595   if (useSVEForFixedLengthVectorVT(VT))
10596     return LowerToScalableOp(Op, DAG);
10597 
10598   assert(VT.isScalableVector() && VT.getVectorElementType() == MVT::i1 &&
10599          "Unexpected vector type!");
10600 
10601   // We can handle the constant cases during isel.
10602   if (isa<ConstantSDNode>(Op.getOperand(0)))
10603     return Op;
10604 
10605   // There isn't a natural way to handle the general i1 case, so we use some
10606   // trickery with whilelo.
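  // The i1 value is sign-extended to an i64 of either 0 or all-ones, and
  // whilelo(0, x) then yields an all-false predicate when x == 0 and an
  // all-true predicate when x == -1, since every lane index is
  // unsigned-less-than UINT64_MAX.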
10607   SDLoc DL(Op);
10608   SDValue SplatVal = DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, MVT::i64);
10609   SplatVal = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, SplatVal,
10610                          DAG.getValueType(MVT::i1));
10611   SDValue ID =
10612       DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64);
10613   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
10614   if (VT == MVT::nxv1i1)
10615     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::nxv1i1,
10616                        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::nxv2i1, ID,
10617                                    Zero, SplatVal),
10618                        Zero);
10619   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, ID, Zero, SplatVal);
10620 }
10621 
10622 SDValue AArch64TargetLowering::LowerDUPQLane(SDValue Op,
10623                                              SelectionDAG &DAG) const {
10624   SDLoc DL(Op);
10625 
10626   EVT VT = Op.getValueType();
10627   if (!isTypeLegal(VT) || !VT.isScalableVector())
10628     return SDValue();
10629 
10630   // Current lowering only supports the SVE-ACLE types.
10631   if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock)
10632     return SDValue();
10633 
  // The DUPQ operation is independent of element type so normalise to i64s.
10635   SDValue Idx128 = Op.getOperand(2);
10636 
10637   // DUPQ can be used when idx is in range.
10638   auto *CIdx = dyn_cast<ConstantSDNode>(Idx128);
10639   if (CIdx && (CIdx->getZExtValue() <= 3)) {
10640     SDValue CI = DAG.getTargetConstant(CIdx->getZExtValue(), DL, MVT::i64);
10641     return DAG.getNode(AArch64ISD::DUPLANE128, DL, VT, Op.getOperand(1), CI);
10642   }
10643 
10644   SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::nxv2i64, Op.getOperand(1));
10645 
10646   // The ACLE says this must produce the same result as:
10647   //   svtbl(data, svadd_x(svptrue_b64(),
10648   //                       svand_x(svptrue_b64(), svindex_u64(0, 1), 1),
10649   //                       index * 2))
10650   SDValue One = DAG.getConstant(1, DL, MVT::i64);
10651   SDValue SplatOne = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, One);
10652 
10653   // create the vector 0,1,0,1,...
10654   SDValue SV = DAG.getStepVector(DL, MVT::nxv2i64);
10655   SV = DAG.getNode(ISD::AND, DL, MVT::nxv2i64, SV, SplatOne);
10656 
10657   // create the vector idx64,idx64+1,idx64,idx64+1,...
10658   SDValue Idx64 = DAG.getNode(ISD::ADD, DL, MVT::i64, Idx128, Idx128);
10659   SDValue SplatIdx64 = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, Idx64);
10660   SDValue ShuffleMask = DAG.getNode(ISD::ADD, DL, MVT::nxv2i64, SV, SplatIdx64);
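  // For example, when the runtime index is 1 the mask is <2,3,2,3,...>, which
  // selects the second 128-bit quadword of the input.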
10661 
10662   // create the vector Val[idx64],Val[idx64+1],Val[idx64],Val[idx64+1],...
10663   SDValue TBL = DAG.getNode(AArch64ISD::TBL, DL, MVT::nxv2i64, V, ShuffleMask);
10664   return DAG.getNode(ISD::BITCAST, DL, VT, TBL);
10665 }
10666 
10668 static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits,
10669                                APInt &UndefBits) {
10670   EVT VT = BVN->getValueType(0);
10671   APInt SplatBits, SplatUndef;
10672   unsigned SplatBitSize;
10673   bool HasAnyUndefs;
10674   if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
10675     unsigned NumSplats = VT.getSizeInBits() / SplatBitSize;
10676 
10677     for (unsigned i = 0; i < NumSplats; ++i) {
10678       CnstBits <<= SplatBitSize;
10679       UndefBits <<= SplatBitSize;
10680       CnstBits |= SplatBits.zextOrTrunc(VT.getSizeInBits());
10681       UndefBits |= (SplatBits ^ SplatUndef).zextOrTrunc(VT.getSizeInBits());
10682     }
10683 
10684     return true;
10685   }
10686 
10687   return false;
10688 }
10689 
10690 // Try 64-bit splatted SIMD immediate.
10691 static SDValue tryAdvSIMDModImm64(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10692                                  const APInt &Bits) {
10693   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10694     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10695     EVT VT = Op.getValueType();
10696     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v2i64 : MVT::f64;
10697 
10698     if (AArch64_AM::isAdvSIMDModImmType10(Value)) {
10699       Value = AArch64_AM::encodeAdvSIMDModImmType10(Value);
10700 
10701       SDLoc dl(Op);
10702       SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
10703                                 DAG.getConstant(Value, dl, MVT::i32));
10704       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10705     }
10706   }
10707 
10708   return SDValue();
10709 }
10710 
10711 // Try 32-bit splatted SIMD immediate.
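// Types 1-4 below cover an 8-bit immediate placed in byte 0, 1, 2 or 3 of
// each 32-bit lane; e.g. a splat of 0x00AB0000 matches type 3 and is emitted
// as imm8 == 0xAB with a left shift of 16.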
10712 static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10713                                   const APInt &Bits,
10714                                   const SDValue *LHS = nullptr) {
10715   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10716     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10717     EVT VT = Op.getValueType();
10718     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
10719     bool isAdvSIMDModImm = false;
10720     uint64_t Shift;
10721 
10722     if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType1(Value))) {
10723       Value = AArch64_AM::encodeAdvSIMDModImmType1(Value);
10724       Shift = 0;
10725     }
10726     else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType2(Value))) {
10727       Value = AArch64_AM::encodeAdvSIMDModImmType2(Value);
10728       Shift = 8;
10729     }
10730     else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType3(Value))) {
10731       Value = AArch64_AM::encodeAdvSIMDModImmType3(Value);
10732       Shift = 16;
10733     }
10734     else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType4(Value))) {
10735       Value = AArch64_AM::encodeAdvSIMDModImmType4(Value);
10736       Shift = 24;
10737     }
10738 
10739     if (isAdvSIMDModImm) {
10740       SDLoc dl(Op);
10741       SDValue Mov;
10742 
10743       if (LHS)
10744         Mov = DAG.getNode(NewOp, dl, MovTy, *LHS,
10745                           DAG.getConstant(Value, dl, MVT::i32),
10746                           DAG.getConstant(Shift, dl, MVT::i32));
10747       else
10748         Mov = DAG.getNode(NewOp, dl, MovTy,
10749                           DAG.getConstant(Value, dl, MVT::i32),
10750                           DAG.getConstant(Shift, dl, MVT::i32));
10751 
10752       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10753     }
10754   }
10755 
10756   return SDValue();
10757 }
10758 
10759 // Try 16-bit splatted SIMD immediate.
10760 static SDValue tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10761                                   const APInt &Bits,
10762                                   const SDValue *LHS = nullptr) {
10763   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10764     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10765     EVT VT = Op.getValueType();
10766     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
10767     bool isAdvSIMDModImm = false;
10768     uint64_t Shift;
10769 
10770     if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType5(Value))) {
10771       Value = AArch64_AM::encodeAdvSIMDModImmType5(Value);
10772       Shift = 0;
10773     }
10774     else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType6(Value))) {
10775       Value = AArch64_AM::encodeAdvSIMDModImmType6(Value);
10776       Shift = 8;
10777     }
10778 
10779     if (isAdvSIMDModImm) {
10780       SDLoc dl(Op);
10781       SDValue Mov;
10782 
10783       if (LHS)
10784         Mov = DAG.getNode(NewOp, dl, MovTy, *LHS,
10785                           DAG.getConstant(Value, dl, MVT::i32),
10786                           DAG.getConstant(Shift, dl, MVT::i32));
10787       else
10788         Mov = DAG.getNode(NewOp, dl, MovTy,
10789                           DAG.getConstant(Value, dl, MVT::i32),
10790                           DAG.getConstant(Shift, dl, MVT::i32));
10791 
10792       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10793     }
10794   }
10795 
10796   return SDValue();
10797 }
10798 
10799 // Try 32-bit splatted SIMD immediate with shifted ones.
10800 static SDValue tryAdvSIMDModImm321s(unsigned NewOp, SDValue Op,
10801                                     SelectionDAG &DAG, const APInt &Bits) {
10802   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10803     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10804     EVT VT = Op.getValueType();
10805     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
10806     bool isAdvSIMDModImm = false;
10807     uint64_t Shift;
10808 
10809     if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType7(Value))) {
10810       Value = AArch64_AM::encodeAdvSIMDModImmType7(Value);
10811       Shift = 264;
10812     }
10813     else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType8(Value))) {
10814       Value = AArch64_AM::encodeAdvSIMDModImmType8(Value);
10815       Shift = 272;
10816     }
10817 
10818     if (isAdvSIMDModImm) {
10819       SDLoc dl(Op);
10820       SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
10821                                 DAG.getConstant(Value, dl, MVT::i32),
10822                                 DAG.getConstant(Shift, dl, MVT::i32));
10823       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10824     }
10825   }
10826 
10827   return SDValue();
10828 }
10829 
10830 // Try 8-bit splatted SIMD immediate.
10831 static SDValue tryAdvSIMDModImm8(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10832                                  const APInt &Bits) {
10833   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10834     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10835     EVT VT = Op.getValueType();
10836     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v16i8 : MVT::v8i8;
10837 
10838     if (AArch64_AM::isAdvSIMDModImmType9(Value)) {
10839       Value = AArch64_AM::encodeAdvSIMDModImmType9(Value);
10840 
10841       SDLoc dl(Op);
10842       SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
10843                                 DAG.getConstant(Value, dl, MVT::i32));
10844       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10845     }
10846   }
10847 
10848   return SDValue();
10849 }
10850 
10851 // Try FP splatted SIMD immediate.
10852 static SDValue tryAdvSIMDModImmFP(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
10853                                   const APInt &Bits) {
10854   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
10855     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
10856     EVT VT = Op.getValueType();
10857     bool isWide = (VT.getSizeInBits() == 128);
10858     MVT MovTy;
10859     bool isAdvSIMDModImm = false;
10860 
10861     if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType11(Value))) {
10862       Value = AArch64_AM::encodeAdvSIMDModImmType11(Value);
10863       MovTy = isWide ? MVT::v4f32 : MVT::v2f32;
10864     }
10865     else if (isWide &&
10866              (isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType12(Value))) {
10867       Value = AArch64_AM::encodeAdvSIMDModImmType12(Value);
10868       MovTy = MVT::v2f64;
10869     }
10870 
10871     if (isAdvSIMDModImm) {
10872       SDLoc dl(Op);
10873       SDValue Mov = DAG.getNode(NewOp, dl, MovTy,
10874                                 DAG.getConstant(Value, dl, MVT::i32));
10875       return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
10876     }
10877   }
10878 
10879   return SDValue();
10880 }
10881 
// Specialized code to quickly check whether PotentialBVec is a BUILD_VECTOR
// whose elements are all the same constant integer value, which is returned
// in the reference argument ConstVal.
10885 static bool isAllConstantBuildVector(const SDValue &PotentialBVec,
10886                                      uint64_t &ConstVal) {
10887   BuildVectorSDNode *Bvec = dyn_cast<BuildVectorSDNode>(PotentialBVec);
10888   if (!Bvec)
10889     return false;
10890   ConstantSDNode *FirstElt = dyn_cast<ConstantSDNode>(Bvec->getOperand(0));
10891   if (!FirstElt)
10892     return false;
10893   EVT VT = Bvec->getValueType(0);
10894   unsigned NumElts = VT.getVectorNumElements();
10895   for (unsigned i = 1; i < NumElts; ++i)
10896     if (dyn_cast<ConstantSDNode>(Bvec->getOperand(i)) != FirstElt)
10897       return false;
10898   ConstVal = FirstElt->getZExtValue();
10899   return true;
10900 }
10901 
10902 // Attempt to form a vector S[LR]I from (or (and X, BvecC1), (lsl Y, C2)),
// to (SLI X, Y, C2), where X and Y have matching vector types, BvecC1 is a
// BUILD_VECTOR with constant element C1, C2 is a constant, and:
10905 //   - for the SLI case: C1 == ~(Ones(ElemSizeInBits) << C2)
10906 //   - for the SRI case: C1 == ~(Ones(ElemSizeInBits) >> C2)
10907 // The (or (lsl Y, C2), (and X, BvecC1)) case is also handled.
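// For example, with v4i32 and C2 == 8 the AND mask must be 0x000000FF in each
// lane, and (or (and X, 0xFF), (shl Y, 8)) becomes SLI X, Y, #8, which keeps
// the low 8 bits of X and inserts Y << 8 above them.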
10908 static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) {
10909   EVT VT = N->getValueType(0);
10910 
10911   if (!VT.isVector())
10912     return SDValue();
10913 
10914   SDLoc DL(N);
10915 
10916   SDValue And;
10917   SDValue Shift;
10918 
10919   SDValue FirstOp = N->getOperand(0);
10920   unsigned FirstOpc = FirstOp.getOpcode();
10921   SDValue SecondOp = N->getOperand(1);
10922   unsigned SecondOpc = SecondOp.getOpcode();
10923 
10924   // Is one of the operands an AND or a BICi? The AND may have been optimised to
10925   // a BICi in order to use an immediate instead of a register.
  // Is the other operand a shl or lshr? This will have been turned into:
10927   // AArch64ISD::VSHL vector, #shift or AArch64ISD::VLSHR vector, #shift.
10928   if ((FirstOpc == ISD::AND || FirstOpc == AArch64ISD::BICi) &&
10929       (SecondOpc == AArch64ISD::VSHL || SecondOpc == AArch64ISD::VLSHR)) {
10930     And = FirstOp;
10931     Shift = SecondOp;
10932 
10933   } else if ((SecondOpc == ISD::AND || SecondOpc == AArch64ISD::BICi) &&
10934              (FirstOpc == AArch64ISD::VSHL || FirstOpc == AArch64ISD::VLSHR)) {
10935     And = SecondOp;
10936     Shift = FirstOp;
10937   } else
10938     return SDValue();
10939 
10940   bool IsAnd = And.getOpcode() == ISD::AND;
10941   bool IsShiftRight = Shift.getOpcode() == AArch64ISD::VLSHR;
10942 
10943   // Is the shift amount constant?
10944   ConstantSDNode *C2node = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
10945   if (!C2node)
10946     return SDValue();
10947 
10948   uint64_t C1;
10949   if (IsAnd) {
10950     // Is the and mask vector all constant?
10951     if (!isAllConstantBuildVector(And.getOperand(1), C1))
10952       return SDValue();
10953   } else {
10954     // Reconstruct the corresponding AND immediate from the two BICi immediates.
10955     ConstantSDNode *C1nodeImm = dyn_cast<ConstantSDNode>(And.getOperand(1));
10956     ConstantSDNode *C1nodeShift = dyn_cast<ConstantSDNode>(And.getOperand(2));
10957     assert(C1nodeImm && C1nodeShift);
10958     C1 = ~(C1nodeImm->getZExtValue() << C1nodeShift->getZExtValue());
10959   }
10960 
10961   // Is C1 == ~(Ones(ElemSizeInBits) << C2) or
10962   // C1 == ~(Ones(ElemSizeInBits) >> C2), taking into account
10963   // how much one can shift elements of a particular size?
10964   uint64_t C2 = C2node->getZExtValue();
10965   unsigned ElemSizeInBits = VT.getScalarSizeInBits();
10966   if (C2 > ElemSizeInBits)
10967     return SDValue();
10968 
10969   APInt C1AsAPInt(ElemSizeInBits, C1);
10970   APInt RequiredC1 = IsShiftRight ? APInt::getHighBitsSet(ElemSizeInBits, C2)
10971                                   : APInt::getLowBitsSet(ElemSizeInBits, C2);
10972   if (C1AsAPInt != RequiredC1)
10973     return SDValue();
10974 
10975   SDValue X = And.getOperand(0);
10976   SDValue Y = Shift.getOperand(0);
10977 
10978   unsigned Inst = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI;
10979   SDValue ResultSLI = DAG.getNode(Inst, DL, VT, X, Y, Shift.getOperand(1));
10980 
10981   LLVM_DEBUG(dbgs() << "aarch64-lower: transformed: \n");
10982   LLVM_DEBUG(N->dump(&DAG));
10983   LLVM_DEBUG(dbgs() << "into: \n");
10984   LLVM_DEBUG(ResultSLI->dump(&DAG));
10985 
10986   ++NumShiftInserts;
10987   return ResultSLI;
10988 }
10989 
10990 SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
10991                                              SelectionDAG &DAG) const {
10992   if (useSVEForFixedLengthVectorVT(Op.getValueType()))
10993     return LowerToScalableOp(Op, DAG);
10994 
10995   // Attempt to form a vector S[LR]I from (or (and X, C1), (lsl Y, C2))
10996   if (SDValue Res = tryLowerToSLI(Op.getNode(), DAG))
10997     return Res;
10998 
10999   EVT VT = Op.getValueType();
11000 
11001   SDValue LHS = Op.getOperand(0);
11002   BuildVectorSDNode *BVN =
11003       dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode());
11004   if (!BVN) {
11005     // OR commutes, so try swapping the operands.
11006     LHS = Op.getOperand(1);
11007     BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode());
11008   }
11009   if (!BVN)
11010     return Op;
11011 
11012   APInt DefBits(VT.getSizeInBits(), 0);
11013   APInt UndefBits(VT.getSizeInBits(), 0);
11014   if (resolveBuildVector(BVN, DefBits, UndefBits)) {
11015     SDValue NewOp;
11016 
11017     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG,
11018                                     DefBits, &LHS)) ||
11019         (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG,
11020                                     DefBits, &LHS)))
11021       return NewOp;
11022 
11023     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG,
11024                                     UndefBits, &LHS)) ||
11025         (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG,
11026                                     UndefBits, &LHS)))
11027       return NewOp;
11028   }
11029 
11030   // We can always fall back to a non-immediate OR.
11031   return Op;
11032 }
11033 
11034 // Normalize the operands of BUILD_VECTOR. The value of constant operands will
11035 // be truncated to fit element width.
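// For example, a v8i8 operand holding the i32 constant 0x1FF is rebuilt as
// the i32 constant 0xFF so that it fits in the 8-bit element.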
11036 static SDValue NormalizeBuildVector(SDValue Op,
11037                                     SelectionDAG &DAG) {
11038   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
11039   SDLoc dl(Op);
11040   EVT VT = Op.getValueType();
  EVT EltTy = VT.getVectorElementType();
11042 
11043   if (EltTy.isFloatingPoint() || EltTy.getSizeInBits() > 16)
11044     return Op;
11045 
11046   SmallVector<SDValue, 16> Ops;
11047   for (SDValue Lane : Op->ops()) {
11048     // For integer vectors, type legalization would have promoted the
11049     // operands already. Otherwise, if Op is a floating-point splat
11050     // (with operands cast to integers), then the only possibilities
11051     // are constants and UNDEFs.
11052     if (auto *CstLane = dyn_cast<ConstantSDNode>(Lane)) {
11053       APInt LowBits(EltTy.getSizeInBits(),
11054                     CstLane->getZExtValue());
11055       Lane = DAG.getConstant(LowBits.getZExtValue(), dl, MVT::i32);
11056     } else if (Lane.getNode()->isUndef()) {
11057       Lane = DAG.getUNDEF(MVT::i32);
11058     } else {
11059       assert(Lane.getValueType() == MVT::i32 &&
11060              "Unexpected BUILD_VECTOR operand type");
11061     }
11062     Ops.push_back(Lane);
11063   }
11064   return DAG.getBuildVector(VT, dl, Ops);
11065 }
11066 
11067 static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG) {
11068   EVT VT = Op.getValueType();
11069 
11070   APInt DefBits(VT.getSizeInBits(), 0);
11071   APInt UndefBits(VT.getSizeInBits(), 0);
11072   BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
11073   if (resolveBuildVector(BVN, DefBits, UndefBits)) {
11074     SDValue NewOp;
11075     if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
11076         (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
11077         (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
11078         (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
11079         (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
11080         (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
11081       return NewOp;
11082 
11083     DefBits = ~DefBits;
11084     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) ||
11085         (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) ||
11086         (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits)))
11087       return NewOp;
11088 
11089     DefBits = UndefBits;
11090     if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) ||
11091         (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
11092         (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) ||
11093         (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) ||
11094         (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) ||
11095         (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits)))
11096       return NewOp;
11097 
11098     DefBits = ~UndefBits;
11099     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) ||
11100         (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) ||
11101         (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits)))
11102       return NewOp;
11103   }
11104 
11105   return SDValue();
11106 }
11107 
11108 SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
11109                                                  SelectionDAG &DAG) const {
11110   EVT VT = Op.getValueType();
11111 
11112   // Try to build a simple constant vector.
11113   Op = NormalizeBuildVector(Op, DAG);
11114   if (VT.isInteger()) {
11115     // Certain vector constants, used to express things like logical NOT and
11116     // arithmetic NEG, are passed through unmodified.  This allows special
11117     // patterns for these operations to match, which will lower these constants
11118     // to whatever is proven necessary.
11119     BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
11120     if (BVN->isConstant())
11121       if (ConstantSDNode *Const = BVN->getConstantSplatNode()) {
11122         unsigned BitSize = VT.getVectorElementType().getSizeInBits();
11123         APInt Val(BitSize,
11124                   Const->getAPIntValue().zextOrTrunc(BitSize).getZExtValue());
11125         if (Val.isZero() || Val.isAllOnes())
11126           return Op;
11127       }
11128   }
11129 
11130   if (SDValue V = ConstantBuildVector(Op, DAG))
11131     return V;
11132 
11133   // Scan through the operands to find some interesting properties we can
11134   // exploit:
11135   //   1) If only one value is used, we can use a DUP, or
11136   //   2) if only the low element is not undef, we can just insert that, or
11137   //   3) if only one constant value is used (w/ some non-constant lanes),
11138   //      we can splat the constant value into the whole vector then fill
11139   //      in the non-constant lanes.
11140   //   4) FIXME: If different constant values are used, but we can intelligently
11141   //             select the values we'll be overwriting for the non-constant
11142   //             lanes such that we can directly materialize the vector
11143   //             some other way (MOVI, e.g.), we can be sneaky.
11144   //   5) if all operands are EXTRACT_VECTOR_ELT, check for VUZP.
11145   SDLoc dl(Op);
11146   unsigned NumElts = VT.getVectorNumElements();
11147   bool isOnlyLowElement = true;
11148   bool usesOnlyOneValue = true;
11149   bool usesOnlyOneConstantValue = true;
11150   bool isConstant = true;
11151   bool AllLanesExtractElt = true;
11152   unsigned NumConstantLanes = 0;
11153   unsigned NumDifferentLanes = 0;
11154   unsigned NumUndefLanes = 0;
11155   SDValue Value;
11156   SDValue ConstantValue;
11157   for (unsigned i = 0; i < NumElts; ++i) {
11158     SDValue V = Op.getOperand(i);
11159     if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11160       AllLanesExtractElt = false;
11161     if (V.isUndef()) {
11162       ++NumUndefLanes;
11163       continue;
11164     }
11165     if (i > 0)
11166       isOnlyLowElement = false;
11167     if (!isIntOrFPConstant(V))
11168       isConstant = false;
11169 
11170     if (isIntOrFPConstant(V)) {
11171       ++NumConstantLanes;
11172       if (!ConstantValue.getNode())
11173         ConstantValue = V;
11174       else if (ConstantValue != V)
11175         usesOnlyOneConstantValue = false;
11176     }
11177 
11178     if (!Value.getNode())
11179       Value = V;
11180     else if (V != Value) {
11181       usesOnlyOneValue = false;
11182       ++NumDifferentLanes;
11183     }
11184   }
11185 
11186   if (!Value.getNode()) {
11187     LLVM_DEBUG(
11188         dbgs() << "LowerBUILD_VECTOR: value undefined, creating undef node\n");
11189     return DAG.getUNDEF(VT);
11190   }
11191 
11192   // Convert BUILD_VECTOR where all elements but the lowest are undef into
11193   // SCALAR_TO_VECTOR, except for when we have a single-element constant vector
11194   // as SimplifyDemandedBits will just turn that back into BUILD_VECTOR.
11195   if (isOnlyLowElement && !(NumElts == 1 && isIntOrFPConstant(Value))) {
11196     LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: only low element used, creating 1 "
11197                          "SCALAR_TO_VECTOR node\n");
11198     return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
11199   }
11200 
11201   if (AllLanesExtractElt) {
11202     SDNode *Vector = nullptr;
11203     bool Even = false;
11204     bool Odd = false;
11205     // Check whether the extract elements match the Even pattern <0,2,4,...> or
11206     // the Odd pattern <1,3,5,...>.
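    // For example, a v4i16 built from lanes 0, 2, 4 and 6 of a v8i16 source
    // is UZP1 of the source's two halves; lanes 1, 3, 5 and 7 give UZP2.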
11207     for (unsigned i = 0; i < NumElts; ++i) {
11208       SDValue V = Op.getOperand(i);
11209       const SDNode *N = V.getNode();
11210       if (!isa<ConstantSDNode>(N->getOperand(1)))
11211         break;
11212       SDValue N0 = N->getOperand(0);
11213 
11214       // All elements are extracted from the same vector.
11215       if (!Vector) {
11216         Vector = N0.getNode();
11217         // Check that the type of EXTRACT_VECTOR_ELT matches the type of
11218         // BUILD_VECTOR.
11219         if (VT.getVectorElementType() !=
11220             N0.getValueType().getVectorElementType())
11221           break;
11222       } else if (Vector != N0.getNode()) {
11223         Odd = false;
11224         Even = false;
11225         break;
11226       }
11227 
11228       // Extracted values are either at Even indices <0,2,4,...> or at Odd
11229       // indices <1,3,5,...>.
11230       uint64_t Val = N->getConstantOperandVal(1);
11231       if (Val == 2 * i) {
11232         Even = true;
11233         continue;
11234       }
11235       if (Val - 1 == 2 * i) {
11236         Odd = true;
11237         continue;
11238       }
11239 
11240       // Something does not match: abort.
11241       Odd = false;
11242       Even = false;
11243       break;
11244     }
11245     if (Even || Odd) {
11246       SDValue LHS =
11247           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0),
11248                       DAG.getConstant(0, dl, MVT::i64));
11249       SDValue RHS =
11250           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0),
11251                       DAG.getConstant(NumElts, dl, MVT::i64));
11252 
11253       if (Even && !Odd)
11254         return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), LHS,
11255                            RHS);
11256       if (Odd && !Even)
11257         return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), LHS,
11258                            RHS);
11259     }
11260   }
11261 
  // Use DUP for non-constant splats. For floating-point constant splats,
  // bitcast the elements to the equivalently-sized integer type and try again.
11264   if (usesOnlyOneValue) {
11265     if (!isConstant) {
11266       if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
11267           Value.getValueType() != VT) {
11268         LLVM_DEBUG(
11269             dbgs() << "LowerBUILD_VECTOR: use DUP for non-constant splats\n");
11270         return DAG.getNode(AArch64ISD::DUP, dl, VT, Value);
11271       }
11272 
11273       // This is actually a DUPLANExx operation, which keeps everything vectory.
11274 
11275       SDValue Lane = Value.getOperand(1);
11276       Value = Value.getOperand(0);
11277       if (Value.getValueSizeInBits() == 64) {
11278         LLVM_DEBUG(
11279             dbgs() << "LowerBUILD_VECTOR: DUPLANE works on 128-bit vectors, "
11280                       "widening it\n");
11281         Value = WidenVector(Value, DAG);
11282       }
11283 
11284       unsigned Opcode = getDUPLANEOp(VT.getVectorElementType());
11285       return DAG.getNode(Opcode, dl, VT, Value, Lane);
11286     }
11287 
11288     if (VT.getVectorElementType().isFloatingPoint()) {
11289       SmallVector<SDValue, 8> Ops;
11290       EVT EltTy = VT.getVectorElementType();
11291       assert ((EltTy == MVT::f16 || EltTy == MVT::bf16 || EltTy == MVT::f32 ||
11292                EltTy == MVT::f64) && "Unsupported floating-point vector type");
11293       LLVM_DEBUG(
11294           dbgs() << "LowerBUILD_VECTOR: float constant splats, creating int "
11295                     "BITCASTS, and try again\n");
11296       MVT NewType = MVT::getIntegerVT(EltTy.getSizeInBits());
11297       for (unsigned i = 0; i < NumElts; ++i)
11298         Ops.push_back(DAG.getNode(ISD::BITCAST, dl, NewType, Op.getOperand(i)));
11299       EVT VecVT = EVT::getVectorVT(*DAG.getContext(), NewType, NumElts);
11300       SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
11301       LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: trying to lower new vector: ";
11302                  Val.dump(););
11303       Val = LowerBUILD_VECTOR(Val, DAG);
11304       if (Val.getNode())
11305         return DAG.getNode(ISD::BITCAST, dl, VT, Val);
11306     }
11307   }
11308 
11309   // If we need to insert a small number of different non-constant elements and
11310   // the vector width is sufficiently large, prefer using DUP with the common
11311   // value and INSERT_VECTOR_ELT for the different lanes. If DUP is preferred,
11312   // skip the constant lane handling below.
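  // For example, a v8i16 where seven lanes share one non-constant value and
  // a single lane differs is lowered as a DUP plus one INSERT_VECTOR_ELT.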
11313   bool PreferDUPAndInsert =
11314       !isConstant && NumDifferentLanes >= 1 &&
11315       NumDifferentLanes < ((NumElts - NumUndefLanes) / 2) &&
11316       NumDifferentLanes >= NumConstantLanes;
11317 
  // If only one constant value was used, and it was used for more than one
  // lane, start by splatting that value, then replace the non-constant lanes.
  // This is better than the default, which will perform a separate
  // initialization for each lane.
11322   if (!PreferDUPAndInsert && NumConstantLanes > 0 && usesOnlyOneConstantValue) {
11323     // Firstly, try to materialize the splat constant.
11324     SDValue Vec = DAG.getSplatBuildVector(VT, dl, ConstantValue),
11325             Val = ConstantBuildVector(Vec, DAG);
11326     if (!Val) {
11327       // Otherwise, materialize the constant and splat it.
11328       Val = DAG.getNode(AArch64ISD::DUP, dl, VT, ConstantValue);
11329       DAG.ReplaceAllUsesWith(Vec.getNode(), &Val);
11330     }
11331 
11332     // Now insert the non-constant lanes.
11333     for (unsigned i = 0; i < NumElts; ++i) {
11334       SDValue V = Op.getOperand(i);
11335       SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
11336       if (!isIntOrFPConstant(V))
11337         // Note that type legalization likely mucked about with the VT of the
11338         // source operand, so we may have to convert it here before inserting.
11339         Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, V, LaneIdx);
11340     }
11341     return Val;
11342   }
11343 
11344   // This will generate a load from the constant pool.
11345   if (isConstant) {
11346     LLVM_DEBUG(
11347         dbgs() << "LowerBUILD_VECTOR: all elements are constant, use default "
11348                   "expansion\n");
11349     return SDValue();
11350   }
11351 
11352   // Detect patterns of a0,a1,a2,a3,b0,b1,b2,b3,c0,c1,c2,c3,d0,d1,d2,d3 from
11353   // v4i32s. This is really a truncate, which we can construct out of (legal)
11354   // concats and truncate nodes.
11355   if (SDValue M = ReconstructTruncateFromBuildVector(Op, DAG))
11356     return M;
11357 
11358   // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
11359   if (NumElts >= 4) {
11360     if (SDValue shuffle = ReconstructShuffle(Op, DAG))
11361       return shuffle;
11362   }
11363 
11364   if (PreferDUPAndInsert) {
11365     // First, build a constant vector with the common element.
11366     SmallVector<SDValue, 8> Ops(NumElts, Value);
11367     SDValue NewVector = LowerBUILD_VECTOR(DAG.getBuildVector(VT, dl, Ops), DAG);
11368     // Next, insert the elements that do not match the common value.
11369     for (unsigned I = 0; I < NumElts; ++I)
11370       if (Op.getOperand(I) != Value)
11371         NewVector =
11372             DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, NewVector,
11373                         Op.getOperand(I), DAG.getConstant(I, dl, MVT::i64));
11374 
11375     return NewVector;
11376   }
11377 
11378   // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
11379   // know the default expansion would otherwise fall back on something even
11380   // worse. For a vector with one or two non-undef values, that's
11381   // scalar_to_vector for the elements followed by a shuffle (provided the
11382   // shuffle is valid for the target) and materialization element by element
11383   // on the stack followed by a load for everything else.
11384   if (!isConstant && !usesOnlyOneValue) {
11385     LLVM_DEBUG(
11386         dbgs() << "LowerBUILD_VECTOR: alternatives failed, creating sequence "
11387                   "of INSERT_VECTOR_ELT\n");
11388 
11389     SDValue Vec = DAG.getUNDEF(VT);
11390     SDValue Op0 = Op.getOperand(0);
11391     unsigned i = 0;
11392 
11393     // Use SCALAR_TO_VECTOR for lane zero to
11394     // a) Avoid a RMW dependency on the full vector register, and
    // b) Allow the register coalescer to fold away the copy if the
    //    value is already in an S or D register, rather than forcing an
    //    INSERT_SUBREG that we can't fold anywhere.
11398     //
11399     // We also allow types like i8 and i16 which are illegal scalar but legal
11400     // vector element types. After type-legalization the inserted value is
11401     // extended (i32) and it is safe to cast them to the vector type by ignoring
11402     // the upper bits of the lowest lane (e.g. v8i8, v4i16).
11403     if (!Op0.isUndef()) {
11404       LLVM_DEBUG(dbgs() << "Creating node for op0, it is not undefined:\n");
11405       Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op0);
11406       ++i;
11407     }
11408     LLVM_DEBUG(if (i < NumElts) dbgs()
11409                    << "Creating nodes for the other vector elements:\n";);
11410     for (; i < NumElts; ++i) {
11411       SDValue V = Op.getOperand(i);
11412       if (V.isUndef())
11413         continue;
11414       SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
11415       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
11416     }
11417     return Vec;
11418   }
11419 
11420   LLVM_DEBUG(
11421       dbgs() << "LowerBUILD_VECTOR: use default expansion, failed to find "
11422                 "better alternative\n");
11423   return SDValue();
11424 }
11425 
11426 SDValue AArch64TargetLowering::LowerCONCAT_VECTORS(SDValue Op,
11427                                                    SelectionDAG &DAG) const {
11428   if (useSVEForFixedLengthVectorVT(Op.getValueType()))
11429     return LowerFixedLengthConcatVectorsToSVE(Op, DAG);
11430 
11431   assert(Op.getValueType().isScalableVector() &&
11432          isTypeLegal(Op.getValueType()) &&
11433          "Expected legal scalable vector type!");
11434 
11435   if (isTypeLegal(Op.getOperand(0).getValueType())) {
11436     unsigned NumOperands = Op->getNumOperands();
11437     assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
11438            "Unexpected number of operands in CONCAT_VECTORS");
11439 
11440     if (NumOperands == 2)
11441       return Op;
11442 
11443     // Concat each pair of subvectors and pack into the lower half of the array.
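    // For example, concat(a, b, c, d) is built as
    // concat(concat(a, b), concat(c, d)), halving the operand list on each
    // pass through the loop below.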
11444     SmallVector<SDValue> ConcatOps(Op->op_begin(), Op->op_end());
11445     while (ConcatOps.size() > 1) {
11446       for (unsigned I = 0, E = ConcatOps.size(); I != E; I += 2) {
11447         SDValue V1 = ConcatOps[I];
11448         SDValue V2 = ConcatOps[I + 1];
11449         EVT SubVT = V1.getValueType();
11450         EVT PairVT = SubVT.getDoubleNumVectorElementsVT(*DAG.getContext());
11451         ConcatOps[I / 2] =
11452             DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), PairVT, V1, V2);
11453       }
11454       ConcatOps.resize(ConcatOps.size() / 2);
11455     }
11456     return ConcatOps[0];
11457   }
11458 
11459   return SDValue();
11460 }
11461 
11462 SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
11463                                                       SelectionDAG &DAG) const {
11464   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!");
11465 
11466   if (useSVEForFixedLengthVectorVT(Op.getValueType()))
11467     return LowerFixedLengthInsertVectorElt(Op, DAG);
11468 
11469   // Check for non-constant or out of range lane.
11470   EVT VT = Op.getOperand(0).getValueType();
11471 
11472   if (VT.getScalarType() == MVT::i1) {
11473     EVT VectorVT = getPromotedVTForPredicate(VT);
11474     SDLoc DL(Op);
11475     SDValue ExtendedVector =
11476         DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, VectorVT);
11477     SDValue ExtendedValue =
11478         DAG.getAnyExtOrTrunc(Op.getOperand(1), DL,
11479                              VectorVT.getScalarType().getSizeInBits() < 32
11480                                  ? MVT::i32
11481                                  : VectorVT.getScalarType());
11482     ExtendedVector =
11483         DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VectorVT, ExtendedVector,
11484                     ExtendedValue, Op.getOperand(2));
11485     return DAG.getAnyExtOrTrunc(ExtendedVector, DL, VT);
11486   }
11487 
11488   ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(2));
11489   if (!CI || CI->getZExtValue() >= VT.getVectorNumElements())
11490     return SDValue();
11491 
11492   // Insertion/extraction are legal for V128 types.
11493   if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
11494       VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
11495       VT == MVT::v8f16 || VT == MVT::v8bf16)
11496     return Op;
11497 
11498   if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 &&
11499       VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16 &&
11500       VT != MVT::v4bf16)
11501     return SDValue();
11502 
  // For V64 types, we perform insertion by expanding the value
  // to a V128 type and performing the insertion on that.
11505   SDLoc DL(Op);
11506   SDValue WideVec = WidenVector(Op.getOperand(0), DAG);
11507   EVT WideTy = WideVec.getValueType();
11508 
11509   SDValue Node = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideTy, WideVec,
11510                              Op.getOperand(1), Op.getOperand(2));
11511   // Re-narrow the resultant vector.
11512   return NarrowVector(Node, DAG);
11513 }
11514 
11515 SDValue
11516 AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
11517                                                SelectionDAG &DAG) const {
11518   assert(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unknown opcode!");
11519   EVT VT = Op.getOperand(0).getValueType();
11520 
11521   if (VT.getScalarType() == MVT::i1) {
11522     // We can't directly extract from an SVE predicate; extend it first.
11523     // (This isn't the only possible lowering, but it's straightforward.)
11524     EVT VectorVT = getPromotedVTForPredicate(VT);
11525     SDLoc DL(Op);
11526     SDValue Extend =
11527         DAG.getNode(ISD::ANY_EXTEND, DL, VectorVT, Op.getOperand(0));
11528     MVT ExtractTy = VectorVT == MVT::nxv2i64 ? MVT::i64 : MVT::i32;
11529     SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractTy,
11530                                   Extend, Op.getOperand(1));
11531     return DAG.getAnyExtOrTrunc(Extract, DL, Op.getValueType());
11532   }
11533 
11534   if (useSVEForFixedLengthVectorVT(VT))
11535     return LowerFixedLengthExtractVectorElt(Op, DAG);
11536 
11537   // Check for non-constant or out of range lane.
11538   ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(1));
11539   if (!CI || CI->getZExtValue() >= VT.getVectorNumElements())
11540     return SDValue();
11541 
11542   // Insertion/extraction are legal for V128 types.
11543   if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
11544       VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 ||
11545       VT == MVT::v8f16 || VT == MVT::v8bf16)
11546     return Op;
11547 
11548   if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 &&
11549       VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16 &&
11550       VT != MVT::v4bf16)
11551     return SDValue();
11552 
  // For V64 types, we perform extraction by expanding the value
  // to a V128 type and performing the extraction on that.
11555   SDLoc DL(Op);
11556   SDValue WideVec = WidenVector(Op.getOperand(0), DAG);
11557   EVT WideTy = WideVec.getValueType();
11558 
11559   EVT ExtrTy = WideTy.getVectorElementType();
11560   if (ExtrTy == MVT::i16 || ExtrTy == MVT::i8)
11561     ExtrTy = MVT::i32;
11562 
11563   // For extractions, we just return the result directly.
11564   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtrTy, WideVec,
11565                      Op.getOperand(1));
11566 }
11567 
11568 SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
11569                                                       SelectionDAG &DAG) const {
11570   assert(Op.getValueType().isFixedLengthVector() &&
11571          "Only cases that extract a fixed length vector are supported!");
11572 
11573   EVT InVT = Op.getOperand(0).getValueType();
11574   unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
11575   unsigned Size = Op.getValueSizeInBits();
11576 
11577   // If we don't have legal types yet, do nothing
11578   if (!DAG.getTargetLoweringInfo().isTypeLegal(InVT))
11579     return SDValue();
11580 
11581   if (InVT.isScalableVector()) {
11582     // This will be matched by custom code during ISelDAGToDAG.
11583     if (Idx == 0 && isPackedVectorType(InVT, DAG))
11584       return Op;
11585 
11586     return SDValue();
11587   }
11588 
11589   // This will get lowered to an appropriate EXTRACT_SUBREG in ISel.
11590   if (Idx == 0 && InVT.getSizeInBits() <= 128)
11591     return Op;
11592 
11593   // If this is extracting the upper 64-bits of a 128-bit vector, we match
11594   // that directly.
11595   if (Size == 64 && Idx * InVT.getScalarSizeInBits() == 64 &&
11596       InVT.getSizeInBits() == 128)
11597     return Op;
11598 
11599   if (useSVEForFixedLengthVectorVT(InVT)) {
11600     SDLoc DL(Op);
11601 
11602     EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
11603     SDValue NewInVec =
11604         convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
11605 
11606     SDValue Splice = DAG.getNode(ISD::VECTOR_SPLICE, DL, ContainerVT, NewInVec,
11607                                  NewInVec, DAG.getConstant(Idx, DL, MVT::i64));
11608     return convertFromScalableVector(DAG, Op.getValueType(), Splice);
11609   }
11610 
11611   return SDValue();
11612 }
11613 
11614 SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
11615                                                      SelectionDAG &DAG) const {
11616   assert(Op.getValueType().isScalableVector() &&
11617          "Only expect to lower inserts into scalable vectors!");
11618 
11619   EVT InVT = Op.getOperand(1).getValueType();
11620   unsigned Idx = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
11621 
11622   SDValue Vec0 = Op.getOperand(0);
11623   SDValue Vec1 = Op.getOperand(1);
11624   SDLoc DL(Op);
11625   EVT VT = Op.getValueType();
11626 
11627   if (InVT.isScalableVector()) {
11628     if (!isTypeLegal(VT))
11629       return SDValue();
11630 
11631     // Break down insert_subvector into simpler parts.
11632     if (VT.getVectorElementType() == MVT::i1) {
11633       unsigned NumElts = VT.getVectorMinNumElements();
11634       EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
11635 
11636       SDValue Lo, Hi;
11637       Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, Vec0,
11638                        DAG.getVectorIdxConstant(0, DL));
11639       Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, Vec0,
11640                        DAG.getVectorIdxConstant(NumElts / 2, DL));
11641       if (Idx < (NumElts / 2)) {
11642         SDValue NewLo = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, HalfVT, Lo, Vec1,
11643                                     DAG.getVectorIdxConstant(Idx, DL));
11644         return DAG.getNode(AArch64ISD::UZP1, DL, VT, NewLo, Hi);
11645       } else {
11646         SDValue NewHi =
11647             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, HalfVT, Hi, Vec1,
11648                         DAG.getVectorIdxConstant(Idx - (NumElts / 2), DL));
11649         return DAG.getNode(AArch64ISD::UZP1, DL, VT, Lo, NewHi);
11650       }
11651     }
11652 
11653     // Ensure the subvector is half the size of the main vector.
11654     if (VT.getVectorElementCount() != (InVT.getVectorElementCount() * 2))
11655       return SDValue();
11656 
    // Here narrow and wide refer to the vector element types. After "casting",
    // both vectors must have the same bit length, so because the subvector
    // has fewer elements, those elements need to be bigger.
11660     EVT NarrowVT = getPackedSVEVectorVT(VT.getVectorElementCount());
11661     EVT WideVT = getPackedSVEVectorVT(InVT.getVectorElementCount());
11662 
11663     // NOP cast operands to the largest legal vector of the same element count.
11664     if (VT.isFloatingPoint()) {
11665       Vec0 = getSVESafeBitCast(NarrowVT, Vec0, DAG);
11666       Vec1 = getSVESafeBitCast(WideVT, Vec1, DAG);
11667     } else {
11668       // Legal integer vectors are already their largest so Vec0 is fine as is.
11669       Vec1 = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Vec1);
11670     }
11671 
11672     // To replace the top/bottom half of vector V with vector SubV we widen the
11673     // preserved half of V, concatenate this to SubV (the order depending on the
11674     // half being replaced) and then narrow the result.
11675     SDValue Narrow;
11676     if (Idx == 0) {
11677       SDValue HiVec0 = DAG.getNode(AArch64ISD::UUNPKHI, DL, WideVT, Vec0);
11678       Narrow = DAG.getNode(AArch64ISD::UZP1, DL, NarrowVT, Vec1, HiVec0);
11679     } else {
11680       assert(Idx == InVT.getVectorMinNumElements() &&
11681              "Invalid subvector index!");
11682       SDValue LoVec0 = DAG.getNode(AArch64ISD::UUNPKLO, DL, WideVT, Vec0);
11683       Narrow = DAG.getNode(AArch64ISD::UZP1, DL, NarrowVT, LoVec0, Vec1);
11684     }
11685 
11686     return getSVESafeBitCast(VT, Narrow, DAG);
11687   }
11688 
11689   if (Idx == 0 && isPackedVectorType(VT, DAG)) {
11690     // This will be matched by custom code during ISelDAGToDAG.
11691     if (Vec0.isUndef())
11692       return Op;
11693 
11694     Optional<unsigned> PredPattern =
11695         getSVEPredPatternFromNumElements(InVT.getVectorNumElements());
11696     auto PredTy = VT.changeVectorElementType(MVT::i1);
11697     SDValue PTrue = getPTrue(DAG, DL, PredTy, *PredPattern);
11698     SDValue ScalableVec1 = convertToScalableVector(DAG, VT, Vec1);
11699     return DAG.getNode(ISD::VSELECT, DL, VT, PTrue, ScalableVec1, Vec0);
11700   }
11701 
11702   return SDValue();
11703 }
11704 
11705 static bool isPow2Splat(SDValue Op, uint64_t &SplatVal, bool &Negated) {
11706   if (Op.getOpcode() != AArch64ISD::DUP &&
11707       Op.getOpcode() != ISD::SPLAT_VECTOR &&
11708       Op.getOpcode() != ISD::BUILD_VECTOR)
11709     return false;
11710 
11711   if (Op.getOpcode() == ISD::BUILD_VECTOR &&
11712       !isAllConstantBuildVector(Op, SplatVal))
11713     return false;
11714 
11715   if (Op.getOpcode() != ISD::BUILD_VECTOR &&
11716       !isa<ConstantSDNode>(Op->getOperand(0)))
11717     return false;
11718 
11719   SplatVal = Op->getConstantOperandVal(0);
11720   if (Op.getValueType().getVectorElementType() != MVT::i64)
11721     SplatVal = (int32_t)SplatVal;
11722 
11723   Negated = false;
11724   if (isPowerOf2_64(SplatVal))
11725     return true;
11726 
11727   Negated = true;
11728   if (isPowerOf2_64(-SplatVal)) {
11729     SplatVal = -SplatVal;
11730     return true;
11731   }
11732 
11733   return false;
11734 }
11735 
11736 SDValue AArch64TargetLowering::LowerDIV(SDValue Op, SelectionDAG &DAG) const {
11737   EVT VT = Op.getValueType();
11738   SDLoc dl(Op);
11739 
11740   if (useSVEForFixedLengthVectorVT(VT, /*OverrideNEON=*/true))
11741     return LowerFixedLengthVectorIntDivideToSVE(Op, DAG);
11742 
11743   assert(VT.isScalableVector() && "Expected a scalable vector.");
11744 
11745   bool Signed = Op.getOpcode() == ISD::SDIV;
11746   unsigned PredOpcode = Signed ? AArch64ISD::SDIV_PRED : AArch64ISD::UDIV_PRED;
11747 
11748   bool Negated;
11749   uint64_t SplatVal;
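  // A signed divide by a power-of-two splat can be lowered to a predicated
  // arithmetic-shift-right. Roughly: sdiv(x, 2^k) -> SRAD(x, k) under an
  // all-true predicate, with an extra negate when the divisor is -(2^k).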
11750   if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated)) {
11751     SDValue Pg = getPredicateForScalableVector(DAG, dl, VT);
11752     SDValue Res =
11753         DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, dl, VT, Pg, Op->getOperand(0),
11754                     DAG.getTargetConstant(Log2_64(SplatVal), dl, MVT::i32));
11755     if (Negated)
11756       Res = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), Res);
11757 
11758     return Res;
11759   }
11760 
11761   if (VT == MVT::nxv4i32 || VT == MVT::nxv2i64)
11762     return LowerToPredicatedOp(Op, DAG, PredOpcode);
11763 
11764   // SVE doesn't have i8 and i16 DIV operations; widen them to 32-bit
11765   // operations, and truncate the result.
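  // Roughly, for nxv16i8: unpack each operand into low/high nxv8i16 halves,
  // divide each pair of halves (those divides are widened again the same
  // way to nxv4i32), then recombine the two half results with UZP1.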
11766   EVT WidenedVT;
11767   if (VT == MVT::nxv16i8)
11768     WidenedVT = MVT::nxv8i16;
11769   else if (VT == MVT::nxv8i16)
11770     WidenedVT = MVT::nxv4i32;
11771   else
11772     llvm_unreachable("Unexpected Custom DIV operation");
11773 
11774   unsigned UnpkLo = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
11775   unsigned UnpkHi = Signed ? AArch64ISD::SUNPKHI : AArch64ISD::UUNPKHI;
11776   SDValue Op0Lo = DAG.getNode(UnpkLo, dl, WidenedVT, Op.getOperand(0));
11777   SDValue Op1Lo = DAG.getNode(UnpkLo, dl, WidenedVT, Op.getOperand(1));
11778   SDValue Op0Hi = DAG.getNode(UnpkHi, dl, WidenedVT, Op.getOperand(0));
11779   SDValue Op1Hi = DAG.getNode(UnpkHi, dl, WidenedVT, Op.getOperand(1));
11780   SDValue ResultLo = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0Lo, Op1Lo);
11781   SDValue ResultHi = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0Hi, Op1Hi);
11782   return DAG.getNode(AArch64ISD::UZP1, dl, VT, ResultLo, ResultHi);
11783 }
11784 
11785 bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
11786   // Currently no fixed length shuffles that require SVE are legal.
11787   if (useSVEForFixedLengthVectorVT(VT))
11788     return false;
11789 
11790   if (VT.getVectorNumElements() == 4 &&
11791       (VT.is128BitVector() || VT.is64BitVector())) {
11792     unsigned Cost = getPerfectShuffleCost(M);
11793     if (Cost <= 1)
11794       return true;
11795   }
11796 
11797   bool DummyBool;
11798   int DummyInt;
11799   unsigned DummyUnsigned;
11800 
11801   return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) || isREVMask(M, VT, 64) ||
11802           isREVMask(M, VT, 32) || isREVMask(M, VT, 16) ||
11803           isEXTMask(M, VT, DummyBool, DummyUnsigned) ||
11804           // isTBLMask(M, VT) || // FIXME: Port TBL support from ARM.
11805           isTRNMask(M, VT, DummyUnsigned) || isUZPMask(M, VT, DummyUnsigned) ||
11806           isZIPMask(M, VT, DummyUnsigned) ||
11807           isTRN_v_undef_Mask(M, VT, DummyUnsigned) ||
11808           isUZP_v_undef_Mask(M, VT, DummyUnsigned) ||
11809           isZIP_v_undef_Mask(M, VT, DummyUnsigned) ||
11810           isINSMask(M, VT.getVectorNumElements(), DummyBool, DummyInt) ||
11811           isConcatMask(M, VT, VT.getSizeInBits() == 128));
11812 }
11813 
11814 /// getVShiftImm - Check if this is a valid build_vector for the immediate
11815 /// operand of a vector shift operation, where all the elements of the
11816 /// build_vector must have the same constant integer value.
11817 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
11818   // Ignore bit_converts.
11819   while (Op.getOpcode() == ISD::BITCAST)
11820     Op = Op.getOperand(0);
11821   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
11822   APInt SplatBits, SplatUndef;
11823   unsigned SplatBitSize;
11824   bool HasAnyUndefs;
11825   if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
11826                                     HasAnyUndefs, ElementBits) ||
11827       SplatBitSize > ElementBits)
11828     return false;
11829   Cnt = SplatBits.getSExtValue();
11830   return true;
11831 }
11832 
11833 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
11834 /// operand of a vector shift left operation.  That value must be in the range:
11835 ///   0 <= Value < ElementBits for a left shift; or
11836 ///   0 <= Value <= ElementBits for a long left shift.
11837 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
11838   assert(VT.isVector() && "vector shift count is not a vector type");
11839   int64_t ElementBits = VT.getScalarSizeInBits();
11840   if (!getVShiftImm(Op, ElementBits, Cnt))
11841     return false;
11842   return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
11843 }
11844 
11845 /// isVShiftRImm - Check if this is a valid build_vector for the immediate
11846 /// operand of a vector shift right operation. The value must be in the range:
///   1 <= Value <= ElementBits for a right shift; or
///   1 <= Value <= ElementBits/2 for a narrowing right shift.
11848 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt) {
11849   assert(VT.isVector() && "vector shift count is not a vector type");
11850   int64_t ElementBits = VT.getScalarSizeInBits();
11851   if (!getVShiftImm(Op, ElementBits, Cnt))
11852     return false;
11853   return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
11854 }
11855 
11856 SDValue AArch64TargetLowering::LowerTRUNCATE(SDValue Op,
11857                                              SelectionDAG &DAG) const {
11858   EVT VT = Op.getValueType();
11859 
11860   if (VT.getScalarType() == MVT::i1) {
11861     // Lower i1 truncate to `(x & 1) != 0`.
11862     SDLoc dl(Op);
11863     EVT OpVT = Op.getOperand(0).getValueType();
11864     SDValue Zero = DAG.getConstant(0, dl, OpVT);
11865     SDValue One = DAG.getConstant(1, dl, OpVT);
11866     SDValue And = DAG.getNode(ISD::AND, dl, OpVT, Op.getOperand(0), One);
11867     return DAG.getSetCC(dl, VT, And, Zero, ISD::SETNE);
11868   }
11869 
11870   if (!VT.isVector() || VT.isScalableVector())
11871     return SDValue();
11872 
11873   if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType()))
11874     return LowerFixedLengthVectorTruncateToSVE(Op, DAG);
11875 
11876   return SDValue();
11877 }
11878 
11879 SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
11880                                                       SelectionDAG &DAG) const {
11881   EVT VT = Op.getValueType();
11882   SDLoc DL(Op);
11883   int64_t Cnt;
11884 
11885   if (!Op.getOperand(1).getValueType().isVector())
11886     return Op;
11887   unsigned EltSize = VT.getScalarSizeInBits();
11888 
11889   switch (Op.getOpcode()) {
11890   case ISD::SHL:
11891     if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT))
11892       return LowerToPredicatedOp(Op, DAG, AArch64ISD::SHL_PRED);
11893 
11894     if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize)
11895       return DAG.getNode(AArch64ISD::VSHL, DL, VT, Op.getOperand(0),
11896                          DAG.getConstant(Cnt, DL, MVT::i32));
11897     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
11898                        DAG.getConstant(Intrinsic::aarch64_neon_ushl, DL,
11899                                        MVT::i32),
11900                        Op.getOperand(0), Op.getOperand(1));
11901   case ISD::SRA:
11902   case ISD::SRL:
11903     if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT)) {
11904       unsigned Opc = Op.getOpcode() == ISD::SRA ? AArch64ISD::SRA_PRED
11905                                                 : AArch64ISD::SRL_PRED;
11906       return LowerToPredicatedOp(Op, DAG, Opc);
11907     }
11908 
11909     // Right shift immediate
11910     if (isVShiftRImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize) {
11911       unsigned Opc =
11912           (Op.getOpcode() == ISD::SRA) ? AArch64ISD::VASHR : AArch64ISD::VLSHR;
11913       return DAG.getNode(Opc, DL, VT, Op.getOperand(0),
11914                          DAG.getConstant(Cnt, DL, MVT::i32));
11915     }
11916 
    // Right shift register.  Note that there is no shift right register
    // instruction, but the shift left register instruction takes a signed
    // value, where negative numbers specify a right shift.
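    // Roughly: x >> y is emitted as sshl(x, 0 - y) for arithmetic shifts and
    // ushl(x, 0 - y) for logical shifts.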
11920     unsigned Opc = (Op.getOpcode() == ISD::SRA) ? Intrinsic::aarch64_neon_sshl
11921                                                 : Intrinsic::aarch64_neon_ushl;
    // Negate the shift amount.
11923     SDValue NegShift = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
11924                                    Op.getOperand(1));
11925     SDValue NegShiftLeft =
11926         DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
11927                     DAG.getConstant(Opc, DL, MVT::i32), Op.getOperand(0),
11928                     NegShift);
11929     return NegShiftLeft;
11930   }
11931 
11932   llvm_unreachable("unexpected shift opcode");
11933 }
11934 
11935 static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
11936                                     AArch64CC::CondCode CC, bool NoNans, EVT VT,
11937                                     const SDLoc &dl, SelectionDAG &DAG) {
11938   EVT SrcVT = LHS.getValueType();
11939   assert(VT.getSizeInBits() == SrcVT.getSizeInBits() &&
11940          "function only supposed to emit natural comparisons");
11941 
11942   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
11943   APInt CnstBits(VT.getSizeInBits(), 0);
11944   APInt UndefBits(VT.getSizeInBits(), 0);
11945   bool IsCnst = BVN && resolveBuildVector(BVN, CnstBits, UndefBits);
11946   bool IsZero = IsCnst && (CnstBits == 0);
11947 
11948   if (SrcVT.getVectorElementType().isFloatingPoint()) {
11949     switch (CC) {
11950     default:
11951       return SDValue();
11952     case AArch64CC::NE: {
11953       SDValue Fcmeq;
11954       if (IsZero)
11955         Fcmeq = DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS);
11956       else
11957         Fcmeq = DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS);
11958       return DAG.getNOT(dl, Fcmeq, VT);
11959     }
11960     case AArch64CC::EQ:
11961       if (IsZero)
11962         return DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS);
11963       return DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS);
11964     case AArch64CC::GE:
11965       if (IsZero)
11966         return DAG.getNode(AArch64ISD::FCMGEz, dl, VT, LHS);
11967       return DAG.getNode(AArch64ISD::FCMGE, dl, VT, LHS, RHS);
11968     case AArch64CC::GT:
11969       if (IsZero)
11970         return DAG.getNode(AArch64ISD::FCMGTz, dl, VT, LHS);
11971       return DAG.getNode(AArch64ISD::FCMGT, dl, VT, LHS, RHS);
11972     case AArch64CC::LS:
11973       if (IsZero)
11974         return DAG.getNode(AArch64ISD::FCMLEz, dl, VT, LHS);
11975       return DAG.getNode(AArch64ISD::FCMGE, dl, VT, RHS, LHS);
11976     case AArch64CC::LT:
11977       if (!NoNans)
11978         return SDValue();
      // If we ignore NaNs then we can use the MI implementation.
11980       LLVM_FALLTHROUGH;
11981     case AArch64CC::MI:
11982       if (IsZero)
11983         return DAG.getNode(AArch64ISD::FCMLTz, dl, VT, LHS);
11984       return DAG.getNode(AArch64ISD::FCMGT, dl, VT, RHS, LHS);
11985     }
11986   }
11987 
11988   switch (CC) {
11989   default:
11990     return SDValue();
11991   case AArch64CC::NE: {
11992     SDValue Cmeq;
11993     if (IsZero)
11994       Cmeq = DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS);
11995     else
11996       Cmeq = DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS);
11997     return DAG.getNOT(dl, Cmeq, VT);
11998   }
11999   case AArch64CC::EQ:
12000     if (IsZero)
12001       return DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS);
12002     return DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS);
12003   case AArch64CC::GE:
12004     if (IsZero)
12005       return DAG.getNode(AArch64ISD::CMGEz, dl, VT, LHS);
12006     return DAG.getNode(AArch64ISD::CMGE, dl, VT, LHS, RHS);
12007   case AArch64CC::GT:
12008     if (IsZero)
12009       return DAG.getNode(AArch64ISD::CMGTz, dl, VT, LHS);
12010     return DAG.getNode(AArch64ISD::CMGT, dl, VT, LHS, RHS);
12011   case AArch64CC::LE:
12012     if (IsZero)
12013       return DAG.getNode(AArch64ISD::CMLEz, dl, VT, LHS);
12014     return DAG.getNode(AArch64ISD::CMGE, dl, VT, RHS, LHS);
12015   case AArch64CC::LS:
12016     return DAG.getNode(AArch64ISD::CMHS, dl, VT, RHS, LHS);
12017   case AArch64CC::LO:
12018     return DAG.getNode(AArch64ISD::CMHI, dl, VT, RHS, LHS);
12019   case AArch64CC::LT:
12020     if (IsZero)
12021       return DAG.getNode(AArch64ISD::CMLTz, dl, VT, LHS);
12022     return DAG.getNode(AArch64ISD::CMGT, dl, VT, RHS, LHS);
12023   case AArch64CC::HI:
12024     return DAG.getNode(AArch64ISD::CMHI, dl, VT, LHS, RHS);
12025   case AArch64CC::HS:
12026     return DAG.getNode(AArch64ISD::CMHS, dl, VT, LHS, RHS);
12027   }
12028 }
12029 
12030 SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
12031                                            SelectionDAG &DAG) const {
12032   if (Op.getValueType().isScalableVector())
12033     return LowerToPredicatedOp(Op, DAG, AArch64ISD::SETCC_MERGE_ZERO);
12034 
12035   if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType()))
12036     return LowerFixedLengthVectorSetccToSVE(Op, DAG);
12037 
12038   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
12039   SDValue LHS = Op.getOperand(0);
12040   SDValue RHS = Op.getOperand(1);
12041   EVT CmpVT = LHS.getValueType().changeVectorElementTypeToInteger();
12042   SDLoc dl(Op);
12043 
12044   if (LHS.getValueType().getVectorElementType().isInteger()) {
12045     assert(LHS.getValueType() == RHS.getValueType());
12046     AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
12047     SDValue Cmp =
12048         EmitVectorComparison(LHS, RHS, AArch64CC, false, CmpVT, dl, DAG);
12049     return DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
12050   }
12051 
12052   const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
12053 
  // Make v4f16 (only) fcmp operations utilise vector instructions.
  // v8f16 support will be a little more complicated.
12056   if (!FullFP16 && LHS.getValueType().getVectorElementType() == MVT::f16) {
12057     if (LHS.getValueType().getVectorNumElements() == 4) {
12058       LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, LHS);
12059       RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, RHS);
12060       SDValue NewSetcc = DAG.getSetCC(dl, MVT::v4i16, LHS, RHS, CC);
12061       DAG.ReplaceAllUsesWith(Op, NewSetcc);
12062       CmpVT = MVT::v4i32;
12063     } else
12064       return SDValue();
12065   }
12066 
12067   assert((!FullFP16 && LHS.getValueType().getVectorElementType() != MVT::f16) ||
12068           LHS.getValueType().getVectorElementType() != MVT::f128);
12069 
12070   // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
12071   // clean.  Some of them require two branches to implement.
12072   AArch64CC::CondCode CC1, CC2;
12073   bool ShouldInvert;
12074   changeVectorFPCCToAArch64CC(CC, CC1, CC2, ShouldInvert);
12075 
12076   bool NoNaNs = getTargetMachine().Options.NoNaNsFPMath;
12077   SDValue Cmp =
12078       EmitVectorComparison(LHS, RHS, CC1, NoNaNs, CmpVT, dl, DAG);
12079   if (!Cmp.getNode())
12080     return SDValue();
12081 
12082   if (CC2 != AArch64CC::AL) {
12083     SDValue Cmp2 =
12084         EmitVectorComparison(LHS, RHS, CC2, NoNaNs, CmpVT, dl, DAG);
12085     if (!Cmp2.getNode())
12086       return SDValue();
12087 
12088     Cmp = DAG.getNode(ISD::OR, dl, CmpVT, Cmp, Cmp2);
12089   }
12090 
12091   Cmp = DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
12092 
12093   if (ShouldInvert)
12094     Cmp = DAG.getNOT(dl, Cmp, Cmp.getValueType());
12095 
12096   return Cmp;
12097 }
12098 
12099 static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp,
12100                                   SelectionDAG &DAG) {
12101   SDValue VecOp = ScalarOp.getOperand(0);
12102   auto Rdx = DAG.getNode(Op, DL, VecOp.getSimpleValueType(), VecOp);
12103   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarOp.getValueType(), Rdx,
12104                      DAG.getConstant(0, DL, MVT::i64));
12105 }
12106 
12107 SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op,
12108                                               SelectionDAG &DAG) const {
12109   SDValue Src = Op.getOperand(0);
12110 
12111   // Try to lower fixed length reductions to SVE.
12112   EVT SrcVT = Src.getValueType();
12113   bool OverrideNEON = Op.getOpcode() == ISD::VECREDUCE_AND ||
12114                       Op.getOpcode() == ISD::VECREDUCE_OR ||
12115                       Op.getOpcode() == ISD::VECREDUCE_XOR ||
12116                       Op.getOpcode() == ISD::VECREDUCE_FADD ||
12117                       (Op.getOpcode() != ISD::VECREDUCE_ADD &&
12118                        SrcVT.getVectorElementType() == MVT::i64);
12119   if (SrcVT.isScalableVector() ||
12120       useSVEForFixedLengthVectorVT(
12121           SrcVT, OverrideNEON && Subtarget->useSVEForFixedLengthVectors())) {
12122 
12123     if (SrcVT.getVectorElementType() == MVT::i1)
12124       return LowerPredReductionToSVE(Op, DAG);
12125 
12126     switch (Op.getOpcode()) {
12127     case ISD::VECREDUCE_ADD:
12128       return LowerReductionToSVE(AArch64ISD::UADDV_PRED, Op, DAG);
12129     case ISD::VECREDUCE_AND:
12130       return LowerReductionToSVE(AArch64ISD::ANDV_PRED, Op, DAG);
12131     case ISD::VECREDUCE_OR:
12132       return LowerReductionToSVE(AArch64ISD::ORV_PRED, Op, DAG);
12133     case ISD::VECREDUCE_SMAX:
12134       return LowerReductionToSVE(AArch64ISD::SMAXV_PRED, Op, DAG);
12135     case ISD::VECREDUCE_SMIN:
12136       return LowerReductionToSVE(AArch64ISD::SMINV_PRED, Op, DAG);
12137     case ISD::VECREDUCE_UMAX:
12138       return LowerReductionToSVE(AArch64ISD::UMAXV_PRED, Op, DAG);
12139     case ISD::VECREDUCE_UMIN:
12140       return LowerReductionToSVE(AArch64ISD::UMINV_PRED, Op, DAG);
12141     case ISD::VECREDUCE_XOR:
12142       return LowerReductionToSVE(AArch64ISD::EORV_PRED, Op, DAG);
12143     case ISD::VECREDUCE_FADD:
12144       return LowerReductionToSVE(AArch64ISD::FADDV_PRED, Op, DAG);
12145     case ISD::VECREDUCE_FMAX:
12146       return LowerReductionToSVE(AArch64ISD::FMAXNMV_PRED, Op, DAG);
12147     case ISD::VECREDUCE_FMIN:
12148       return LowerReductionToSVE(AArch64ISD::FMINNMV_PRED, Op, DAG);
12149     default:
12150       llvm_unreachable("Unhandled fixed length reduction");
12151     }
12152   }
12153 
12154   // Lower NEON reductions.
12155   SDLoc dl(Op);
12156   switch (Op.getOpcode()) {
12157   case ISD::VECREDUCE_ADD:
12158     return getReductionSDNode(AArch64ISD::UADDV, dl, Op, DAG);
12159   case ISD::VECREDUCE_SMAX:
12160     return getReductionSDNode(AArch64ISD::SMAXV, dl, Op, DAG);
12161   case ISD::VECREDUCE_SMIN:
12162     return getReductionSDNode(AArch64ISD::SMINV, dl, Op, DAG);
12163   case ISD::VECREDUCE_UMAX:
12164     return getReductionSDNode(AArch64ISD::UMAXV, dl, Op, DAG);
12165   case ISD::VECREDUCE_UMIN:
12166     return getReductionSDNode(AArch64ISD::UMINV, dl, Op, DAG);
12167   case ISD::VECREDUCE_FMAX: {
12168     return DAG.getNode(
12169         ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(),
12170         DAG.getConstant(Intrinsic::aarch64_neon_fmaxnmv, dl, MVT::i32),
12171         Src);
12172   }
12173   case ISD::VECREDUCE_FMIN: {
12174     return DAG.getNode(
12175         ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(),
12176         DAG.getConstant(Intrinsic::aarch64_neon_fminnmv, dl, MVT::i32),
12177         Src);
12178   }
12179   default:
12180     llvm_unreachable("Unhandled reduction");
12181   }
12182 }
12183 
12184 SDValue AArch64TargetLowering::LowerATOMIC_LOAD_SUB(SDValue Op,
12185                                                     SelectionDAG &DAG) const {
12186   auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
12187   if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics())
12188     return SDValue();
12189 
12190   // LSE has an atomic load-add instruction, but not a load-sub.
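  // Roughly: atomicrmw sub p, x  ==>  atomicrmw add p, (0 - x).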
12191   SDLoc dl(Op);
12192   MVT VT = Op.getSimpleValueType();
12193   SDValue RHS = Op.getOperand(2);
12194   AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode());
12195   RHS = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), RHS);
12196   return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, AN->getMemoryVT(),
12197                        Op.getOperand(0), Op.getOperand(1), RHS,
12198                        AN->getMemOperand());
12199 }
12200 
12201 SDValue AArch64TargetLowering::LowerATOMIC_LOAD_AND(SDValue Op,
12202                                                     SelectionDAG &DAG) const {
12203   auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
12204   if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics())
12205     return SDValue();
12206 
12207   // LSE has an atomic load-clear instruction, but not a load-and.
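  // Roughly: atomicrmw and p, x  ==>  atomicrmw clr p, ~x, since the
  // load-clear operation computes old & ~operand.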
12208   SDLoc dl(Op);
12209   MVT VT = Op.getSimpleValueType();
12210   SDValue RHS = Op.getOperand(2);
12211   AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode());
12212   RHS = DAG.getNode(ISD::XOR, dl, VT, DAG.getConstant(-1ULL, dl, VT), RHS);
12213   return DAG.getAtomic(ISD::ATOMIC_LOAD_CLR, dl, AN->getMemoryVT(),
12214                        Op.getOperand(0), Op.getOperand(1), RHS,
12215                        AN->getMemOperand());
12216 }
12217 
12218 SDValue AArch64TargetLowering::LowerWindowsDYNAMIC_STACKALLOC(
12219     SDValue Op, SDValue Chain, SDValue &Size, SelectionDAG &DAG) const {
12220   SDLoc dl(Op);
12221   EVT PtrVT = getPointerTy(DAG.getDataLayout());
12222   SDValue Callee = DAG.getTargetExternalSymbol("__chkstk", PtrVT, 0);
12223 
12224   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
12225   const uint32_t *Mask = TRI->getWindowsStackProbePreservedMask();
12226   if (Subtarget->hasCustomCallingConv())
12227     TRI->UpdateCustomCallPreservedMask(DAG.getMachineFunction(), &Mask);
12228 
12229   Size = DAG.getNode(ISD::SRL, dl, MVT::i64, Size,
12230                      DAG.getConstant(4, dl, MVT::i64));
12231   Chain = DAG.getCopyToReg(Chain, dl, AArch64::X15, Size, SDValue());
12232   Chain =
12233       DAG.getNode(AArch64ISD::CALL, dl, DAG.getVTList(MVT::Other, MVT::Glue),
12234                   Chain, Callee, DAG.getRegister(AArch64::X15, MVT::i64),
12235                   DAG.getRegisterMask(Mask), Chain.getValue(1));
12236   // To match the actual intent better, we should read the output from X15 here
12237   // again (instead of potentially spilling it to the stack), but rereading Size
12238   // from X15 here doesn't work at -O0, since it thinks that X15 is undefined
12239   // here.
12240 
12241   Size = DAG.getNode(ISD::SHL, dl, MVT::i64, Size,
12242                      DAG.getConstant(4, dl, MVT::i64));
12243   return Chain;
12244 }
12245 
12246 SDValue
12247 AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
12248                                                SelectionDAG &DAG) const {
12249   assert(Subtarget->isTargetWindows() &&
12250          "Only Windows alloca probing supported");
12251   SDLoc dl(Op);
12252   // Get the inputs.
12253   SDNode *Node = Op.getNode();
12254   SDValue Chain = Op.getOperand(0);
12255   SDValue Size = Op.getOperand(1);
12256   MaybeAlign Align =
12257       cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
12258   EVT VT = Node->getValueType(0);
12259 
12260   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
12261           "no-stack-arg-probe")) {
12262     SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64);
12263     Chain = SP.getValue(1);
12264     SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size);
12265     if (Align)
12266       SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
12267                        DAG.getConstant(-(uint64_t)Align->value(), dl, VT));
12268     Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP);
12269     SDValue Ops[2] = {SP, Chain};
12270     return DAG.getMergeValues(Ops, dl);
12271   }
12272 
12273   Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
12274 
12275   Chain = LowerWindowsDYNAMIC_STACKALLOC(Op, Chain, Size, DAG);
12276 
12277   SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64);
12278   Chain = SP.getValue(1);
12279   SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size);
12280   if (Align)
12281     SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
12282                      DAG.getConstant(-(uint64_t)Align->value(), dl, VT));
12283   Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP);
12284 
12285   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
12286                              DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
12287 
12288   SDValue Ops[2] = {SP, Chain};
12289   return DAG.getMergeValues(Ops, dl);
12290 }
12291 
12292 SDValue AArch64TargetLowering::LowerVSCALE(SDValue Op,
12293                                            SelectionDAG &DAG) const {
12294   EVT VT = Op.getValueType();
12295   assert(VT != MVT::i64 && "Expected illegal VSCALE node");
12296 
12297   SDLoc DL(Op);
12298   APInt MulImm = cast<ConstantSDNode>(Op.getOperand(0))->getAPIntValue();
12299   return DAG.getZExtOrTrunc(DAG.getVScale(DL, MVT::i64, MulImm.sext(64)), DL,
12300                             VT);
12301 }
12302 
12303 /// Set the IntrinsicInfo for the `aarch64_sve_st<N>` intrinsics.
12304 template <unsigned NumVecs>
12305 static bool
12306 setInfoSVEStN(const AArch64TargetLowering &TLI, const DataLayout &DL,
12307               AArch64TargetLowering::IntrinsicInfo &Info, const CallInst &CI) {
12308   Info.opc = ISD::INTRINSIC_VOID;
12309   // Retrieve EC from first vector argument.
12310   const EVT VT = TLI.getMemValueType(DL, CI.getArgOperand(0)->getType());
12311   ElementCount EC = VT.getVectorElementCount();
12312 #ifndef NDEBUG
12313   // Check the assumption that all input vectors are the same type.
12314   for (unsigned I = 0; I < NumVecs; ++I)
12315     assert(VT == TLI.getMemValueType(DL, CI.getArgOperand(I)->getType()) &&
12316            "Invalid type.");
12317 #endif
12318   // memVT is `NumVecs * VT`.
12319   Info.memVT = EVT::getVectorVT(CI.getType()->getContext(), VT.getScalarType(),
12320                                 EC * NumVecs);
12321   Info.ptrVal = CI.getArgOperand(CI.arg_size() - 1);
12322   Info.offset = 0;
12323   Info.align.reset();
12324   Info.flags = MachineMemOperand::MOStore;
12325   return true;
12326 }
12327 
12328 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
12329 /// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
12330 /// specified in the intrinsic calls.
12331 bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
12332                                                const CallInst &I,
12333                                                MachineFunction &MF,
12334                                                unsigned Intrinsic) const {
12335   auto &DL = I.getModule()->getDataLayout();
12336   switch (Intrinsic) {
12337   case Intrinsic::aarch64_sve_st2:
12338     return setInfoSVEStN<2>(*this, DL, Info, I);
12339   case Intrinsic::aarch64_sve_st3:
12340     return setInfoSVEStN<3>(*this, DL, Info, I);
12341   case Intrinsic::aarch64_sve_st4:
12342     return setInfoSVEStN<4>(*this, DL, Info, I);
12343   case Intrinsic::aarch64_neon_ld2:
12344   case Intrinsic::aarch64_neon_ld3:
12345   case Intrinsic::aarch64_neon_ld4:
12346   case Intrinsic::aarch64_neon_ld1x2:
12347   case Intrinsic::aarch64_neon_ld1x3:
12348   case Intrinsic::aarch64_neon_ld1x4:
12349   case Intrinsic::aarch64_neon_ld2lane:
12350   case Intrinsic::aarch64_neon_ld3lane:
12351   case Intrinsic::aarch64_neon_ld4lane:
12352   case Intrinsic::aarch64_neon_ld2r:
12353   case Intrinsic::aarch64_neon_ld3r:
12354   case Intrinsic::aarch64_neon_ld4r: {
12355     Info.opc = ISD::INTRINSIC_W_CHAIN;
12356     // Conservatively set memVT to the entire set of vectors loaded.
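    // For example, an ld2 returning two <4 x i32> values is conservatively
    // modelled as a single 256-bit (v4i64) memory access.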
12357     uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
12358     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
12359     Info.ptrVal = I.getArgOperand(I.arg_size() - 1);
12360     Info.offset = 0;
12361     Info.align.reset();
12362     // volatile loads with NEON intrinsics not supported
12363     Info.flags = MachineMemOperand::MOLoad;
12364     return true;
12365   }
12366   case Intrinsic::aarch64_neon_st2:
12367   case Intrinsic::aarch64_neon_st3:
12368   case Intrinsic::aarch64_neon_st4:
12369   case Intrinsic::aarch64_neon_st1x2:
12370   case Intrinsic::aarch64_neon_st1x3:
12371   case Intrinsic::aarch64_neon_st1x4:
12372   case Intrinsic::aarch64_neon_st2lane:
12373   case Intrinsic::aarch64_neon_st3lane:
12374   case Intrinsic::aarch64_neon_st4lane: {
12375     Info.opc = ISD::INTRINSIC_VOID;
12376     // Conservatively set memVT to the entire set of vectors stored.
12377     unsigned NumElts = 0;
12378     for (const Value *Arg : I.args()) {
12379       Type *ArgTy = Arg->getType();
12380       if (!ArgTy->isVectorTy())
12381         break;
12382       NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
12383     }
12384     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
12385     Info.ptrVal = I.getArgOperand(I.arg_size() - 1);
12386     Info.offset = 0;
12387     Info.align.reset();
12388     // volatile stores with NEON intrinsics not supported
12389     Info.flags = MachineMemOperand::MOStore;
12390     return true;
12391   }
12392   case Intrinsic::aarch64_ldaxr:
12393   case Intrinsic::aarch64_ldxr: {
12394     Type *ValTy = I.getParamElementType(0);
12395     Info.opc = ISD::INTRINSIC_W_CHAIN;
12396     Info.memVT = MVT::getVT(ValTy);
12397     Info.ptrVal = I.getArgOperand(0);
12398     Info.offset = 0;
12399     Info.align = DL.getABITypeAlign(ValTy);
12400     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
12401     return true;
12402   }
12403   case Intrinsic::aarch64_stlxr:
12404   case Intrinsic::aarch64_stxr: {
12405     Type *ValTy = I.getParamElementType(1);
12406     Info.opc = ISD::INTRINSIC_W_CHAIN;
12407     Info.memVT = MVT::getVT(ValTy);
12408     Info.ptrVal = I.getArgOperand(1);
12409     Info.offset = 0;
12410     Info.align = DL.getABITypeAlign(ValTy);
12411     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
12412     return true;
12413   }
12414   case Intrinsic::aarch64_ldaxp:
12415   case Intrinsic::aarch64_ldxp:
12416     Info.opc = ISD::INTRINSIC_W_CHAIN;
12417     Info.memVT = MVT::i128;
12418     Info.ptrVal = I.getArgOperand(0);
12419     Info.offset = 0;
12420     Info.align = Align(16);
12421     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
12422     return true;
12423   case Intrinsic::aarch64_stlxp:
12424   case Intrinsic::aarch64_stxp:
12425     Info.opc = ISD::INTRINSIC_W_CHAIN;
12426     Info.memVT = MVT::i128;
12427     Info.ptrVal = I.getArgOperand(2);
12428     Info.offset = 0;
12429     Info.align = Align(16);
12430     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
12431     return true;
12432   case Intrinsic::aarch64_sve_ldnt1: {
12433     Type *ElTy = cast<VectorType>(I.getType())->getElementType();
12434     Info.opc = ISD::INTRINSIC_W_CHAIN;
12435     Info.memVT = MVT::getVT(I.getType());
12436     Info.ptrVal = I.getArgOperand(1);
12437     Info.offset = 0;
12438     Info.align = DL.getABITypeAlign(ElTy);
12439     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MONonTemporal;
12440     return true;
12441   }
12442   case Intrinsic::aarch64_sve_stnt1: {
12443     Type *ElTy =
12444         cast<VectorType>(I.getArgOperand(0)->getType())->getElementType();
12445     Info.opc = ISD::INTRINSIC_W_CHAIN;
12446     Info.memVT = MVT::getVT(I.getOperand(0)->getType());
12447     Info.ptrVal = I.getArgOperand(2);
12448     Info.offset = 0;
12449     Info.align = DL.getABITypeAlign(ElTy);
12450     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MONonTemporal;
12451     return true;
12452   }
12453   case Intrinsic::aarch64_mops_memset_tag: {
12454     Value *Dst = I.getArgOperand(0);
12455     Value *Val = I.getArgOperand(1);
12456     Info.opc = ISD::INTRINSIC_W_CHAIN;
12457     Info.memVT = MVT::getVT(Val->getType());
12458     Info.ptrVal = Dst;
12459     Info.offset = 0;
12460     Info.align = I.getParamAlign(0).valueOrOne();
12461     Info.flags = MachineMemOperand::MOStore;
12462     // The size of the memory being operated on is unknown at this point
12463     Info.size = MemoryLocation::UnknownSize;
12464     return true;
12465   }
12466   default:
12467     break;
12468   }
12469 
12470   return false;
12471 }
12472 
12473 bool AArch64TargetLowering::shouldReduceLoadWidth(SDNode *Load,
12474                                                   ISD::LoadExtType ExtTy,
12475                                                   EVT NewVT) const {
12476   // TODO: This may be worth removing. Check regression tests for diffs.
12477   if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT))
12478     return false;
12479 
12480   // If we're reducing the load width in order to avoid having to use an extra
12481   // instruction to do extension then it's probably a good idea.
12482   if (ExtTy != ISD::NON_EXTLOAD)
12483     return true;
12484   // Don't reduce load width if it would prevent us from combining a shift into
12485   // the offset.
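  // For example (illustrative only): a 64-bit load from base + (index << 3)
  // can use a scaled register offset; narrowing it to a 32-bit load would
  // leave a shift amount of 3 that no longer matches the access size.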
12486   MemSDNode *Mem = dyn_cast<MemSDNode>(Load);
12487   assert(Mem);
12488   const SDValue &Base = Mem->getBasePtr();
12489   if (Base.getOpcode() == ISD::ADD &&
12490       Base.getOperand(1).getOpcode() == ISD::SHL &&
12491       Base.getOperand(1).hasOneUse() &&
12492       Base.getOperand(1).getOperand(1).getOpcode() == ISD::Constant) {
12493     // It's unknown whether a scalable vector has a power-of-2 bitwidth.
12494     if (Mem->getMemoryVT().isScalableVector())
12495       return false;
12496     // The shift can be combined if it matches the size of the value being
12497     // loaded (and so reducing the width would make it not match).
12498     uint64_t ShiftAmount = Base.getOperand(1).getConstantOperandVal(1);
12499     uint64_t LoadBytes = Mem->getMemoryVT().getSizeInBits()/8;
12500     if (ShiftAmount == Log2_32(LoadBytes))
12501       return false;
12502   }
12503   // We have no reason to disallow reducing the load width, so allow it.
12504   return true;
12505 }
12506 
// Truncations from a 64-bit GPR to a 32-bit GPR are free.
12508 bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
12509   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
12510     return false;
12511   uint64_t NumBits1 = Ty1->getPrimitiveSizeInBits().getFixedSize();
12512   uint64_t NumBits2 = Ty2->getPrimitiveSizeInBits().getFixedSize();
12513   return NumBits1 > NumBits2;
12514 }
12515 bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
12516   if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger())
12517     return false;
12518   uint64_t NumBits1 = VT1.getFixedSizeInBits();
12519   uint64_t NumBits2 = VT2.getFixedSizeInBits();
12520   return NumBits1 > NumBits2;
12521 }
12522 
/// Check if it is profitable to hoist an instruction in then/else to if.
/// Not profitable if I and its user can form an FMA instruction
/// because we prefer FMSUB/FMADD.
12526 bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const {
12527   if (I->getOpcode() != Instruction::FMul)
12528     return true;
12529 
12530   if (!I->hasOneUse())
12531     return true;
12532 
12533   Instruction *User = I->user_back();
12534 
12535   if (!(User->getOpcode() == Instruction::FSub ||
12536         User->getOpcode() == Instruction::FAdd))
12537     return true;
12538 
12539   const TargetOptions &Options = getTargetMachine().Options;
12540   const Function *F = I->getFunction();
12541   const DataLayout &DL = F->getParent()->getDataLayout();
12542   Type *Ty = User->getOperand(0)->getType();
12543 
12544   return !(isFMAFasterThanFMulAndFAdd(*F, Ty) &&
12545            isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
12546            (Options.AllowFPOpFusion == FPOpFusion::Fast ||
12547             Options.UnsafeFPMath));
12548 }
12549 
12550 // All 32-bit GPR operations implicitly zero the high-half of the corresponding
12551 // 64-bit GPR.
12552 bool AArch64TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
12553   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
12554     return false;
12555   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
12556   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
12557   return NumBits1 == 32 && NumBits2 == 64;
12558 }
12559 bool AArch64TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
12560   if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger())
12561     return false;
12562   unsigned NumBits1 = VT1.getSizeInBits();
12563   unsigned NumBits2 = VT2.getSizeInBits();
12564   return NumBits1 == 32 && NumBits2 == 64;
12565 }
12566 
12567 bool AArch64TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
12568   EVT VT1 = Val.getValueType();
12569   if (isZExtFree(VT1, VT2)) {
12570     return true;
12571   }
12572 
12573   if (Val.getOpcode() != ISD::LOAD)
12574     return false;
12575 
12576   // 8-, 16-, and 32-bit integer loads all implicitly zero-extend.
12577   return (VT1.isSimple() && !VT1.isVector() && VT1.isInteger() &&
12578           VT2.isSimple() && !VT2.isVector() && VT2.isInteger() &&
12579           VT1.getSizeInBits() <= 32);
12580 }
12581 
12582 bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {
12583   if (isa<FPExtInst>(Ext))
12584     return false;
12585 
12586   // Vector types are not free.
12587   if (Ext->getType()->isVectorTy())
12588     return false;
12589 
12590   for (const Use &U : Ext->uses()) {
12591     // The extension is free if we can fold it with a left shift in an
12592     // addressing mode or an arithmetic operation: add, sub, and cmp.
12593 
12594     // Is there a shift?
12595     const Instruction *Instr = cast<Instruction>(U.getUser());
12596 
12597     // Is this a constant shift?
12598     switch (Instr->getOpcode()) {
12599     case Instruction::Shl:
12600       if (!isa<ConstantInt>(Instr->getOperand(1)))
12601         return false;
12602       break;
12603     case Instruction::GetElementPtr: {
12604       gep_type_iterator GTI = gep_type_begin(Instr);
12605       auto &DL = Ext->getModule()->getDataLayout();
12606       std::advance(GTI, U.getOperandNo()-1);
12607       Type *IdxTy = GTI.getIndexedType();
12608       // This extension will end up with a shift because of the scaling factor.
12609       // 8-bit sized types have a scaling factor of 1, thus a shift amount of 0.
12610       // Get the shift amount based on the scaling factor:
12611       // log2(sizeof(IdxTy)) - log2(8).
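      // For example, stepping over i64 elements gives a shift amount of
      // log2(64 bits) - log2(8) = 3, which the addressing mode can fold
      // (the amount is within the 1..4 range checked below).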
12612       uint64_t ShiftAmt =
12613         countTrailingZeros(DL.getTypeStoreSizeInBits(IdxTy).getFixedSize()) - 3;
12614       // Is the constant foldable in the shift of the addressing mode?
12615       // I.e., shift amount is between 1 and 4 inclusive.
12616       if (ShiftAmt == 0 || ShiftAmt > 4)
12617         return false;
12618       break;
12619     }
12620     case Instruction::Trunc:
12621       // Check if this is a noop.
12622       // trunc(sext ty1 to ty2) to ty1.
12623       if (Instr->getType() == Ext->getOperand(0)->getType())
12624         continue;
12625       LLVM_FALLTHROUGH;
12626     default:
12627       return false;
12628     }
12629 
12630     // At this point we can use the bfm family, so this extension is free
12631     // for that use.
12632   }
12633   return true;
12634 }
12635 
12636 /// Check if both Op1 and Op2 are shufflevector extracts of either the lower
12637 /// or upper half of the vector elements.
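/// A rough IR illustration (value names are made up); both operands extract
/// the same (here: upper) half of their sources:
///   %op1 = shufflevector <8 x i16> %a, <8 x i16> undef,
///                        <4 x i32> <i32 4, i32 5, i32 6, i32 7>
///   %op2 = shufflevector <8 x i16> %b, <8 x i16> undef,
///                        <4 x i32> <i32 4, i32 5, i32 6, i32 7>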
12638 static bool areExtractShuffleVectors(Value *Op1, Value *Op2) {
12639   auto areTypesHalfed = [](Value *FullV, Value *HalfV) {
12640     auto *FullTy = FullV->getType();
12641     auto *HalfTy = HalfV->getType();
12642     return FullTy->getPrimitiveSizeInBits().getFixedSize() ==
12643            2 * HalfTy->getPrimitiveSizeInBits().getFixedSize();
12644   };
12645 
12646   auto extractHalf = [](Value *FullV, Value *HalfV) {
12647     auto *FullVT = cast<FixedVectorType>(FullV->getType());
12648     auto *HalfVT = cast<FixedVectorType>(HalfV->getType());
12649     return FullVT->getNumElements() == 2 * HalfVT->getNumElements();
12650   };
12651 
12652   ArrayRef<int> M1, M2;
12653   Value *S1Op1, *S2Op1;
12654   if (!match(Op1, m_Shuffle(m_Value(S1Op1), m_Undef(), m_Mask(M1))) ||
12655       !match(Op2, m_Shuffle(m_Value(S2Op1), m_Undef(), m_Mask(M2))))
12656     return false;
12657 
12658   // Check that the operands are half as wide as the result and we extract
12659   // half of the elements of the input vectors.
12660   if (!areTypesHalfed(S1Op1, Op1) || !areTypesHalfed(S2Op1, Op2) ||
12661       !extractHalf(S1Op1, Op1) || !extractHalf(S2Op1, Op2))
12662     return false;
12663 
12664   // Check the mask extracts either the lower or upper half of vector
12665   // elements.
12666   int M1Start = -1;
12667   int M2Start = -1;
12668   int NumElements = cast<FixedVectorType>(Op1->getType())->getNumElements() * 2;
12669   if (!ShuffleVectorInst::isExtractSubvectorMask(M1, NumElements, M1Start) ||
12670       !ShuffleVectorInst::isExtractSubvectorMask(M2, NumElements, M2Start) ||
12671       M1Start != M2Start || (M1Start != 0 && M2Start != (NumElements / 2)))
12672     return false;
12673 
12674   return true;
12675 }
12676 
12677 /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth
12678 /// of the vector elements.
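/// A minimal sketch (names are illustrative):
///   %ext1 = sext <8 x i8> %a to <8 x i16>
///   %ext2 = sext <8 x i8> %b to <8 x i16>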
12679 static bool areExtractExts(Value *Ext1, Value *Ext2) {
12680   auto areExtDoubled = [](Instruction *Ext) {
12681     return Ext->getType()->getScalarSizeInBits() ==
12682            2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
12683   };
12684 
12685   if (!match(Ext1, m_ZExtOrSExt(m_Value())) ||
12686       !match(Ext2, m_ZExtOrSExt(m_Value())) ||
12687       !areExtDoubled(cast<Instruction>(Ext1)) ||
12688       !areExtDoubled(cast<Instruction>(Ext2)))
12689     return false;
12690 
12691   return true;
12692 }
12693 
12694 /// Check if Op could be used with vmull_high_p64 intrinsic.
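/// A minimal sketch (names are illustrative):
///   %op = extractelement <2 x i64> %v, i64 1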
12695 static bool isOperandOfVmullHighP64(Value *Op) {
12696   Value *VectorOperand = nullptr;
12697   ConstantInt *ElementIndex = nullptr;
12698   return match(Op, m_ExtractElt(m_Value(VectorOperand),
12699                                 m_ConstantInt(ElementIndex))) &&
12700          ElementIndex->getValue() == 1 &&
12701          isa<FixedVectorType>(VectorOperand->getType()) &&
12702          cast<FixedVectorType>(VectorOperand->getType())->getNumElements() == 2;
12703 }
12704 
12705 /// Check if Op1 and Op2 could be used with vmull_high_p64 intrinsic.
12706 static bool areOperandsOfVmullHighP64(Value *Op1, Value *Op2) {
12707   return isOperandOfVmullHighP64(Op1) && isOperandOfVmullHighP64(Op2);
12708 }
12709 
12710 static bool isSplatShuffle(Value *V) {
12711   if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V))
12712     return is_splat(Shuf->getShuffleMask());
12713   return false;
12714 }
12715 
12716 /// Check if sinking \p I's operands to I's basic block is profitable, because
12717 /// the operands can be folded into a target instruction, e.g.
12718 /// shufflevector extracts and/or sext/zext can be folded into (u,s)subl(2).
12719 bool AArch64TargetLowering::shouldSinkOperands(
12720     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
12721   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
12722     switch (II->getIntrinsicID()) {
12723     case Intrinsic::aarch64_neon_smull:
12724     case Intrinsic::aarch64_neon_umull:
12725       if (areExtractShuffleVectors(II->getOperand(0), II->getOperand(1))) {
12726         Ops.push_back(&II->getOperandUse(0));
12727         Ops.push_back(&II->getOperandUse(1));
12728         return true;
12729       }
12730       LLVM_FALLTHROUGH;
12731 
12732     case Intrinsic::fma:
12733       if (isa<VectorType>(I->getType()) &&
12734           cast<VectorType>(I->getType())->getElementType()->isHalfTy() &&
12735           !Subtarget->hasFullFP16())
12736         return false;
12737       LLVM_FALLTHROUGH;
12738     case Intrinsic::aarch64_neon_sqdmull:
12739     case Intrinsic::aarch64_neon_sqdmulh:
12740     case Intrinsic::aarch64_neon_sqrdmulh:
12741       // Sink splats for index lane variants
12742       if (isSplatShuffle(II->getOperand(0)))
12743         Ops.push_back(&II->getOperandUse(0));
12744       if (isSplatShuffle(II->getOperand(1)))
12745         Ops.push_back(&II->getOperandUse(1));
12746       return !Ops.empty();
12747     case Intrinsic::aarch64_sme_write_horiz:
12748     case Intrinsic::aarch64_sme_write_vert:
12749     case Intrinsic::aarch64_sme_writeq_horiz:
12750     case Intrinsic::aarch64_sme_writeq_vert: {
12751       auto *Idx = dyn_cast<Instruction>(II->getOperand(1));
12752       if (!Idx || Idx->getOpcode() != Instruction::Add)
12753         return false;
12754       Ops.push_back(&II->getOperandUse(1));
12755       return true;
12756     }
12757     case Intrinsic::aarch64_sme_read_horiz:
12758     case Intrinsic::aarch64_sme_read_vert:
12759     case Intrinsic::aarch64_sme_readq_horiz:
12760     case Intrinsic::aarch64_sme_readq_vert:
12761     case Intrinsic::aarch64_sme_ld1b_vert:
12762     case Intrinsic::aarch64_sme_ld1h_vert:
12763     case Intrinsic::aarch64_sme_ld1w_vert:
12764     case Intrinsic::aarch64_sme_ld1d_vert:
12765     case Intrinsic::aarch64_sme_ld1q_vert:
12766     case Intrinsic::aarch64_sme_st1b_vert:
12767     case Intrinsic::aarch64_sme_st1h_vert:
12768     case Intrinsic::aarch64_sme_st1w_vert:
12769     case Intrinsic::aarch64_sme_st1d_vert:
12770     case Intrinsic::aarch64_sme_st1q_vert:
12771     case Intrinsic::aarch64_sme_ld1b_horiz:
12772     case Intrinsic::aarch64_sme_ld1h_horiz:
12773     case Intrinsic::aarch64_sme_ld1w_horiz:
12774     case Intrinsic::aarch64_sme_ld1d_horiz:
12775     case Intrinsic::aarch64_sme_ld1q_horiz:
12776     case Intrinsic::aarch64_sme_st1b_horiz:
12777     case Intrinsic::aarch64_sme_st1h_horiz:
12778     case Intrinsic::aarch64_sme_st1w_horiz:
12779     case Intrinsic::aarch64_sme_st1d_horiz:
12780     case Intrinsic::aarch64_sme_st1q_horiz: {
12781       auto *Idx = dyn_cast<Instruction>(II->getOperand(3));
12782       if (!Idx || Idx->getOpcode() != Instruction::Add)
12783         return false;
12784       Ops.push_back(&II->getOperandUse(3));
12785       return true;
12786     }
12787     case Intrinsic::aarch64_neon_pmull:
12788       if (!areExtractShuffleVectors(II->getOperand(0), II->getOperand(1)))
12789         return false;
12790       Ops.push_back(&II->getOperandUse(0));
12791       Ops.push_back(&II->getOperandUse(1));
12792       return true;
12793     case Intrinsic::aarch64_neon_pmull64:
12794       if (!areOperandsOfVmullHighP64(II->getArgOperand(0),
12795                                      II->getArgOperand(1)))
12796         return false;
12797       Ops.push_back(&II->getArgOperandUse(0));
12798       Ops.push_back(&II->getArgOperandUse(1));
12799       return true;
12800     default:
12801       return false;
12802     }
12803   }
12804 
12805   if (!I->getType()->isVectorTy())
12806     return false;
12807 
12808   switch (I->getOpcode()) {
12809   case Instruction::Sub:
12810   case Instruction::Add: {
12811     if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
12812       return false;
12813 
12814     // If the exts' operands extract either the lower or upper elements, we
12815     // can sink them too.
12816     auto Ext1 = cast<Instruction>(I->getOperand(0));
12817     auto Ext2 = cast<Instruction>(I->getOperand(1));
12818     if (areExtractShuffleVectors(Ext1->getOperand(0), Ext2->getOperand(0))) {
12819       Ops.push_back(&Ext1->getOperandUse(0));
12820       Ops.push_back(&Ext2->getOperandUse(0));
12821     }
12822 
12823     Ops.push_back(&I->getOperandUse(0));
12824     Ops.push_back(&I->getOperandUse(1));
12825 
12826     return true;
12827   }
12828   case Instruction::Mul: {
12829     bool IsProfitable = false;
12830     for (auto &Op : I->operands()) {
12831       // Make sure we are not already sinking this operand
12832       if (any_of(Ops, [&](Use *U) { return U->get() == Op; }))
12833         continue;
12834 
12835       ShuffleVectorInst *Shuffle = dyn_cast<ShuffleVectorInst>(Op);
12836       if (!Shuffle || !Shuffle->isZeroEltSplat())
12837         continue;
12838 
12839       Value *ShuffleOperand = Shuffle->getOperand(0);
12840       InsertElementInst *Insert = dyn_cast<InsertElementInst>(ShuffleOperand);
12841       if (!Insert)
12842         continue;
12843 
12844       Instruction *OperandInstr = dyn_cast<Instruction>(Insert->getOperand(1));
12845       if (!OperandInstr)
12846         continue;
12847 
12848       ConstantInt *ElementConstant =
12849           dyn_cast<ConstantInt>(Insert->getOperand(2));
12850       // Check that the insertelement is inserting into element 0
12851       if (!ElementConstant || ElementConstant->getZExtValue() != 0)
12852         continue;
12853 
12854       unsigned Opcode = OperandInstr->getOpcode();
12855       if (Opcode != Instruction::SExt && Opcode != Instruction::ZExt)
12856         continue;
12857 
12858       Ops.push_back(&Shuffle->getOperandUse(0));
12859       Ops.push_back(&Op);
12860       IsProfitable = true;
12861     }
12862 
12863     return IsProfitable;
12864   }
12865   default:
12866     return false;
12867   }
12868   return false;
12869 }
12870 
12871 bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
12872                                           Align &RequiredAlignment) const {
12873   if (!LoadedType.isSimple() ||
12874       (!LoadedType.isInteger() && !LoadedType.isFloatingPoint()))
12875     return false;
12876   // Cyclone supports unaligned accesses.
12877   RequiredAlignment = Align(1);
12878   unsigned NumBits = LoadedType.getSizeInBits();
12879   return NumBits == 32 || NumBits == 64;
12880 }
12881 
12882 /// A helper function for determining the number of interleaved accesses we
12883 /// will generate when lowering accesses of the given type.
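/// For example (assuming plain NEON, i.e. 128-bit containers), a <16 x i32>
/// access (512 bits) results in 4 interleaved accesses.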
12884 unsigned AArch64TargetLowering::getNumInterleavedAccesses(
12885     VectorType *VecTy, const DataLayout &DL, bool UseScalable) const {
12886   unsigned VecSize = UseScalable ? Subtarget->getMinSVEVectorSizeInBits() : 128;
12887   return std::max<unsigned>(1, (DL.getTypeSizeInBits(VecTy) + 127) / VecSize);
12888 }
12889 
12890 MachineMemOperand::Flags
12891 AArch64TargetLowering::getTargetMMOFlags(const Instruction &I) const {
12892   if (Subtarget->getProcFamily() == AArch64Subtarget::Falkor &&
12893       I.getMetadata(FALKOR_STRIDED_ACCESS_MD) != nullptr)
12894     return MOStridedAccess;
12895   return MachineMemOperand::MONone;
12896 }
12897 
12898 bool AArch64TargetLowering::isLegalInterleavedAccessType(
12899     VectorType *VecTy, const DataLayout &DL, bool &UseScalable) const {
12900 
12901   unsigned VecSize = DL.getTypeSizeInBits(VecTy);
12902   unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());
12903   unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
12904 
12905   UseScalable = false;
12906 
12907   // Ensure the number of vector elements is greater than 1.
12908   if (NumElements < 2)
12909     return false;
12910 
12911   // Ensure the element type is legal.
12912   if (ElSize != 8 && ElSize != 16 && ElSize != 32 && ElSize != 64)
12913     return false;
12914 
12915   if (Subtarget->useSVEForFixedLengthVectors() &&
12916       (VecSize % Subtarget->getMinSVEVectorSizeInBits() == 0 ||
12917        (VecSize < Subtarget->getMinSVEVectorSizeInBits() &&
12918         isPowerOf2_32(NumElements) && VecSize > 128))) {
12919     UseScalable = true;
12920     return true;
12921   }
12922 
12923   // Ensure the total vector size is 64 or a multiple of 128. Types larger than
12924   // 128 will be split into multiple interleaved accesses.
12925   return VecSize == 64 || VecSize % 128 == 0;
12926 }
12927 
12928 static ScalableVectorType *getSVEContainerIRType(FixedVectorType *VTy) {
12929   if (VTy->getElementType() == Type::getDoubleTy(VTy->getContext()))
12930     return ScalableVectorType::get(VTy->getElementType(), 2);
12931 
12932   if (VTy->getElementType() == Type::getFloatTy(VTy->getContext()))
12933     return ScalableVectorType::get(VTy->getElementType(), 4);
12934 
12935   if (VTy->getElementType() == Type::getBFloatTy(VTy->getContext()))
12936     return ScalableVectorType::get(VTy->getElementType(), 8);
12937 
12938   if (VTy->getElementType() == Type::getHalfTy(VTy->getContext()))
12939     return ScalableVectorType::get(VTy->getElementType(), 8);
12940 
12941   if (VTy->getElementType() == Type::getInt64Ty(VTy->getContext()))
12942     return ScalableVectorType::get(VTy->getElementType(), 2);
12943 
12944   if (VTy->getElementType() == Type::getInt32Ty(VTy->getContext()))
12945     return ScalableVectorType::get(VTy->getElementType(), 4);
12946 
12947   if (VTy->getElementType() == Type::getInt16Ty(VTy->getContext()))
12948     return ScalableVectorType::get(VTy->getElementType(), 8);
12949 
12950   if (VTy->getElementType() == Type::getInt8Ty(VTy->getContext()))
12951     return ScalableVectorType::get(VTy->getElementType(), 16);
12952 
12953   llvm_unreachable("Cannot handle input vector type");
12954 }
12955 
12956 /// Lower an interleaved load into a ldN intrinsic.
12957 ///
12958 /// E.g. Lower an interleaved load (Factor = 2):
12959 ///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr
12960 ///        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
12961 ///        %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
12962 ///
12963 ///      Into:
12964 ///        %ld2 = { <4 x i32>, <4 x i32> } call llvm.aarch64.neon.ld2(%ptr)
12965 ///        %vec0 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 0
12966 ///        %vec1 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 1
12967 bool AArch64TargetLowering::lowerInterleavedLoad(
12968     LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
12969     ArrayRef<unsigned> Indices, unsigned Factor) const {
12970   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
12971          "Invalid interleave factor");
12972   assert(!Shuffles.empty() && "Empty shufflevector input");
12973   assert(Shuffles.size() == Indices.size() &&
12974          "Unmatched number of shufflevectors and indices");
12975 
12976   const DataLayout &DL = LI->getModule()->getDataLayout();
12977 
12978   VectorType *VTy = Shuffles[0]->getType();
12979 
12980   // Skip if we do not have NEON and skip illegal vector types. We can
12981   // "legalize" wide vector types into multiple interleaved accesses as long as
12982   // the vector types are divisible by 128.
12983   bool UseScalable;
12984   if (!Subtarget->hasNEON() ||
12985       !isLegalInterleavedAccessType(VTy, DL, UseScalable))
12986     return false;
12987 
12988   unsigned NumLoads = getNumInterleavedAccesses(VTy, DL, UseScalable);
12989 
12990   auto *FVTy = cast<FixedVectorType>(VTy);
12991 
12992   // A pointer vector cannot be the return type of the ldN intrinsics. Need to
12993   // load integer vectors first and then convert to pointer vectors.
12994   Type *EltTy = FVTy->getElementType();
12995   if (EltTy->isPointerTy())
12996     FVTy =
12997         FixedVectorType::get(DL.getIntPtrType(EltTy), FVTy->getNumElements());
12998 
12999   // If we're going to generate more than one load, reset the sub-vector type
13000   // to something legal.
13001   FVTy = FixedVectorType::get(FVTy->getElementType(),
13002                               FVTy->getNumElements() / NumLoads);
13003 
13004   auto *LDVTy =
13005       UseScalable ? cast<VectorType>(getSVEContainerIRType(FVTy)) : FVTy;
13006 
13007   IRBuilder<> Builder(LI);
13008 
13009   // The base address of the load.
13010   Value *BaseAddr = LI->getPointerOperand();
13011 
13012   if (NumLoads > 1) {
13013     // We will compute the pointer operand of each load from the original base
13014     // address using GEPs. Cast the base address to a pointer to the scalar
13015     // element type.
13016     BaseAddr = Builder.CreateBitCast(
13017         BaseAddr,
13018         LDVTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()));
13019   }
13020 
13021   Type *PtrTy =
13022       UseScalable
13023           ? LDVTy->getElementType()->getPointerTo(LI->getPointerAddressSpace())
13024           : LDVTy->getPointerTo(LI->getPointerAddressSpace());
13025   Type *PredTy = VectorType::get(Type::getInt1Ty(LDVTy->getContext()),
13026                                  LDVTy->getElementCount());
13027 
13028   static const Intrinsic::ID SVELoadIntrs[3] = {
13029       Intrinsic::aarch64_sve_ld2_sret, Intrinsic::aarch64_sve_ld3_sret,
13030       Intrinsic::aarch64_sve_ld4_sret};
13031   static const Intrinsic::ID NEONLoadIntrs[3] = {Intrinsic::aarch64_neon_ld2,
13032                                                  Intrinsic::aarch64_neon_ld3,
13033                                                  Intrinsic::aarch64_neon_ld4};
13034   Function *LdNFunc;
13035   if (UseScalable)
13036     LdNFunc = Intrinsic::getDeclaration(LI->getModule(),
13037                                         SVELoadIntrs[Factor - 2], {LDVTy});
13038   else
13039     LdNFunc = Intrinsic::getDeclaration(
13040         LI->getModule(), NEONLoadIntrs[Factor - 2], {LDVTy, PtrTy});
13041 
13042   // Holds sub-vectors extracted from the load intrinsic return values. The
13043   // sub-vectors are associated with the shufflevector instructions they will
13044   // replace.
13045   DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;
13046 
13047   Value *PTrue = nullptr;
13048   if (UseScalable) {
13049     Optional<unsigned> PgPattern =
13050         getSVEPredPatternFromNumElements(FVTy->getNumElements());
13051     if (Subtarget->getMinSVEVectorSizeInBits() ==
13052             Subtarget->getMaxSVEVectorSizeInBits() &&
13053         Subtarget->getMinSVEVectorSizeInBits() == DL.getTypeSizeInBits(FVTy))
13054       PgPattern = AArch64SVEPredPattern::all;
13055 
13056     auto *PTruePat =
13057         ConstantInt::get(Type::getInt32Ty(LDVTy->getContext()), *PgPattern);
13058     PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {PredTy},
13059                                     {PTruePat});
13060   }
13061 
13062   for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
13063 
13064     // If we're generating more than one load, compute the base address of
13065     // subsequent loads as an offset from the previous.
13066     if (LoadCount > 0)
13067       BaseAddr = Builder.CreateConstGEP1_32(LDVTy->getElementType(), BaseAddr,
13068                                             FVTy->getNumElements() * Factor);
13069 
13070     CallInst *LdN;
13071     if (UseScalable)
13072       LdN = Builder.CreateCall(
13073           LdNFunc, {PTrue, Builder.CreateBitCast(BaseAddr, PtrTy)}, "ldN");
13074     else
13075       LdN = Builder.CreateCall(LdNFunc, Builder.CreateBitCast(BaseAddr, PtrTy),
13076                                "ldN");
13077 
13078     // Extract and store the sub-vectors returned by the load intrinsic.
13079     for (unsigned i = 0; i < Shuffles.size(); i++) {
13080       ShuffleVectorInst *SVI = Shuffles[i];
13081       unsigned Index = Indices[i];
13082 
13083       Value *SubVec = Builder.CreateExtractValue(LdN, Index);
13084 
13085       if (UseScalable)
13086         SubVec = Builder.CreateExtractVector(
13087             FVTy, SubVec,
13088             ConstantInt::get(Type::getInt64Ty(VTy->getContext()), 0));
13089 
13090       // Convert the integer vector to pointer vector if the element is pointer.
13091       if (EltTy->isPointerTy())
13092         SubVec = Builder.CreateIntToPtr(
13093             SubVec, FixedVectorType::get(SVI->getType()->getElementType(),
13094                                          FVTy->getNumElements()));
13095 
13096       SubVecs[SVI].push_back(SubVec);
13097     }
13098   }
13099 
13100   // Replace uses of the shufflevector instructions with the sub-vectors
13101   // returned by the load intrinsic. If a shufflevector instruction is
13102   // associated with more than one sub-vector, those sub-vectors will be
13103   // concatenated into a single wide vector.
13104   for (ShuffleVectorInst *SVI : Shuffles) {
13105     auto &SubVec = SubVecs[SVI];
13106     auto *WideVec =
13107         SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
13108     SVI->replaceAllUsesWith(WideVec);
13109   }
13110 
13111   return true;
13112 }
13113 
13114 /// Lower an interleaved store into a stN intrinsic.
13115 ///
13116 /// E.g. Lower an interleaved store (Factor = 3):
13117 ///        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
13118 ///                 <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
13119 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr
13120 ///
13121 ///      Into:
13122 ///        %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
13123 ///        %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
13124 ///        %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
13125 ///        call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr)
13126 ///
13127 /// Note that the new shufflevectors will be removed and we'll only generate one
13128 /// st3 instruction in CodeGen.
13129 ///
13130 /// Example for a more general valid mask (Factor 3). Lower:
13131 ///        %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
13132 ///                 <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
13133 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr
13134 ///
13135 ///      Into:
13136 ///        %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7>
13137 ///        %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
13138 ///        %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
13139 ///        call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr)
13140 bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
13141                                                   ShuffleVectorInst *SVI,
13142                                                   unsigned Factor) const {
13143   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
13144          "Invalid interleave factor");
13145 
13146   auto *VecTy = cast<FixedVectorType>(SVI->getType());
13147   assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
13148 
13149   unsigned LaneLen = VecTy->getNumElements() / Factor;
13150   Type *EltTy = VecTy->getElementType();
13151   auto *SubVecTy = FixedVectorType::get(EltTy, LaneLen);
13152 
13153   const DataLayout &DL = SI->getModule()->getDataLayout();
13154   bool UseScalable;
13155 
13156   // Skip if we do not have NEON and skip illegal vector types. We can
13157   // "legalize" wide vector types into multiple interleaved accesses as long as
13158   // the vector types are divisible by 128.
13159   if (!Subtarget->hasNEON() ||
13160       !isLegalInterleavedAccessType(SubVecTy, DL, UseScalable))
13161     return false;
13162 
13163   unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL, UseScalable);
13164 
13165   Value *Op0 = SVI->getOperand(0);
13166   Value *Op1 = SVI->getOperand(1);
13167   IRBuilder<> Builder(SI);
13168 
13169   // StN intrinsics don't support pointer vectors as arguments. Convert pointer
13170   // vectors to integer vectors.
13171   if (EltTy->isPointerTy()) {
13172     Type *IntTy = DL.getIntPtrType(EltTy);
13173     unsigned NumOpElts =
13174         cast<FixedVectorType>(Op0->getType())->getNumElements();
13175 
13176     // Convert to the corresponding integer vector.
13177     auto *IntVecTy = FixedVectorType::get(IntTy, NumOpElts);
13178     Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
13179     Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);
13180 
13181     SubVecTy = FixedVectorType::get(IntTy, LaneLen);
13182   }
13183 
13184   // If we're going to generate more than one store, reset the lane length
13185   // and sub-vector type to something legal.
13186   LaneLen /= NumStores;
13187   SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen);
13188 
13189   auto *STVTy = UseScalable ? cast<VectorType>(getSVEContainerIRType(SubVecTy))
13190                             : SubVecTy;
13191 
13192   // The base address of the store.
13193   Value *BaseAddr = SI->getPointerOperand();
13194 
13195   if (NumStores > 1) {
13196     // We will compute the pointer operand of each store from the original base
13197     // address using GEPs. Cast the base address to a pointer to the scalar
13198     // element type.
13199     BaseAddr = Builder.CreateBitCast(
13200         BaseAddr,
13201         SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace()));
13202   }
13203 
13204   auto Mask = SVI->getShuffleMask();
13205 
13206   Type *PtrTy =
13207       UseScalable
13208           ? STVTy->getElementType()->getPointerTo(SI->getPointerAddressSpace())
13209           : STVTy->getPointerTo(SI->getPointerAddressSpace());
13210   Type *PredTy = VectorType::get(Type::getInt1Ty(STVTy->getContext()),
13211                                  STVTy->getElementCount());
13212 
13213   static const Intrinsic::ID SVEStoreIntrs[3] = {Intrinsic::aarch64_sve_st2,
13214                                                  Intrinsic::aarch64_sve_st3,
13215                                                  Intrinsic::aarch64_sve_st4};
13216   static const Intrinsic::ID NEONStoreIntrs[3] = {Intrinsic::aarch64_neon_st2,
13217                                                   Intrinsic::aarch64_neon_st3,
13218                                                   Intrinsic::aarch64_neon_st4};
13219   Function *StNFunc;
13220   if (UseScalable)
13221     StNFunc = Intrinsic::getDeclaration(SI->getModule(),
13222                                         SVEStoreIntrs[Factor - 2], {STVTy});
13223   else
13224     StNFunc = Intrinsic::getDeclaration(
13225         SI->getModule(), NEONStoreIntrs[Factor - 2], {STVTy, PtrTy});
13226 
13227   Value *PTrue = nullptr;
13228   if (UseScalable) {
13229     Optional<unsigned> PgPattern =
13230         getSVEPredPatternFromNumElements(SubVecTy->getNumElements());
13231     if (Subtarget->getMinSVEVectorSizeInBits() ==
13232             Subtarget->getMaxSVEVectorSizeInBits() &&
13233         Subtarget->getMinSVEVectorSizeInBits() ==
13234             DL.getTypeSizeInBits(SubVecTy))
13235       PgPattern = AArch64SVEPredPattern::all;
13236 
13237     auto *PTruePat =
13238         ConstantInt::get(Type::getInt32Ty(STVTy->getContext()), *PgPattern);
13239     PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {PredTy},
13240                                     {PTruePat});
13241   }
13242 
13243   for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
13244 
13245     SmallVector<Value *, 5> Ops;
13246 
13247     // Split the shufflevector operands into sub vectors for the new stN call.
13248     for (unsigned i = 0; i < Factor; i++) {
13249       Value *Shuffle;
13250       unsigned IdxI = StoreCount * LaneLen * Factor + i;
13251       if (Mask[IdxI] >= 0) {
13252         Shuffle = Builder.CreateShuffleVector(
13253             Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0));
13254       } else {
13255         unsigned StartMask = 0;
13256         for (unsigned j = 1; j < LaneLen; j++) {
13257           unsigned IdxJ = StoreCount * LaneLen * Factor + j;
13258           if (Mask[IdxJ * Factor + IdxI] >= 0) {
13259             StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
13260             break;
13261           }
13262         }
13263         // Note: Filling undef gaps with random elements is ok, since
13264         // those elements were being written anyway (with undefs).
13265         // In the case of all undefs we default to using elements from 0.
13266         // Note: StartMask cannot be negative; it is checked in
13267         // isReInterleaveMask.
13268         Shuffle = Builder.CreateShuffleVector(
13269             Op0, Op1, createSequentialMask(StartMask, LaneLen, 0));
13270       }
13271 
13272       if (UseScalable)
13273         Shuffle = Builder.CreateInsertVector(
13274             STVTy, UndefValue::get(STVTy), Shuffle,
13275             ConstantInt::get(Type::getInt64Ty(STVTy->getContext()), 0));
13276 
13277       Ops.push_back(Shuffle);
13278     }
13279 
13280     if (UseScalable)
13281       Ops.push_back(PTrue);
13282 
13283     // If we're generating more than one store, compute the base address of
13284     // subsequent stores as an offset from the previous.
13285     if (StoreCount > 0)
13286       BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(),
13287                                             BaseAddr, LaneLen * Factor);
13288 
13289     Ops.push_back(Builder.CreateBitCast(BaseAddr, PtrTy));
13290     Builder.CreateCall(StNFunc, Ops);
13291   }
13292   return true;
13293 }
13294 
13295 // Lower an SVE structured load intrinsic returning a tuple type to target
13296 // specific intrinsic taking the same input but returning a multi-result value
13297 // of the split tuple type.
13298 //
13299 // E.g. Lowering an LD3:
13300 //
13301 //  call <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32(
13302 //                                                    <vscale x 4 x i1> %pred,
13303 //                                                    <vscale x 4 x i32>* %addr)
13304 //
13305 //  Output DAG:
13306 //
13307 //    t0: ch = EntryToken
13308 //        t2: nxv4i1,ch = CopyFromReg t0, Register:nxv4i1 %0
13309 //        t4: i64,ch = CopyFromReg t0, Register:i64 %1
13310 //    t5: nxv4i32,nxv4i32,nxv4i32,ch = AArch64ISD::SVE_LD3 t0, t2, t4
13311 //    t6: nxv12i32 = concat_vectors t5, t5:1, t5:2
13312 //
13313 // This is called pre-legalization to avoid widening/splitting issues with
13314 // non-power-of-2 tuple types used for LD3, such as nxv12i32.
13315 SDValue AArch64TargetLowering::LowerSVEStructLoad(unsigned Intrinsic,
13316                                                   ArrayRef<SDValue> LoadOps,
13317                                                   EVT VT, SelectionDAG &DAG,
13318                                                   const SDLoc &DL) const {
13319   assert(VT.isScalableVector() && "Can only lower scalable vectors");
13320 
13321   unsigned N, Opcode;
13322   static const std::pair<unsigned, std::pair<unsigned, unsigned>>
13323       IntrinsicMap[] = {
13324           {Intrinsic::aarch64_sve_ld2, {2, AArch64ISD::SVE_LD2_MERGE_ZERO}},
13325           {Intrinsic::aarch64_sve_ld3, {3, AArch64ISD::SVE_LD3_MERGE_ZERO}},
13326           {Intrinsic::aarch64_sve_ld4, {4, AArch64ISD::SVE_LD4_MERGE_ZERO}}};
13327 
13328   std::tie(N, Opcode) = llvm::find_if(IntrinsicMap, [&](auto P) {
13329                           return P.first == Intrinsic;
13330                         })->second;
13331   assert(VT.getVectorElementCount().getKnownMinValue() % N == 0 &&
13332          "invalid tuple vector type!");
13333 
13334   EVT SplitVT =
13335       EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
13336                        VT.getVectorElementCount().divideCoefficientBy(N));
13337   assert(isTypeLegal(SplitVT));
13338 
13339   SmallVector<EVT, 5> VTs(N, SplitVT);
13340   VTs.push_back(MVT::Other); // Chain
13341   SDVTList NodeTys = DAG.getVTList(VTs);
13342 
13343   SDValue PseudoLoad = DAG.getNode(Opcode, DL, NodeTys, LoadOps);
13344   SmallVector<SDValue, 4> PseudoLoadOps;
13345   for (unsigned I = 0; I < N; ++I)
13346     PseudoLoadOps.push_back(SDValue(PseudoLoad.getNode(), I));
13347   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, PseudoLoadOps);
13348 }
13349 
13350 EVT AArch64TargetLowering::getOptimalMemOpType(
13351     const MemOp &Op, const AttributeList &FuncAttributes) const {
13352   bool CanImplicitFloat = !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
13353   bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat;
13354   bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat;
13355   // Only use AdvSIMD to implement memsets of 32 bytes and above. Below that it
13356   // would take one instruction to materialize the v2i64 zero plus one store
13357   // with a restrictive addressing mode, so just do i64 stores.
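  // For instance (illustrative numbers), a 64-byte memset with 16-byte
  // alignment takes the v16i8 path below, while a 16-byte memset falls
  // through to the i64 case.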
13358   bool IsSmallMemset = Op.isMemset() && Op.size() < 32;
13359   auto AlignmentIsAcceptable = [&](EVT VT, Align AlignCheck) {
13360     if (Op.isAligned(AlignCheck))
13361       return true;
13362     bool Fast;
13363     return allowsMisalignedMemoryAccesses(VT, 0, Align(1),
13364                                           MachineMemOperand::MONone, &Fast) &&
13365            Fast;
13366   };
13367 
13368   if (CanUseNEON && Op.isMemset() && !IsSmallMemset &&
13369       AlignmentIsAcceptable(MVT::v16i8, Align(16)))
13370     return MVT::v16i8;
13371   if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16)))
13372     return MVT::f128;
13373   if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8)))
13374     return MVT::i64;
13375   if (Op.size() >= 4 && AlignmentIsAcceptable(MVT::i32, Align(4)))
13376     return MVT::i32;
13377   return MVT::Other;
13378 }
13379 
13380 LLT AArch64TargetLowering::getOptimalMemOpLLT(
13381     const MemOp &Op, const AttributeList &FuncAttributes) const {
13382   bool CanImplicitFloat = !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
13383   bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat;
13384   bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat;
13385   // Only use AdvSIMD to implement memsets of 32 bytes and above. Below that it
13386   // would take one instruction to materialize the v2i64 zero plus one store
13387   // with a restrictive addressing mode, so just do i64 stores.
13388   bool IsSmallMemset = Op.isMemset() && Op.size() < 32;
13389   auto AlignmentIsAcceptable = [&](EVT VT, Align AlignCheck) {
13390     if (Op.isAligned(AlignCheck))
13391       return true;
13392     bool Fast;
13393     return allowsMisalignedMemoryAccesses(VT, 0, Align(1),
13394                                           MachineMemOperand::MONone, &Fast) &&
13395            Fast;
13396   };
13397 
13398   if (CanUseNEON && Op.isMemset() && !IsSmallMemset &&
13399       AlignmentIsAcceptable(MVT::v2i64, Align(16)))
13400     return LLT::fixed_vector(2, 64);
13401   if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16)))
13402     return LLT::scalar(128);
13403   if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8)))
13404     return LLT::scalar(64);
13405   if (Op.size() >= 4 && AlignmentIsAcceptable(MVT::i32, Align(4)))
13406     return LLT::scalar(32);
13407   return LLT();
13408 }
13409 
13410 // 12-bit optionally shifted immediates are legal for adds.
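// For example, 0xfff and 0xfff000 are legal add immediates, whereas 0x1001 is
// not encodable in a single ADD/SUB.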
13411 bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const {
13412   if (Immed == std::numeric_limits<int64_t>::min()) {
13413     LLVM_DEBUG(dbgs() << "Illegal add imm " << Immed
13414                       << ": avoid UB for INT64_MIN\n");
13415     return false;
13416   }
13417   // Same encoding for add/sub, just flip the sign.
13418   Immed = std::abs(Immed);
13419   bool IsLegal = ((Immed >> 12) == 0 ||
13420                   ((Immed & 0xfff) == 0 && Immed >> 24 == 0));
13421   LLVM_DEBUG(dbgs() << "Is " << Immed
13422                     << " legal add imm: " << (IsLegal ? "yes" : "no") << "\n");
13423   return IsLegal;
13424 }
13425 
13426 // Return false to prevent folding
13427 // (mul (add x, c1), c2) -> (add (mul x, c2), c2*c1) in DAGCombine,
13428 // if the folding leads to worse code.
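// For instance (a hypothetical case), with c1 == 1 and c2 == 0x123456789 the
// folded constant c1*c2 is not an add immediate and needs a MOVZ plus two
// MOVKs, so the fold is reported as unprofitable here.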
13429 bool AArch64TargetLowering::isMulAddWithConstProfitable(
13430     SDValue AddNode, SDValue ConstNode) const {
13431   // Let the DAGCombiner decide for vector types and large types.
13432   const EVT VT = AddNode.getValueType();
13433   if (VT.isVector() || VT.getScalarSizeInBits() > 64)
13434     return true;
13435 
13436   // It is worse if c1 is a legal add immediate while c1*c2 is not and
13437   // has to be materialized with at least two instructions.
13438   const ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
13439   const ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
13440   const int64_t C1 = C1Node->getSExtValue();
13441   const APInt C1C2 = C1Node->getAPIntValue() * C2Node->getAPIntValue();
13442   if (!isLegalAddImmediate(C1) || isLegalAddImmediate(C1C2.getSExtValue()))
13443     return true;
13444   SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
13445   AArch64_IMM::expandMOVImm(C1C2.getZExtValue(), VT.getSizeInBits(), Insn);
13446   if (Insn.size() > 1)
13447     return false;
13448 
13449   // Default to true and let the DAGCombiner decide.
13450   return true;
13451 }
13452 
13453 // Integer comparisons are implemented with ADDS/SUBS, so the range of valid
13454 // immediates is the same as for an add or a sub.
13455 bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Immed) const {
13456   return isLegalAddImmediate(Immed);
13457 }
13458 
13459 /// isLegalAddressingMode - Return true if the addressing mode represented
13460 /// by AM is legal for this target, for a load/store of the specified type.
13461 bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
13462                                                   const AddrMode &AM, Type *Ty,
13463                                                   unsigned AS, Instruction *I) const {
13464   // AArch64 has five basic addressing modes:
13465   //  reg
13466   //  reg + 9-bit signed offset
13467   //  reg + SIZE_IN_BYTES * 12-bit unsigned offset
13468   //  reg1 + reg2
13469   //  reg + SIZE_IN_BYTES * reg
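  // Illustrative instances (register choices are arbitrary), e.g. for i64:
  //   ldr x0, [x1]              ; reg
  //   ldur x0, [x1, #-17]       ; reg + 9-bit signed offset
  //   ldr x0, [x1, #4088]       ; reg + 8 * 12-bit unsigned offset
  //   ldr x0, [x1, x2]          ; reg1 + reg2
  //   ldr x0, [x1, x2, lsl #3]  ; reg + 8 * reg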
13470 
13471   // No global is ever allowed as a base.
13472   if (AM.BaseGV)
13473     return false;
13474 
13475   // No reg+reg+imm addressing.
13476   if (AM.HasBaseReg && AM.BaseOffs && AM.Scale)
13477     return false;
13478 
13479   // FIXME: Update this method to support scalable addressing modes.
13480   if (isa<ScalableVectorType>(Ty)) {
13481     uint64_t VecElemNumBytes =
13482         DL.getTypeSizeInBits(cast<VectorType>(Ty)->getElementType()) / 8;
13483     return AM.HasBaseReg && !AM.BaseOffs &&
13484            (AM.Scale == 0 || (uint64_t)AM.Scale == VecElemNumBytes);
13485   }
13486 
13487   // check reg + imm case:
13488   // i.e., reg + 0, reg + imm9, reg + SIZE_IN_BYTES * uimm12
13489   uint64_t NumBytes = 0;
13490   if (Ty->isSized()) {
13491     uint64_t NumBits = DL.getTypeSizeInBits(Ty);
13492     NumBytes = NumBits / 8;
13493     if (!isPowerOf2_64(NumBits))
13494       NumBytes = 0;
13495   }
13496 
13497   if (!AM.Scale) {
13498     int64_t Offset = AM.BaseOffs;
13499 
13500     // 9-bit signed offset
13501     if (isInt<9>(Offset))
13502       return true;
13503 
13504     // 12-bit unsigned offset
13505     unsigned shift = Log2_64(NumBytes);
13506     if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 &&
13507         // Must be a multiple of NumBytes (NumBytes is a power of 2)
13508         (Offset >> shift) << shift == Offset)
13509       return true;
13510     return false;
13511   }
13512 
13513   // Check reg1 + SIZE_IN_BYTES * reg2 and reg1 + reg2
13514 
13515   return AM.Scale == 1 || (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes);
13516 }
13517 
13518 bool AArch64TargetLowering::shouldConsiderGEPOffsetSplit() const {
13519   // Consider splitting large offset of struct or array.
13520   return true;
13521 }
13522 
13523 InstructionCost AArch64TargetLowering::getScalingFactorCost(
13524     const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const {
13525   // Scaling factors are not free at all.
13526   // Operands                     | Rt Latency
13527   // -------------------------------------------
13528   // Rt, [Xn, Xm]                 | 4
13529   // -------------------------------------------
13530   // Rt, [Xn, Xm, lsl #imm]       | Rn: 4 Rm: 5
13531   // Rt, [Xn, Wm, <extend> #imm]  |
13532   if (isLegalAddressingMode(DL, AM, Ty, AS))
13533     // Scale represents reg2 * scale, thus account for a cost of 1 when it is
13534     // not equal to 0 or 1.
13535     return AM.Scale != 0 && AM.Scale != 1;
13536   return -1;
13537 }
13538 
13539 bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(
13540     const MachineFunction &MF, EVT VT) const {
13541   VT = VT.getScalarType();
13542 
13543   if (!VT.isSimple())
13544     return false;
13545 
13546   switch (VT.getSimpleVT().SimpleTy) {
13547   case MVT::f16:
13548     return Subtarget->hasFullFP16();
13549   case MVT::f32:
13550   case MVT::f64:
13551     return true;
13552   default:
13553     break;
13554   }
13555 
13556   return false;
13557 }
13558 
13559 bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
13560                                                        Type *Ty) const {
13561   switch (Ty->getScalarType()->getTypeID()) {
13562   case Type::FloatTyID:
13563   case Type::DoubleTyID:
13564     return true;
13565   default:
13566     return false;
13567   }
13568 }
13569 
13570 bool AArch64TargetLowering::generateFMAsInMachineCombiner(
13571     EVT VT, CodeGenOpt::Level OptLevel) const {
13572   return (OptLevel >= CodeGenOpt::Aggressive) && !VT.isScalableVector() &&
13573          !useSVEForFixedLengthVectorVT(VT);
13574 }
13575 
13576 const MCPhysReg *
13577 AArch64TargetLowering::getScratchRegisters(CallingConv::ID) const {
13578   // LR is a callee-save register, but we must treat it as clobbered by any call
13579   // site. Hence we include LR in the scratch registers, which are in turn added
13580   // as implicit-defs for stackmaps and patchpoints.
13581   static const MCPhysReg ScratchRegs[] = {
13582     AArch64::X16, AArch64::X17, AArch64::LR, 0
13583   };
13584   return ScratchRegs;
13585 }
13586 
13587 bool
13588 AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
13589                                                      CombineLevel Level) const {
13590   N = N->getOperand(0).getNode();
13591   EVT VT = N->getValueType(0);
13592   // If N is unsigned bit extraction: ((x >> C) & mask), then do not combine
13593   // it with shift to let it be lowered to UBFX.
13594   if (N->getOpcode() == ISD::AND && (VT == MVT::i32 || VT == MVT::i64) &&
13595       isa<ConstantSDNode>(N->getOperand(1))) {
13596     uint64_t TruncMask = N->getConstantOperandVal(1);
13597     if (isMask_64(TruncMask) &&
13598         N->getOperand(0).getOpcode() == ISD::SRL &&
13599         isa<ConstantSDNode>(N->getOperand(0)->getOperand(1)))
13600       return false;
13601   }
13602   return true;
13603 }
13604 
13605 bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask(
13606     const SDNode *N, CombineLevel Level) const {
13607   assert(((N->getOpcode() == ISD::SHL &&
13608            N->getOperand(0).getOpcode() == ISD::SRL) ||
13609           (N->getOpcode() == ISD::SRL &&
13610            N->getOperand(0).getOpcode() == ISD::SHL)) &&
13611          "Expected shift-shift mask");
13612   // Don't allow multiuse shift folding with the same shift amount.
13613   if (!N->getOperand(0)->hasOneUse())
13614     return false;
13615 
13616   // Only fold srl(shl(x,c1),c2) iff C1 >= C2 to prevent loss of UBFX patterns.
13617   EVT VT = N->getValueType(0);
13618   if (N->getOpcode() == ISD::SRL && (VT == MVT::i32 || VT == MVT::i64)) {
13619     auto *C1 = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1));
13620     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
13621     return (!C1 || !C2 || C1->getZExtValue() >= C2->getZExtValue());
13622   }
13623 
13624   return true;
13625 }
13626 
13627 bool AArch64TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
13628                                                               Type *Ty) const {
13629   assert(Ty->isIntegerTy());
13630 
13631   unsigned BitSize = Ty->getPrimitiveSizeInBits();
13632   if (BitSize == 0)
13633     return false;
13634 
13635   int64_t Val = Imm.getSExtValue();
13636   if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, BitSize))
13637     return true;
13638 
13639   if ((int64_t)Val < 0)
13640     Val = ~Val;
13641   if (BitSize == 32)
13642     Val &= (1LL << 32) - 1;
13643 
13644   unsigned LZ = countLeadingZeros((uint64_t)Val);
13645   unsigned Shift = (63 - LZ) / 16;
13646   // MOVZ is free, so return true when at most two MOVKs are needed.
13647   return Shift < 3;
13648 }
13649 
13650 bool AArch64TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
13651                                                     unsigned Index) const {
13652   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
13653     return false;
13654 
13655   return (Index == 0 || Index == ResVT.getVectorMinNumElements());
13656 }
13657 
13658 /// Turn vector tests of the signbit in the form of:
13659 ///   xor (sra X, elt_size(X)-1), -1
13660 /// into:
13661 ///   cmge X, X, #0
13662 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
13663                                          const AArch64Subtarget *Subtarget) {
13664   EVT VT = N->getValueType(0);
13665   if (!Subtarget->hasNEON() || !VT.isVector())
13666     return SDValue();
13667 
13668   // There must be a shift right algebraic before the xor, and the xor must be a
13669   // 'not' operation.
13670   SDValue Shift = N->getOperand(0);
13671   SDValue Ones = N->getOperand(1);
13672   if (Shift.getOpcode() != AArch64ISD::VASHR || !Shift.hasOneUse() ||
13673       !ISD::isBuildVectorAllOnes(Ones.getNode()))
13674     return SDValue();
13675 
13676   // The shift should be smearing the sign bit across each vector element.
13677   auto *ShiftAmt = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
13678   EVT ShiftEltTy = Shift.getValueType().getVectorElementType();
13679   if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1)
13680     return SDValue();
13681 
13682   return DAG.getNode(AArch64ISD::CMGEz, SDLoc(N), VT, Shift.getOperand(0));
13683 }
13684 
13685 // Given a vecreduce_add node, detect the below pattern and convert it to the
13686 // node sequence with UABDL, [S|U]ABD and UADDLP.
13687 //
13688 // i32 vecreduce_add(
13689 //  v16i32 abs(
13690 //    v16i32 sub(
13691 //     v16i32 [sign|zero]_extend(v16i8 a), v16i32 [sign|zero]_extend(v16i8 b))))
13692 // =================>
13693 // i32 vecreduce_add(
13694 //   v4i32 UADDLP(
13695 //     v8i16 add(
13696 //       v8i16 zext(
13697 //         v8i8 [S|U]ABD low8:v16i8 a, low8:v16i8 b
13698 //       v8i16 zext(
13699 //         v8i8 [S|U]ABD high8:v16i8 a, high8:v16i8 b
13700 static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N,
13701                                                     SelectionDAG &DAG) {
13702   // Assumed i32 vecreduce_add
13703   if (N->getValueType(0) != MVT::i32)
13704     return SDValue();
13705 
13706   SDValue VecReduceOp0 = N->getOperand(0);
13707   unsigned Opcode = VecReduceOp0.getOpcode();
13708   // Assumed v16i32 abs
13709   if (Opcode != ISD::ABS || VecReduceOp0->getValueType(0) != MVT::v16i32)
13710     return SDValue();
13711 
13712   SDValue ABS = VecReduceOp0;
13713   // Assumed v16i32 sub
13714   if (ABS->getOperand(0)->getOpcode() != ISD::SUB ||
13715       ABS->getOperand(0)->getValueType(0) != MVT::v16i32)
13716     return SDValue();
13717 
13718   SDValue SUB = ABS->getOperand(0);
13719   unsigned Opcode0 = SUB->getOperand(0).getOpcode();
13720   unsigned Opcode1 = SUB->getOperand(1).getOpcode();
13721   // Assumed v16i32 type
13722   if (SUB->getOperand(0)->getValueType(0) != MVT::v16i32 ||
13723       SUB->getOperand(1)->getValueType(0) != MVT::v16i32)
13724     return SDValue();
13725 
13726   // Assumed zext or sext
13727   bool IsZExt = false;
13728   if (Opcode0 == ISD::ZERO_EXTEND && Opcode1 == ISD::ZERO_EXTEND) {
13729     IsZExt = true;
13730   } else if (Opcode0 == ISD::SIGN_EXTEND && Opcode1 == ISD::SIGN_EXTEND) {
13731     IsZExt = false;
13732   } else
13733     return SDValue();
13734 
13735   SDValue EXT0 = SUB->getOperand(0);
13736   SDValue EXT1 = SUB->getOperand(1);
13737   // Assumed zext's operand has v16i8 type
13738   if (EXT0->getOperand(0)->getValueType(0) != MVT::v16i8 ||
13739       EXT1->getOperand(0)->getValueType(0) != MVT::v16i8)
13740     return SDValue();
13741 
13742   // Pattern is detected. Let's convert it to a sequence of nodes.
13743   SDLoc DL(N);
13744 
13745   // First, create the node pattern of UABD/SABD.
13746   SDValue UABDHigh8Op0 =
13747       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT0->getOperand(0),
13748                   DAG.getConstant(8, DL, MVT::i64));
13749   SDValue UABDHigh8Op1 =
13750       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT1->getOperand(0),
13751                   DAG.getConstant(8, DL, MVT::i64));
13752   SDValue UABDHigh8 = DAG.getNode(IsZExt ? ISD::ABDU : ISD::ABDS, DL, MVT::v8i8,
13753                                   UABDHigh8Op0, UABDHigh8Op1);
13754   SDValue UABDL = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, UABDHigh8);
13755 
13756   // Second, create the node pattern of UABAL.
13757   SDValue UABDLo8Op0 =
13758       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT0->getOperand(0),
13759                   DAG.getConstant(0, DL, MVT::i64));
13760   SDValue UABDLo8Op1 =
13761       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT1->getOperand(0),
13762                   DAG.getConstant(0, DL, MVT::i64));
13763   SDValue UABDLo8 = DAG.getNode(IsZExt ? ISD::ABDU : ISD::ABDS, DL, MVT::v8i8,
13764                                 UABDLo8Op0, UABDLo8Op1);
13765   SDValue ZExtUABD = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, UABDLo8);
13766   SDValue UABAL = DAG.getNode(ISD::ADD, DL, MVT::v8i16, UABDL, ZExtUABD);
13767 
13768   // Third, create the node of UADDLP.
13769   SDValue UADDLP = DAG.getNode(AArch64ISD::UADDLP, DL, MVT::v4i32, UABAL);
13770 
13771   // Fourth, create the node of VECREDUCE_ADD.
13772   return DAG.getNode(ISD::VECREDUCE_ADD, DL, MVT::i32, UADDLP);
13773 }
13774 
13775 // Turn a v8i8/v16i8 extended vecreduce into a udot/sdot and vecreduce
13776 //   vecreduce.add(ext(A)) to vecreduce.add(DOT(zero, A, one))
13777 //   vecreduce.add(mul(ext(A), ext(B))) to vecreduce.add(DOT(zero, A, B))
13778 static SDValue performVecReduceAddCombine(SDNode *N, SelectionDAG &DAG,
13779                                           const AArch64Subtarget *ST) {
13780   if (!ST->hasDotProd())
13781     return performVecReduceAddCombineWithUADDLP(N, DAG);
13782 
13783   SDValue Op0 = N->getOperand(0);
13784   if (N->getValueType(0) != MVT::i32 ||
13785       Op0.getValueType().getVectorElementType() != MVT::i32)
13786     return SDValue();
13787 
13788   unsigned ExtOpcode = Op0.getOpcode();
13789   SDValue A = Op0;
13790   SDValue B;
13791   if (ExtOpcode == ISD::MUL) {
13792     A = Op0.getOperand(0);
13793     B = Op0.getOperand(1);
13794     if (A.getOpcode() != B.getOpcode() ||
13795         A.getOperand(0).getValueType() != B.getOperand(0).getValueType())
13796       return SDValue();
13797     ExtOpcode = A.getOpcode();
13798   }
13799   if (ExtOpcode != ISD::ZERO_EXTEND && ExtOpcode != ISD::SIGN_EXTEND)
13800     return SDValue();
13801 
13802   EVT Op0VT = A.getOperand(0).getValueType();
13803   if (Op0VT != MVT::v8i8 && Op0VT != MVT::v16i8)
13804     return SDValue();
13805 
13806   SDLoc DL(Op0);
13807   // For non-mla reductions B can be set to 1. For MLA we take the operand of
13808   // the extend B.
13809   if (!B)
13810     B = DAG.getConstant(1, DL, Op0VT);
13811   else
13812     B = B.getOperand(0);
13813 
13814   SDValue Zeros =
13815       DAG.getConstant(0, DL, Op0VT == MVT::v8i8 ? MVT::v2i32 : MVT::v4i32);
13816   auto DotOpcode =
13817       (ExtOpcode == ISD::ZERO_EXTEND) ? AArch64ISD::UDOT : AArch64ISD::SDOT;
13818   SDValue Dot = DAG.getNode(DotOpcode, DL, Zeros.getValueType(), Zeros,
13819                             A.getOperand(0), B);
13820   return DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0), Dot);
13821 }
13822 
13823 // Given an (integer) vecreduce, we know the order of the inputs does not
13824 // matter. We can convert UADDV(add(zext(extract_lo(x)), zext(extract_hi(x))))
13825 // into UADDV(UADDLP(x)). This can also happen through an extra add, where we
13826 // transform UADDV(add(y, add(zext(extract_lo(x)), zext(extract_hi(x))))).
13827 static SDValue performUADDVCombine(SDNode *N, SelectionDAG &DAG) {
13828   auto DetectAddExtract = [&](SDValue A) {
13829     // Look for add(zext(extract_lo(x)), zext(extract_hi(x))), returning
13830     // UADDLP(x) if found.
13831     if (A.getOpcode() != ISD::ADD)
13832       return SDValue();
13833     EVT VT = A.getValueType();
13834     SDValue Op0 = A.getOperand(0);
13835     SDValue Op1 = A.getOperand(1);
13836     if (Op0.getOpcode() != Op1.getOpcode() ||
13837         (Op0.getOpcode() != ISD::ZERO_EXTEND &&
13838          Op0.getOpcode() != ISD::SIGN_EXTEND))
13839       return SDValue();
13840     SDValue Ext0 = Op0.getOperand(0);
13841     SDValue Ext1 = Op1.getOperand(0);
13842     if (Ext0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
13843         Ext1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
13844         Ext0.getOperand(0) != Ext1.getOperand(0))
13845       return SDValue();
13846     // Check that the type is twice the add types, and the extract are from
13847     // upper/lower parts of the same source.
13848     if (Ext0.getOperand(0).getValueType().getVectorNumElements() !=
13849         VT.getVectorNumElements() * 2)
13850       return SDValue();
13851     if ((Ext0.getConstantOperandVal(1) != 0 &&
13852          Ext1.getConstantOperandVal(1) != VT.getVectorNumElements()) &&
13853         (Ext1.getConstantOperandVal(1) != 0 &&
13854          Ext0.getConstantOperandVal(1) != VT.getVectorNumElements()))
13855       return SDValue();
13856     unsigned Opcode = Op0.getOpcode() == ISD::ZERO_EXTEND ? AArch64ISD::UADDLP
13857                                                           : AArch64ISD::SADDLP;
13858     return DAG.getNode(Opcode, SDLoc(A), VT, Ext0.getOperand(0));
13859   };
13860 
13861   SDValue A = N->getOperand(0);
13862   if (SDValue R = DetectAddExtract(A))
13863     return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), R);
13864   if (A.getOpcode() == ISD::ADD) {
13865     if (SDValue R = DetectAddExtract(A.getOperand(0)))
13866       return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
13867                          DAG.getNode(ISD::ADD, SDLoc(A), A.getValueType(), R,
13868                                      A.getOperand(1)));
13869     if (SDValue R = DetectAddExtract(A.getOperand(1)))
13870       return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
13871                          DAG.getNode(ISD::ADD, SDLoc(A), A.getValueType(), R,
13872                                      A.getOperand(0)));
13873   }
13874   return SDValue();
13875 }
13876 
13877 
13878 static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG,
13879                                  TargetLowering::DAGCombinerInfo &DCI,
13880                                  const AArch64Subtarget *Subtarget) {
13881   if (DCI.isBeforeLegalizeOps())
13882     return SDValue();
13883 
13884   return foldVectorXorShiftIntoCmp(N, DAG, Subtarget);
13885 }
13886 
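// A sketch of the sequence built below for X sdiv 8 (register names are
// illustrative):
//   cmp  x0, #0
//   add  x8, x0, #7
//   csel x8, x8, x0, lt
//   asr  x0, x8, #3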
13887 SDValue
13888 AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
13889                                      SelectionDAG &DAG,
13890                                      SmallVectorImpl<SDNode *> &Created) const {
13891   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
13892   if (isIntDivCheap(N->getValueType(0), Attr))
13893     return SDValue(N,0); // Lower SDIV as SDIV
13894 
13895   EVT VT = N->getValueType(0);
13896 
13897   // For scalable and fixed types, mark them as cheap so we can handle it much
13898   // later. This allows us to handle larger than legal types.
13899   if (VT.isScalableVector() || Subtarget->useSVEForFixedLengthVectors())
13900     return SDValue(N, 0);
13901 
13902   // fold (sdiv X, pow2)
13903   if ((VT != MVT::i32 && VT != MVT::i64) ||
13904       !(Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()))
13905     return SDValue();
13906 
13907   SDLoc DL(N);
13908   SDValue N0 = N->getOperand(0);
13909   unsigned Lg2 = Divisor.countTrailingZeros();
13910   SDValue Zero = DAG.getConstant(0, DL, VT);
13911   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
13912 
13913   // Add (N0 < 0) ? Pow2 - 1 : 0;
13914   SDValue CCVal;
13915   SDValue Cmp = getAArch64Cmp(N0, Zero, ISD::SETLT, CCVal, DAG, DL);
13916   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
13917   SDValue CSel = DAG.getNode(AArch64ISD::CSEL, DL, VT, Add, N0, CCVal, Cmp);
13918 
13919   Created.push_back(Cmp.getNode());
13920   Created.push_back(Add.getNode());
13921   Created.push_back(CSel.getNode());
13922 
13923   // Divide by pow2.
13924   SDValue SRA =
13925       DAG.getNode(ISD::SRA, DL, VT, CSel, DAG.getConstant(Lg2, DL, MVT::i64));
13926 
13927   // If we're dividing by a positive value, we're done.  Otherwise, we must
13928   // negate the result.
13929   if (Divisor.isNonNegative())
13930     return SRA;
13931 
13932   Created.push_back(SRA.getNode());
13933   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
13934 }
13935 
13936 SDValue
13937 AArch64TargetLowering::BuildSREMPow2(SDNode *N, const APInt &Divisor,
13938                                      SelectionDAG &DAG,
13939                                      SmallVectorImpl<SDNode *> &Created) const {
13940   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
13941   if (isIntDivCheap(N->getValueType(0), Attr))
13942     return SDValue(N, 0); // Lower SREM as SREM
13943 
13944   EVT VT = N->getValueType(0);
13945 
  // For scalable and fixed vector types, mark the division as cheap so we can
  // handle it much later. This allows us to handle larger-than-legal types.
13948   if (VT.isScalableVector() || Subtarget->useSVEForFixedLengthVectors())
13949     return SDValue(N, 0);
13950 
13951   // fold (srem X, pow2)
13952   if ((VT != MVT::i32 && VT != MVT::i64) ||
13953       !(Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()))
13954     return SDValue();
13955 
13956   unsigned Lg2 = Divisor.countTrailingZeros();
13957   if (Lg2 == 0)
13958     return SDValue();
13959 
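  // For Lg2 == 1 the remainder is (X & 1), negated when X is negative. For
  // larger powers of two, compute both (X & (Pow2 - 1)) and ((-X) & (Pow2 - 1))
  // and use CSNEG to select (and negate) the right one based on the sign of X.
  // For example, srem X, 8 is expected to select to roughly:
  //   negs  T, X
  //   and   P, X, #7
  //   and   M, T, #7
  //   csneg R, P, M, mi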
13960   SDLoc DL(N);
13961   SDValue N0 = N->getOperand(0);
13962   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
13963   SDValue Zero = DAG.getConstant(0, DL, VT);
13964   SDValue CCVal, CSNeg;
13965   if (Lg2 == 1) {
13966     SDValue Cmp = getAArch64Cmp(N0, Zero, ISD::SETGE, CCVal, DAG, DL);
13967     SDValue And = DAG.getNode(ISD::AND, DL, VT, N0, Pow2MinusOne);
13968     CSNeg = DAG.getNode(AArch64ISD::CSNEG, DL, VT, And, And, CCVal, Cmp);
13969 
13970     Created.push_back(Cmp.getNode());
13971     Created.push_back(And.getNode());
13972   } else {
13973     SDValue CCVal = DAG.getConstant(AArch64CC::MI, DL, MVT_CC);
13974     SDVTList VTs = DAG.getVTList(VT, MVT::i32);
13975 
13976     SDValue Negs = DAG.getNode(AArch64ISD::SUBS, DL, VTs, Zero, N0);
13977     SDValue AndPos = DAG.getNode(ISD::AND, DL, VT, N0, Pow2MinusOne);
13978     SDValue AndNeg = DAG.getNode(ISD::AND, DL, VT, Negs, Pow2MinusOne);
13979     CSNeg = DAG.getNode(AArch64ISD::CSNEG, DL, VT, AndPos, AndNeg, CCVal,
13980                         Negs.getValue(1));
13981 
13982     Created.push_back(Negs.getNode());
13983     Created.push_back(AndPos.getNode());
13984     Created.push_back(AndNeg.getNode());
13985   }
13986 
13987   return CSNeg;
13988 }
13989 
13990 static bool IsSVECntIntrinsic(SDValue S) {
13991   switch(getIntrinsicID(S.getNode())) {
13992   default:
13993     break;
13994   case Intrinsic::aarch64_sve_cntb:
13995   case Intrinsic::aarch64_sve_cnth:
13996   case Intrinsic::aarch64_sve_cntw:
13997   case Intrinsic::aarch64_sve_cntd:
13998     return true;
13999   }
14000   return false;
14001 }
14002 
14003 /// Calculates what the pre-extend type is, based on the extension
14004 /// operation node provided by \p Extend.
14005 ///
14006 /// In the case that \p Extend is a SIGN_EXTEND or a ZERO_EXTEND, the
14007 /// pre-extend type is pulled directly from the operand, while other extend
14008 /// operations need a bit more inspection to get this information.
14009 ///
14010 /// \param Extend The SDNode from the DAG that represents the extend operation
14011 ///
14012 /// \returns The type representing the \p Extend source type, or \p MVT::Other
14013 /// if no valid type can be determined
14014 static EVT calculatePreExtendType(SDValue Extend) {
14015   switch (Extend.getOpcode()) {
14016   case ISD::SIGN_EXTEND:
14017   case ISD::ZERO_EXTEND:
14018     return Extend.getOperand(0).getValueType();
14019   case ISD::AssertSext:
14020   case ISD::AssertZext:
14021   case ISD::SIGN_EXTEND_INREG: {
14022     VTSDNode *TypeNode = dyn_cast<VTSDNode>(Extend.getOperand(1));
14023     if (!TypeNode)
14024       return MVT::Other;
14025     return TypeNode->getVT();
14026   }
14027   case ISD::AND: {
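    // An AND with a low all-ones mask behaves like a zero-extend from the
    // masked width, e.g. (and x, 0xffff) implies an i16 source type.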
14028     ConstantSDNode *Constant =
14029         dyn_cast<ConstantSDNode>(Extend.getOperand(1).getNode());
14030     if (!Constant)
14031       return MVT::Other;
14032 
14033     uint32_t Mask = Constant->getZExtValue();
14034 
14035     if (Mask == UCHAR_MAX)
14036       return MVT::i8;
14037     else if (Mask == USHRT_MAX)
14038       return MVT::i16;
14039     else if (Mask == UINT_MAX)
14040       return MVT::i32;
14041 
14042     return MVT::Other;
14043   }
14044   default:
14045     return MVT::Other;
14046   }
14047 }
14048 
14049 /// Combines a buildvector(sext/zext) or shuffle(sext/zext, undef) node pattern
14050 /// into sext/zext(buildvector) or sext/zext(shuffle) making use of the vector
14051 /// SExt/ZExt rather than the scalar SExt/ZExt
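/// For example (zero-extend case):
///   v8i16 build_vector(zext(i8 a), zext(i8 b), ...)
/// becomes
///   v8i16 zext(v8i8 build_vector(a, b, ...))
/// so a single vector extend is used instead of one extend per element.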
14052 static SDValue performBuildShuffleExtendCombine(SDValue BV, SelectionDAG &DAG) {
14053   EVT VT = BV.getValueType();
14054   if (BV.getOpcode() != ISD::BUILD_VECTOR &&
14055       BV.getOpcode() != ISD::VECTOR_SHUFFLE)
14056     return SDValue();
14057 
14058   // Use the first item in the buildvector/shuffle to get the size of the
14059   // extend, and make sure it looks valid.
14060   SDValue Extend = BV->getOperand(0);
14061   unsigned ExtendOpcode = Extend.getOpcode();
14062   bool IsSExt = ExtendOpcode == ISD::SIGN_EXTEND ||
14063                 ExtendOpcode == ISD::SIGN_EXTEND_INREG ||
14064                 ExtendOpcode == ISD::AssertSext;
14065   if (!IsSExt && ExtendOpcode != ISD::ZERO_EXTEND &&
14066       ExtendOpcode != ISD::AssertZext && ExtendOpcode != ISD::AND)
14067     return SDValue();
  // Shuffle inputs are vectors, so limit to SIGN_EXTEND and ZERO_EXTEND to
  // ensure calculatePreExtendType will work without issue.
14070   if (BV.getOpcode() == ISD::VECTOR_SHUFFLE &&
14071       ExtendOpcode != ISD::SIGN_EXTEND && ExtendOpcode != ISD::ZERO_EXTEND)
14072     return SDValue();
14073 
14074   // Restrict valid pre-extend data type
14075   EVT PreExtendType = calculatePreExtendType(Extend);
14076   if (PreExtendType == MVT::Other ||
14077       PreExtendType.getScalarSizeInBits() != VT.getScalarSizeInBits() / 2)
14078     return SDValue();
14079 
14080   // Make sure all other operands are equally extended
14081   for (SDValue Op : drop_begin(BV->ops())) {
14082     if (Op.isUndef())
14083       continue;
14084     unsigned Opc = Op.getOpcode();
14085     bool OpcIsSExt = Opc == ISD::SIGN_EXTEND || Opc == ISD::SIGN_EXTEND_INREG ||
14086                      Opc == ISD::AssertSext;
14087     if (OpcIsSExt != IsSExt || calculatePreExtendType(Op) != PreExtendType)
14088       return SDValue();
14089   }
14090 
14091   SDValue NBV;
14092   SDLoc DL(BV);
14093   if (BV.getOpcode() == ISD::BUILD_VECTOR) {
14094     EVT PreExtendVT = VT.changeVectorElementType(PreExtendType);
14095     EVT PreExtendLegalType =
14096         PreExtendType.getScalarSizeInBits() < 32 ? MVT::i32 : PreExtendType;
14097     SmallVector<SDValue, 8> NewOps;
14098     for (SDValue Op : BV->ops())
14099       NewOps.push_back(Op.isUndef() ? DAG.getUNDEF(PreExtendLegalType)
14100                                     : DAG.getAnyExtOrTrunc(Op.getOperand(0), DL,
14101                                                            PreExtendLegalType));
14102     NBV = DAG.getNode(ISD::BUILD_VECTOR, DL, PreExtendVT, NewOps);
14103   } else { // BV.getOpcode() == ISD::VECTOR_SHUFFLE
14104     EVT PreExtendVT = VT.changeVectorElementType(PreExtendType.getScalarType());
14105     NBV = DAG.getVectorShuffle(PreExtendVT, DL, BV.getOperand(0).getOperand(0),
14106                                BV.getOperand(1).isUndef()
14107                                    ? DAG.getUNDEF(PreExtendVT)
14108                                    : BV.getOperand(1).getOperand(0),
14109                                cast<ShuffleVectorSDNode>(BV)->getMask());
14110   }
14111   return DAG.getNode(IsSExt ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, VT, NBV);
14112 }
14113 
14114 /// Combines a mul(dup(sext/zext)) node pattern into mul(sext/zext(dup))
14115 /// making use of the vector SExt/ZExt rather than the scalar SExt/ZExt
14116 static SDValue performMulVectorExtendCombine(SDNode *Mul, SelectionDAG &DAG) {
14117   // If the value type isn't a vector, none of the operands are going to be dups
14118   EVT VT = Mul->getValueType(0);
14119   if (VT != MVT::v8i16 && VT != MVT::v4i32 && VT != MVT::v2i64)
14120     return SDValue();
14121 
14122   SDValue Op0 = performBuildShuffleExtendCombine(Mul->getOperand(0), DAG);
14123   SDValue Op1 = performBuildShuffleExtendCombine(Mul->getOperand(1), DAG);
14124 
  // If neither operand has been changed, don't make any further changes.
14126   if (!Op0 && !Op1)
14127     return SDValue();
14128 
14129   SDLoc DL(Mul);
14130   return DAG.getNode(Mul->getOpcode(), DL, VT, Op0 ? Op0 : Mul->getOperand(0),
14131                      Op1 ? Op1 : Mul->getOperand(1));
14132 }
14133 
14134 static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
14135                                  TargetLowering::DAGCombinerInfo &DCI,
14136                                  const AArch64Subtarget *Subtarget) {
14137 
14138   if (SDValue Ext = performMulVectorExtendCombine(N, DAG))
14139     return Ext;
14140 
14141   if (DCI.isBeforeLegalizeOps())
14142     return SDValue();
14143 
14144   // Canonicalize X*(Y+1) -> X*Y+X and (X+1)*Y -> X*Y+Y,
14145   // and in MachineCombiner pass, add+mul will be combined into madd.
14146   // Similarly, X*(1-Y) -> X - X*Y and (1-Y)*X -> X - Y*X.
14147   SDLoc DL(N);
14148   EVT VT = N->getValueType(0);
14149   SDValue N0 = N->getOperand(0);
14150   SDValue N1 = N->getOperand(1);
14151   SDValue MulOper;
14152   unsigned AddSubOpc;
14153 
14154   auto IsAddSubWith1 = [&](SDValue V) -> bool {
14155     AddSubOpc = V->getOpcode();
14156     if ((AddSubOpc == ISD::ADD || AddSubOpc == ISD::SUB) && V->hasOneUse()) {
14157       SDValue Opnd = V->getOperand(1);
14158       MulOper = V->getOperand(0);
14159       if (AddSubOpc == ISD::SUB)
14160         std::swap(Opnd, MulOper);
14161       if (auto C = dyn_cast<ConstantSDNode>(Opnd))
14162         return C->isOne();
14163     }
14164     return false;
14165   };
14166 
14167   if (IsAddSubWith1(N0)) {
14168     SDValue MulVal = DAG.getNode(ISD::MUL, DL, VT, N1, MulOper);
14169     return DAG.getNode(AddSubOpc, DL, VT, N1, MulVal);
14170   }
14171 
14172   if (IsAddSubWith1(N1)) {
14173     SDValue MulVal = DAG.getNode(ISD::MUL, DL, VT, N0, MulOper);
14174     return DAG.getNode(AddSubOpc, DL, VT, N0, MulVal);
14175   }
14176 
14177   // The below optimizations require a constant RHS.
14178   if (!isa<ConstantSDNode>(N1))
14179     return SDValue();
14180 
14181   ConstantSDNode *C = cast<ConstantSDNode>(N1);
14182   const APInt &ConstValue = C->getAPIntValue();
14183 
  // Allow the scaling to be folded into the `cnt` instruction by preventing
  // the scaling from being obscured here. This makes it easier to pattern
  // match.
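  // (For example, mul(cntd, 4) can then be selected as "cntd x0, all, mul #4"
  // directly.)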
14186   if (IsSVECntIntrinsic(N0) ||
14187      (N0->getOpcode() == ISD::TRUNCATE &&
14188       (IsSVECntIntrinsic(N0->getOperand(0)))))
14189        if (ConstValue.sge(1) && ConstValue.sle(16))
14190          return SDValue();
14191 
14192   // Multiplication of a power of two plus/minus one can be done more
  // cheaply as a shift+add/sub. For now, this is true unilaterally. If
14194   // future CPUs have a cheaper MADD instruction, this may need to be
14195   // gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and
14196   // 64-bit is 5 cycles, so this is always a win.
14197   // More aggressively, some multiplications N0 * C can be lowered to
14198   // shift+add+shift if the constant C = A * B where A = 2^N + 1 and B = 2^M,
14199   // e.g. 6=3*2=(2+1)*2.
14200   // TODO: consider lowering more cases, e.g. C = 14, -6, -14 or even 45
  // which equals (1+2)*16-(1+2).
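  // For example, with C == 6 == (2^1 + 1) * 2^1 the code below produces
  //   (shl (add (shl x, 1), x), 1)
  // i.e. one add and two shifts instead of a multiply.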
14202 
14203   // TrailingZeroes is used to test if the mul can be lowered to
14204   // shift+add+shift.
14205   unsigned TrailingZeroes = ConstValue.countTrailingZeros();
14206   if (TrailingZeroes) {
14207     // Conservatively do not lower to shift+add+shift if the mul might be
14208     // folded into smul or umul.
14209     if (N0->hasOneUse() && (isSignExtended(N0.getNode(), DAG) ||
14210                             isZeroExtended(N0.getNode(), DAG)))
14211       return SDValue();
14212     // Conservatively do not lower to shift+add+shift if the mul might be
14213     // folded into madd or msub.
14214     if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ADD ||
14215                            N->use_begin()->getOpcode() == ISD::SUB))
14216       return SDValue();
14217   }
14218   // Use ShiftedConstValue instead of ConstValue to support both shift+add/sub
14219   // and shift+add+shift.
14220   APInt ShiftedConstValue = ConstValue.ashr(TrailingZeroes);
14221 
14222   unsigned ShiftAmt;
14223   // Is the shifted value the LHS operand of the add/sub?
14224   bool ShiftValUseIsN0 = true;
14225   // Do we need to negate the result?
14226   bool NegateResult = false;
14227 
14228   if (ConstValue.isNonNegative()) {
14229     // (mul x, 2^N + 1) => (add (shl x, N), x)
14230     // (mul x, 2^N - 1) => (sub (shl x, N), x)
14231     // (mul x, (2^N + 1) * 2^M) => (shl (add (shl x, N), x), M)
14232     APInt SCVMinus1 = ShiftedConstValue - 1;
14233     APInt CVPlus1 = ConstValue + 1;
14234     if (SCVMinus1.isPowerOf2()) {
14235       ShiftAmt = SCVMinus1.logBase2();
14236       AddSubOpc = ISD::ADD;
14237     } else if (CVPlus1.isPowerOf2()) {
14238       ShiftAmt = CVPlus1.logBase2();
14239       AddSubOpc = ISD::SUB;
14240     } else
14241       return SDValue();
14242   } else {
14243     // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
14244     // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
14245     APInt CVNegPlus1 = -ConstValue + 1;
14246     APInt CVNegMinus1 = -ConstValue - 1;
14247     if (CVNegPlus1.isPowerOf2()) {
14248       ShiftAmt = CVNegPlus1.logBase2();
14249       AddSubOpc = ISD::SUB;
14250       ShiftValUseIsN0 = false;
14251     } else if (CVNegMinus1.isPowerOf2()) {
14252       ShiftAmt = CVNegMinus1.logBase2();
14253       AddSubOpc = ISD::ADD;
14254       NegateResult = true;
14255     } else
14256       return SDValue();
14257   }
14258 
14259   SDValue ShiftedVal = DAG.getNode(ISD::SHL, DL, VT, N0,
14260                                    DAG.getConstant(ShiftAmt, DL, MVT::i64));
14261 
14262   SDValue AddSubN0 = ShiftValUseIsN0 ? ShiftedVal : N0;
14263   SDValue AddSubN1 = ShiftValUseIsN0 ? N0 : ShiftedVal;
14264   SDValue Res = DAG.getNode(AddSubOpc, DL, VT, AddSubN0, AddSubN1);
14265   assert(!(NegateResult && TrailingZeroes) &&
14266          "NegateResult and TrailingZeroes cannot both be true for now.");
14267   // Negate the result.
14268   if (NegateResult)
14269     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
14270   // Shift the result.
14271   if (TrailingZeroes)
14272     return DAG.getNode(ISD::SHL, DL, VT, Res,
14273                        DAG.getConstant(TrailingZeroes, DL, MVT::i64));
14274   return Res;
14275 }
14276 
14277 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
14278                                                          SelectionDAG &DAG) {
14279   // Take advantage of vector comparisons producing 0 or -1 in each lane to
14280   // optimize away operation when it's from a constant.
14281   //
14282   // The general transformation is:
14283   //    UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
14284   //       AND(VECTOR_CMP(x,y), constant2)
14285   //    constant2 = UNARYOP(constant)
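  //
  // For example, with UNARYOP == sint_to_fp:
  //    sint_to_fp (and (setcc x, y), (build_vector 1, 1, 1, 1))
  // becomes
  //    bitcast (and (setcc x, y), (bitcast (build_vector 1.0, 1.0, 1.0, 1.0)))
  // because each lane of the setcc result is either all-zeros or all-ones.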
14286 
14287   // Early exit if this isn't a vector operation, the operand of the
14288   // unary operation isn't a bitwise AND, or if the sizes of the operations
14289   // aren't the same.
14290   EVT VT = N->getValueType(0);
14291   if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
14292       N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
14293       VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
14294     return SDValue();
14295 
14296   // Now check that the other operand of the AND is a constant. We could
14297   // make the transformation for non-constant splats as well, but it's unclear
14298   // that would be a benefit as it would not eliminate any operations, just
14299   // perform one more step in scalar code before moving to the vector unit.
14300   if (BuildVectorSDNode *BV =
14301           dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
14302     // Bail out if the vector isn't a constant.
14303     if (!BV->isConstant())
14304       return SDValue();
14305 
14306     // Everything checks out. Build up the new and improved node.
14307     SDLoc DL(N);
14308     EVT IntVT = BV->getValueType(0);
14309     // Create a new constant of the appropriate type for the transformed
14310     // DAG.
14311     SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
14312     // The AND node needs bitcasts to/from an integer vector type around it.
14313     SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
14314     SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
14315                                  N->getOperand(0)->getOperand(0), MaskConst);
14316     SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
14317     return Res;
14318   }
14319 
14320   return SDValue();
14321 }
14322 
14323 static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG,
14324                                      const AArch64Subtarget *Subtarget) {
14325   // First try to optimize away the conversion when it's conditionally from
14326   // a constant. Vectors only.
14327   if (SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG))
14328     return Res;
14329 
14330   EVT VT = N->getValueType(0);
14331   if (VT != MVT::f32 && VT != MVT::f64)
14332     return SDValue();
14333 
14334   // Only optimize when the source and destination types have the same width.
14335   if (VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits())
14336     return SDValue();
14337 
14338   // If the result of an integer load is only used by an integer-to-float
  // conversion, use an fp load and an AdvSIMD scalar {S|U}CVTF instead.
14340   // This eliminates an "integer-to-vector-move" UOP and improves throughput.
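  // For example, an i32 load whose only use is a sint_to_fp to f32 becomes an
  // f32 load feeding AArch64ISD::SITOF, avoiding the GPR-to-FPR transfer.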
14341   SDValue N0 = N->getOperand(0);
14342   if (Subtarget->hasNEON() && ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
14343       // Do not change the width of a volatile load.
14344       !cast<LoadSDNode>(N0)->isVolatile()) {
14345     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
14346     SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
14347                                LN0->getPointerInfo(), LN0->getAlign(),
14348                                LN0->getMemOperand()->getFlags());
14349 
14350     // Make sure successors of the original load stay after it by updating them
14351     // to use the new Chain.
14352     DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), Load.getValue(1));
14353 
14354     unsigned Opcode =
14355         (N->getOpcode() == ISD::SINT_TO_FP) ? AArch64ISD::SITOF : AArch64ISD::UITOF;
14356     return DAG.getNode(Opcode, SDLoc(N), VT, Load);
14357   }
14358 
14359   return SDValue();
14360 }
14361 
14362 /// Fold a floating-point multiply by power of two into floating-point to
14363 /// fixed-point conversion.
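/// For example, (fp_to_sint (fmul x, (splat 8.0))) on v4f32 becomes the
/// aarch64_neon_vcvtfp2fxs intrinsic with a fixed-point shift of 3, which is
/// expected to select to a single "fcvtzs ..., #3".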
14364 static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG,
14365                                      TargetLowering::DAGCombinerInfo &DCI,
14366                                      const AArch64Subtarget *Subtarget) {
14367   if (!Subtarget->hasNEON())
14368     return SDValue();
14369 
14370   if (!N->getValueType(0).isSimple())
14371     return SDValue();
14372 
14373   SDValue Op = N->getOperand(0);
14374   if (!Op.getValueType().isSimple() || Op.getOpcode() != ISD::FMUL)
14375     return SDValue();
14376 
14377   if (!Op.getValueType().is64BitVector() && !Op.getValueType().is128BitVector())
14378     return SDValue();
14379 
14380   SDValue ConstVec = Op->getOperand(1);
14381   if (!isa<BuildVectorSDNode>(ConstVec))
14382     return SDValue();
14383 
14384   MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
14385   uint32_t FloatBits = FloatTy.getSizeInBits();
14386   if (FloatBits != 32 && FloatBits != 64 &&
14387       (FloatBits != 16 || !Subtarget->hasFullFP16()))
14388     return SDValue();
14389 
14390   MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
14391   uint32_t IntBits = IntTy.getSizeInBits();
14392   if (IntBits != 16 && IntBits != 32 && IntBits != 64)
14393     return SDValue();
14394 
14395   // Avoid conversions where iN is larger than the float (e.g., float -> i64).
14396   if (IntBits > FloatBits)
14397     return SDValue();
14398 
14399   BitVector UndefElements;
14400   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
14401   int32_t Bits = IntBits == 64 ? 64 : 32;
14402   int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, Bits + 1);
14403   if (C == -1 || C == 0 || C > Bits)
14404     return SDValue();
14405 
14406   EVT ResTy = Op.getValueType().changeVectorElementTypeToInteger();
14407   if (!DAG.getTargetLoweringInfo().isTypeLegal(ResTy))
14408     return SDValue();
14409 
14410   if (N->getOpcode() == ISD::FP_TO_SINT_SAT ||
14411       N->getOpcode() == ISD::FP_TO_UINT_SAT) {
14412     EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
14413     if (SatVT.getScalarSizeInBits() != IntBits || IntBits != FloatBits)
14414       return SDValue();
14415   }
14416 
14417   SDLoc DL(N);
14418   bool IsSigned = (N->getOpcode() == ISD::FP_TO_SINT ||
14419                    N->getOpcode() == ISD::FP_TO_SINT_SAT);
14420   unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfp2fxs
14421                                       : Intrinsic::aarch64_neon_vcvtfp2fxu;
14422   SDValue FixConv =
14423       DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, ResTy,
14424                   DAG.getConstant(IntrinsicOpcode, DL, MVT::i32),
14425                   Op->getOperand(0), DAG.getConstant(C, DL, MVT::i32));
14426   // We can handle smaller integers by generating an extra trunc.
14427   if (IntBits < FloatBits)
14428     FixConv = DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), FixConv);
14429 
14430   return FixConv;
14431 }
14432 
14433 /// Fold a floating-point divide by power of two into fixed-point to
14434 /// floating-point conversion.
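/// For example, (fdiv (sint_to_fp x), (splat 8.0)) becomes the
/// aarch64_neon_vcvtfxs2fp intrinsic with 3 fractional bits, which is expected
/// to select to a single "scvtf ..., #3".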
14435 static SDValue performFDivCombine(SDNode *N, SelectionDAG &DAG,
14436                                   TargetLowering::DAGCombinerInfo &DCI,
14437                                   const AArch64Subtarget *Subtarget) {
14438   if (!Subtarget->hasNEON())
14439     return SDValue();
14440 
14441   SDValue Op = N->getOperand(0);
14442   unsigned Opc = Op->getOpcode();
14443   if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
14444       !Op.getOperand(0).getValueType().isSimple() ||
14445       (Opc != ISD::SINT_TO_FP && Opc != ISD::UINT_TO_FP))
14446     return SDValue();
14447 
14448   SDValue ConstVec = N->getOperand(1);
14449   if (!isa<BuildVectorSDNode>(ConstVec))
14450     return SDValue();
14451 
14452   MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
14453   int32_t IntBits = IntTy.getSizeInBits();
14454   if (IntBits != 16 && IntBits != 32 && IntBits != 64)
14455     return SDValue();
14456 
14457   MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
14458   int32_t FloatBits = FloatTy.getSizeInBits();
14459   if (FloatBits != 32 && FloatBits != 64)
14460     return SDValue();
14461 
14462   // Avoid conversions where iN is larger than the float (e.g., i64 -> float).
14463   if (IntBits > FloatBits)
14464     return SDValue();
14465 
14466   BitVector UndefElements;
14467   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
14468   int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, FloatBits + 1);
14469   if (C == -1 || C == 0 || C > FloatBits)
14470     return SDValue();
14471 
14472   MVT ResTy;
14473   unsigned NumLanes = Op.getValueType().getVectorNumElements();
14474   switch (NumLanes) {
14475   default:
14476     return SDValue();
14477   case 2:
14478     ResTy = FloatBits == 32 ? MVT::v2i32 : MVT::v2i64;
14479     break;
14480   case 4:
14481     ResTy = FloatBits == 32 ? MVT::v4i32 : MVT::v4i64;
14482     break;
14483   }
14484 
14485   if (ResTy == MVT::v4i64 && DCI.isBeforeLegalizeOps())
14486     return SDValue();
14487 
14488   SDLoc DL(N);
14489   SDValue ConvInput = Op.getOperand(0);
14490   bool IsSigned = Opc == ISD::SINT_TO_FP;
14491   if (IntBits < FloatBits)
14492     ConvInput = DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
14493                             ResTy, ConvInput);
14494 
14495   unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfxs2fp
14496                                       : Intrinsic::aarch64_neon_vcvtfxu2fp;
14497   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
14498                      DAG.getConstant(IntrinsicOpcode, DL, MVT::i32), ConvInput,
14499                      DAG.getConstant(C, DL, MVT::i32));
14500 }
14501 
14502 /// An EXTR instruction is made up of two shifts, ORed together. This helper
14503 /// searches for and classifies those shifts.
14504 static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
14505                          bool &FromHi) {
14506   if (N.getOpcode() == ISD::SHL)
14507     FromHi = false;
14508   else if (N.getOpcode() == ISD::SRL)
14509     FromHi = true;
14510   else
14511     return false;
14512 
14513   if (!isa<ConstantSDNode>(N.getOperand(1)))
14514     return false;
14515 
14516   ShiftAmount = N->getConstantOperandVal(1);
14517   Src = N->getOperand(0);
14518   return true;
14519 }
14520 
14521 /// EXTR instruction extracts a contiguous chunk of bits from two existing
14522 /// registers viewed as a high/low pair. This function looks for the pattern:
14523 /// <tt>(or (shl VAL1, \#N), (srl VAL2, \#RegWidth-N))</tt> and replaces it
14524 /// with an EXTR. Can't quite be done in TableGen because the two immediates
14525 /// aren't independent.
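/// For example, for i32:
///   (or (shl x, 8), (srl y, 24))
/// becomes
///   (EXTR x, y, 24)
/// which is expected to select to "extr wd, wx, wy, #24".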
14526 static SDValue tryCombineToEXTR(SDNode *N,
14527                                 TargetLowering::DAGCombinerInfo &DCI) {
14528   SelectionDAG &DAG = DCI.DAG;
14529   SDLoc DL(N);
14530   EVT VT = N->getValueType(0);
14531 
14532   assert(N->getOpcode() == ISD::OR && "Unexpected root");
14533 
14534   if (VT != MVT::i32 && VT != MVT::i64)
14535     return SDValue();
14536 
14537   SDValue LHS;
14538   uint32_t ShiftLHS = 0;
14539   bool LHSFromHi = false;
14540   if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi))
14541     return SDValue();
14542 
14543   SDValue RHS;
14544   uint32_t ShiftRHS = 0;
14545   bool RHSFromHi = false;
14546   if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi))
14547     return SDValue();
14548 
14549   // If they're both trying to come from the high part of the register, they're
14550   // not really an EXTR.
14551   if (LHSFromHi == RHSFromHi)
14552     return SDValue();
14553 
14554   if (ShiftLHS + ShiftRHS != VT.getSizeInBits())
14555     return SDValue();
14556 
14557   if (LHSFromHi) {
14558     std::swap(LHS, RHS);
14559     std::swap(ShiftLHS, ShiftRHS);
14560   }
14561 
14562   return DAG.getNode(AArch64ISD::EXTR, DL, VT, LHS, RHS,
14563                      DAG.getConstant(ShiftRHS, DL, MVT::i64));
14564 }
14565 
14566 static SDValue tryCombineToBSL(SDNode *N,
14567                                 TargetLowering::DAGCombinerInfo &DCI) {
14568   EVT VT = N->getValueType(0);
14569   SelectionDAG &DAG = DCI.DAG;
14570   SDLoc DL(N);
14571 
14572   if (!VT.isVector())
14573     return SDValue();
14574 
14575   // The combining code currently only works for NEON vectors. In particular,
14576   // it does not work for SVE when dealing with vectors wider than 128 bits.
14577   if (!VT.is64BitVector() && !VT.is128BitVector())
14578     return SDValue();
14579 
14580   SDValue N0 = N->getOperand(0);
14581   if (N0.getOpcode() != ISD::AND)
14582     return SDValue();
14583 
14584   SDValue N1 = N->getOperand(1);
14585   if (N1.getOpcode() != ISD::AND)
14586     return SDValue();
14587 
14588   // InstCombine does (not (neg a)) => (add a -1).
14589   // Try: (or (and (neg a) b) (and (add a -1) c)) => (bsl (neg a) b c)
14590   // Loop over all combinations of AND operands.
14591   for (int i = 1; i >= 0; --i) {
14592     for (int j = 1; j >= 0; --j) {
14593       SDValue O0 = N0->getOperand(i);
14594       SDValue O1 = N1->getOperand(j);
14595       SDValue Sub, Add, SubSibling, AddSibling;
14596 
14597       // Find a SUB and an ADD operand, one from each AND.
14598       if (O0.getOpcode() == ISD::SUB && O1.getOpcode() == ISD::ADD) {
14599         Sub = O0;
14600         Add = O1;
14601         SubSibling = N0->getOperand(1 - i);
14602         AddSibling = N1->getOperand(1 - j);
14603       } else if (O0.getOpcode() == ISD::ADD && O1.getOpcode() == ISD::SUB) {
14604         Add = O0;
14605         Sub = O1;
14606         AddSibling = N0->getOperand(1 - i);
14607         SubSibling = N1->getOperand(1 - j);
14608       } else
14609         continue;
14610 
14611       if (!ISD::isBuildVectorAllZeros(Sub.getOperand(0).getNode()))
14612         continue;
14613 
      // The all-ones constant is always the right-hand operand of the Add.
14615       if (!ISD::isBuildVectorAllOnes(Add.getOperand(1).getNode()))
14616         continue;
14617 
14618       if (Sub.getOperand(1) != Add.getOperand(0))
14619         continue;
14620 
14621       return DAG.getNode(AArch64ISD::BSP, DL, VT, Sub, SubSibling, AddSibling);
14622     }
14623   }
14624 
14625   // (or (and a b) (and (not a) c)) => (bsl a b c)
14626   // We only have to look for constant vectors here since the general, variable
14627   // case can be handled in TableGen.
14628   unsigned Bits = VT.getScalarSizeInBits();
14629   uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1);
14630   for (int i = 1; i >= 0; --i)
14631     for (int j = 1; j >= 0; --j) {
14632       BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(i));
14633       BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(j));
14634       if (!BVN0 || !BVN1)
14635         continue;
14636 
14637       bool FoundMatch = true;
14638       for (unsigned k = 0; k < VT.getVectorNumElements(); ++k) {
14639         ConstantSDNode *CN0 = dyn_cast<ConstantSDNode>(BVN0->getOperand(k));
14640         ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(BVN1->getOperand(k));
14641         if (!CN0 || !CN1 ||
14642             CN0->getZExtValue() != (BitMask & ~CN1->getZExtValue())) {
14643           FoundMatch = false;
14644           break;
14645         }
14646       }
14647 
14648       if (FoundMatch)
14649         return DAG.getNode(AArch64ISD::BSP, DL, VT, SDValue(BVN0, 0),
14650                            N0->getOperand(1 - i), N1->getOperand(1 - j));
14651     }
14652 
14653   return SDValue();
14654 }
14655 
14656 // Given a tree of and/or(csel(0, 1, cc0), csel(0, 1, cc1)), we may be able to
14657 // convert to csel(ccmp(.., cc0)), depending on cc1:
14658 
14659 // (AND (CSET cc0 cmp0) (CSET cc1 (CMP x1 y1)))
14660 // =>
14661 // (CSET cc1 (CCMP x1 y1 !cc1 cc0 cmp0))
14662 //
14663 // (OR (CSET cc0 cmp0) (CSET cc1 (CMP x1 y1)))
14664 // =>
14665 // (CSET cc1 (CCMP x1 y1 cc1 !cc0 cmp0))
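//
// In C terms, a condition such as "a == 0 && b > 1" then becomes a single
// CMP+CCMP+CSET sequence instead of materialising two 0/1 values and ANDing
// them together.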
14666 static SDValue performANDORCSELCombine(SDNode *N, SelectionDAG &DAG) {
14667   EVT VT = N->getValueType(0);
14668   SDValue CSel0 = N->getOperand(0);
14669   SDValue CSel1 = N->getOperand(1);
14670 
14671   if (CSel0.getOpcode() != AArch64ISD::CSEL ||
14672       CSel1.getOpcode() != AArch64ISD::CSEL)
14673     return SDValue();
14674 
14675   if (!CSel0->hasOneUse() || !CSel1->hasOneUse())
14676     return SDValue();
14677 
14678   if (!isNullConstant(CSel0.getOperand(0)) ||
14679       !isOneConstant(CSel0.getOperand(1)) ||
14680       !isNullConstant(CSel1.getOperand(0)) ||
14681       !isOneConstant(CSel1.getOperand(1)))
14682     return SDValue();
14683 
14684   SDValue Cmp0 = CSel0.getOperand(3);
14685   SDValue Cmp1 = CSel1.getOperand(3);
14686   AArch64CC::CondCode CC0 = (AArch64CC::CondCode)CSel0.getConstantOperandVal(2);
14687   AArch64CC::CondCode CC1 = (AArch64CC::CondCode)CSel1.getConstantOperandVal(2);
14688   if (!Cmp0->hasOneUse() || !Cmp1->hasOneUse())
14689     return SDValue();
14690   if (Cmp1.getOpcode() != AArch64ISD::SUBS &&
14691       Cmp0.getOpcode() == AArch64ISD::SUBS) {
14692     std::swap(Cmp0, Cmp1);
14693     std::swap(CC0, CC1);
14694   }
14695 
14696   if (Cmp1.getOpcode() != AArch64ISD::SUBS)
14697     return SDValue();
14698 
14699   SDLoc DL(N);
14700   SDValue CCmp;
14701 
14702   if (N->getOpcode() == ISD::AND) {
14703     AArch64CC::CondCode InvCC0 = AArch64CC::getInvertedCondCode(CC0);
14704     SDValue Condition = DAG.getConstant(InvCC0, DL, MVT_CC);
14705     unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(CC1);
14706     SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
14707     CCmp = DAG.getNode(AArch64ISD::CCMP, DL, MVT_CC, Cmp1.getOperand(0),
14708                        Cmp1.getOperand(1), NZCVOp, Condition, Cmp0);
14709   } else {
14711     AArch64CC::CondCode InvCC1 = AArch64CC::getInvertedCondCode(CC1);
14712     SDValue Condition = DAG.getConstant(CC0, DL, MVT_CC);
14713     unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvCC1);
14714     SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
14715     CCmp = DAG.getNode(AArch64ISD::CCMP, DL, MVT_CC, Cmp1.getOperand(0),
14716                        Cmp1.getOperand(1), NZCVOp, Condition, Cmp0);
14717   }
14718   return DAG.getNode(AArch64ISD::CSEL, DL, VT, CSel0.getOperand(0),
14719                      CSel0.getOperand(1), DAG.getConstant(CC1, DL, MVT::i32),
14720                      CCmp);
14721 }
14722 
14723 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
14724                                 const AArch64Subtarget *Subtarget) {
14725   SelectionDAG &DAG = DCI.DAG;
14726   EVT VT = N->getValueType(0);
14727 
14728   if (SDValue R = performANDORCSELCombine(N, DAG))
14729     return R;
14730 
14731   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
14732     return SDValue();
14733 
14734   // Attempt to form an EXTR from (or (shl VAL1, #N), (srl VAL2, #RegWidth-N))
14735   if (SDValue Res = tryCombineToEXTR(N, DCI))
14736     return Res;
14737 
14738   if (SDValue Res = tryCombineToBSL(N, DCI))
14739     return Res;
14740 
14741   return SDValue();
14742 }
14743 
14744 static bool isConstantSplatVectorMaskForType(SDNode *N, EVT MemVT) {
14745   if (!MemVT.getVectorElementType().isSimple())
14746     return false;
14747 
14748   uint64_t MaskForTy = 0ull;
14749   switch (MemVT.getVectorElementType().getSimpleVT().SimpleTy) {
14750   case MVT::i8:
14751     MaskForTy = 0xffull;
14752     break;
14753   case MVT::i16:
14754     MaskForTy = 0xffffull;
14755     break;
14756   case MVT::i32:
14757     MaskForTy = 0xffffffffull;
14758     break;
14759   default:
14760     return false;
14762   }
14763 
14764   if (N->getOpcode() == AArch64ISD::DUP || N->getOpcode() == ISD::SPLAT_VECTOR)
14765     if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0)))
14766       return Op0->getAPIntValue().getLimitedValue() == MaskForTy;
14767 
14768   return false;
14769 }
14770 
14771 static SDValue performSVEAndCombine(SDNode *N,
14772                                     TargetLowering::DAGCombinerInfo &DCI) {
14773   if (DCI.isBeforeLegalizeOps())
14774     return SDValue();
14775 
14776   SelectionDAG &DAG = DCI.DAG;
14777   SDValue Src = N->getOperand(0);
14778   unsigned Opc = Src->getOpcode();
14779 
14780   // Zero/any extend of an unsigned unpack
14781   if (Opc == AArch64ISD::UUNPKHI || Opc == AArch64ISD::UUNPKLO) {
14782     SDValue UnpkOp = Src->getOperand(0);
14783     SDValue Dup = N->getOperand(1);
14784 
14785     if (Dup.getOpcode() != ISD::SPLAT_VECTOR)
14786       return SDValue();
14787 
14788     SDLoc DL(N);
14789     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Dup->getOperand(0));
14790     if (!C)
14791       return SDValue();
14792 
14793     uint64_t ExtVal = C->getZExtValue();
14794 
14795     // If the mask is fully covered by the unpack, we don't need to push
14796     // a new AND onto the operand
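    // (e.g. the elements of uunpklo(nxv16i8 X) : nxv8i16 are already
    // zero-extended from i8, so an AND with splat(0xff) is redundant and the
    // unpack can be returned directly).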
14797     EVT EltTy = UnpkOp->getValueType(0).getVectorElementType();
14798     if ((ExtVal == 0xFF && EltTy == MVT::i8) ||
14799         (ExtVal == 0xFFFF && EltTy == MVT::i16) ||
14800         (ExtVal == 0xFFFFFFFF && EltTy == MVT::i32))
14801       return Src;
14802 
    // Truncate to prevent a DUP with an over-wide constant.
14804     APInt Mask = C->getAPIntValue().trunc(EltTy.getSizeInBits());
14805 
14806     // Otherwise, make sure we propagate the AND to the operand
14807     // of the unpack
14808     Dup = DAG.getNode(ISD::SPLAT_VECTOR, DL, UnpkOp->getValueType(0),
14809                       DAG.getConstant(Mask.zextOrTrunc(32), DL, MVT::i32));
14810 
14811     SDValue And = DAG.getNode(ISD::AND, DL,
14812                               UnpkOp->getValueType(0), UnpkOp, Dup);
14813 
14814     return DAG.getNode(Opc, DL, N->getValueType(0), And);
14815   }
14816 
14817   if (!EnableCombineMGatherIntrinsics)
14818     return SDValue();
14819 
14820   SDValue Mask = N->getOperand(1);
14821 
14822   if (!Src.hasOneUse())
14823     return SDValue();
14824 
14825   EVT MemVT;
14826 
14827   // SVE load instructions perform an implicit zero-extend, which makes them
14828   // perfect candidates for combining.
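  // For example, an LD1B that loads i8s into an nxv8i16 vector already
  // zero-extends each element, so an AND with splat(0xff) is a no-op and the
  // load can be returned directly.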
14829   switch (Opc) {
14830   case AArch64ISD::LD1_MERGE_ZERO:
14831   case AArch64ISD::LDNF1_MERGE_ZERO:
14832   case AArch64ISD::LDFF1_MERGE_ZERO:
14833     MemVT = cast<VTSDNode>(Src->getOperand(3))->getVT();
14834     break;
14835   case AArch64ISD::GLD1_MERGE_ZERO:
14836   case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
14837   case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
14838   case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
14839   case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
14840   case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
14841   case AArch64ISD::GLD1_IMM_MERGE_ZERO:
14842   case AArch64ISD::GLDFF1_MERGE_ZERO:
14843   case AArch64ISD::GLDFF1_SCALED_MERGE_ZERO:
14844   case AArch64ISD::GLDFF1_SXTW_MERGE_ZERO:
14845   case AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO:
14846   case AArch64ISD::GLDFF1_UXTW_MERGE_ZERO:
14847   case AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO:
14848   case AArch64ISD::GLDFF1_IMM_MERGE_ZERO:
14849   case AArch64ISD::GLDNT1_MERGE_ZERO:
14850     MemVT = cast<VTSDNode>(Src->getOperand(4))->getVT();
14851     break;
14852   default:
14853     return SDValue();
14854   }
14855 
14856   if (isConstantSplatVectorMaskForType(Mask.getNode(), MemVT))
14857     return Src;
14858 
14859   return SDValue();
14860 }
14861 
14862 static SDValue performANDCombine(SDNode *N,
14863                                  TargetLowering::DAGCombinerInfo &DCI) {
14864   SelectionDAG &DAG = DCI.DAG;
14865   SDValue LHS = N->getOperand(0);
14866   SDValue RHS = N->getOperand(1);
14867   EVT VT = N->getValueType(0);
14868 
14869   if (SDValue R = performANDORCSELCombine(N, DAG))
14870     return R;
14871 
14872   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
14873     return SDValue();
14874 
14875   if (VT.isScalableVector())
14876     return performSVEAndCombine(N, DCI);
14877 
14878   // The combining code below works only for NEON vectors. In particular, it
14879   // does not work for SVE when dealing with vectors wider than 128 bits.
14880   if (!VT.is64BitVector() && !VT.is128BitVector())
14881     return SDValue();
14882 
14883   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
14884   if (!BVN)
14885     return SDValue();
14886 
14887   // AND does not accept an immediate, so check if we can use a BIC immediate
14888   // instruction instead. We do this here instead of using a (and x, (mvni imm))
14889   // pattern in isel, because some immediates may be lowered to the preferred
14890   // (and x, (movi imm)) form, even though an mvni representation also exists.
14891   APInt DefBits(VT.getSizeInBits(), 0);
14892   APInt UndefBits(VT.getSizeInBits(), 0);
14893   if (resolveBuildVector(BVN, DefBits, UndefBits)) {
14894     SDValue NewOp;
14895 
14896     DefBits = ~DefBits;
14897     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG,
14898                                     DefBits, &LHS)) ||
14899         (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG,
14900                                     DefBits, &LHS)))
14901       return NewOp;
14902 
14903     UndefBits = ~UndefBits;
14904     if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG,
14905                                     UndefBits, &LHS)) ||
14906         (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG,
14907                                     UndefBits, &LHS)))
14908       return NewOp;
14909   }
14910 
14911   return SDValue();
14912 }
14913 
14914 static bool hasPairwiseAdd(unsigned Opcode, EVT VT, bool FullFP16) {
14915   switch (Opcode) {
14916   case ISD::STRICT_FADD:
14917   case ISD::FADD:
14918     return (FullFP16 && VT == MVT::f16) || VT == MVT::f32 || VT == MVT::f64;
14919   case ISD::ADD:
14920     return VT == MVT::i64;
14921   default:
14922     return false;
14923   }
14924 }
14925 
14926 static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
14927                         AArch64CC::CondCode Cond);
14928 
14929 static bool isPredicateCCSettingOp(SDValue N) {
14930   if ((N.getOpcode() == ISD::SETCC) ||
14931       (N.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14932        (N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilege ||
14933         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilegt ||
14934         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilehi ||
14935         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilehs ||
14936         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilele ||
14937         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelo ||
14938         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilels ||
14939         N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelt ||
14940         // get_active_lane_mask is lowered to a whilelo instruction.
14941         N.getConstantOperandVal(0) == Intrinsic::get_active_lane_mask)))
14942     return true;
14943 
14944   return false;
14945 }
14946 
14947 // Materialize : i1 = extract_vector_elt t37, Constant:i64<0>
14948 // ... into: "ptrue p, all" + PTEST
14949 static SDValue
14950 performFirstTrueTestVectorCombine(SDNode *N,
14951                                   TargetLowering::DAGCombinerInfo &DCI,
14952                                   const AArch64Subtarget *Subtarget) {
14953   assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
14954   // Make sure PTEST can be legalised with illegal types.
14955   if (!Subtarget->hasSVE() || DCI.isBeforeLegalize())
14956     return SDValue();
14957 
14958   SDValue N0 = N->getOperand(0);
14959   EVT VT = N0.getValueType();
14960 
14961   if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1 ||
14962       !isNullConstant(N->getOperand(1)))
14963     return SDValue();
14964 
  // Restrict the DAG combine to cases where we're extracting from a
14966   // flag-setting operation.
14967   if (!isPredicateCCSettingOp(N0))
14968     return SDValue();
14969 
14970   // Extracts of lane 0 for SVE can be expressed as PTEST(Op, FIRST) ? 1 : 0
14971   SelectionDAG &DAG = DCI.DAG;
14972   SDValue Pg = getPTrue(DAG, SDLoc(N), VT, AArch64SVEPredPattern::all);
14973   return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::FIRST_ACTIVE);
14974 }
14975 
14976 // Materialize : Idx = (add (mul vscale, NumEls), -1)
14977 //               i1 = extract_vector_elt t37, Constant:i64<Idx>
14978 //     ... into: "ptrue p, all" + PTEST
14979 static SDValue
14980 performLastTrueTestVectorCombine(SDNode *N,
14981                                  TargetLowering::DAGCombinerInfo &DCI,
14982                                  const AArch64Subtarget *Subtarget) {
14983   assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
  // Make sure PTEST can be legalised with illegal types.
14985   if (!Subtarget->hasSVE() || DCI.isBeforeLegalize())
14986     return SDValue();
14987 
14988   SDValue N0 = N->getOperand(0);
14989   EVT OpVT = N0.getValueType();
14990 
14991   if (!OpVT.isScalableVector() || OpVT.getVectorElementType() != MVT::i1)
14992     return SDValue();
14993 
14994   // Idx == (add (mul vscale, NumEls), -1)
14995   SDValue Idx = N->getOperand(1);
14996   if (Idx.getOpcode() != ISD::ADD || !isAllOnesConstant(Idx.getOperand(1)))
14997     return SDValue();
14998 
14999   SDValue VS = Idx.getOperand(0);
15000   if (VS.getOpcode() != ISD::VSCALE)
15001     return SDValue();
15002 
15003   unsigned NumEls = OpVT.getVectorElementCount().getKnownMinValue();
15004   if (VS.getConstantOperandVal(0) != NumEls)
15005     return SDValue();
15006 
15007   // Extracts of lane EC-1 for SVE can be expressed as PTEST(Op, LAST) ? 1 : 0
15008   SelectionDAG &DAG = DCI.DAG;
15009   SDValue Pg = getPTrue(DAG, SDLoc(N), OpVT, AArch64SVEPredPattern::all);
15010   return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::LAST_ACTIVE);
15011 }
15012 
15013 static SDValue
15014 performExtractVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
15015                                const AArch64Subtarget *Subtarget) {
15016   assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
15017   if (SDValue Res = performFirstTrueTestVectorCombine(N, DCI, Subtarget))
15018     return Res;
15019   if (SDValue Res = performLastTrueTestVectorCombine(N, DCI, Subtarget))
15020     return Res;
15021 
15022   SelectionDAG &DAG = DCI.DAG;
15023   SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
15024   ConstantSDNode *ConstantN1 = dyn_cast<ConstantSDNode>(N1);
15025 
15026   EVT VT = N->getValueType(0);
15027   const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
15028   bool IsStrict = N0->isStrictFPOpcode();
15029 
15030   // extract(dup x) -> x
15031   if (N0.getOpcode() == AArch64ISD::DUP)
15032     return DAG.getZExtOrTrunc(N0.getOperand(0), SDLoc(N), VT);
15033 
15034   // Rewrite for pairwise fadd pattern
15035   //   (f32 (extract_vector_elt
15036   //           (fadd (vXf32 Other)
15037   //                 (vector_shuffle (vXf32 Other) undef <1,X,...> )) 0))
15038   // ->
15039   //   (f32 (fadd (extract_vector_elt (vXf32 Other) 0)
15040   //              (extract_vector_elt (vXf32 Other) 1))
15041   // For strict_fadd we need to make sure the old strict_fadd can be deleted, so
15042   // we can only do this when it's used only by the extract_vector_elt.
15043   if (ConstantN1 && ConstantN1->getZExtValue() == 0 &&
15044       hasPairwiseAdd(N0->getOpcode(), VT, FullFP16) &&
15045       (!IsStrict || N0.hasOneUse())) {
15046     SDLoc DL(N0);
15047     SDValue N00 = N0->getOperand(IsStrict ? 1 : 0);
15048     SDValue N01 = N0->getOperand(IsStrict ? 2 : 1);
15049 
15050     ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(N01);
15051     SDValue Other = N00;
15052 
15053     // And handle the commutative case.
15054     if (!Shuffle) {
15055       Shuffle = dyn_cast<ShuffleVectorSDNode>(N00);
15056       Other = N01;
15057     }
15058 
15059     if (Shuffle && Shuffle->getMaskElt(0) == 1 &&
15060         Other == Shuffle->getOperand(0)) {
15061       SDValue Extract1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Other,
15062                                      DAG.getConstant(0, DL, MVT::i64));
15063       SDValue Extract2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Other,
15064                                      DAG.getConstant(1, DL, MVT::i64));
15065       if (!IsStrict)
15066         return DAG.getNode(N0->getOpcode(), DL, VT, Extract1, Extract2);
15067 
15068       // For strict_fadd we need uses of the final extract_vector to be replaced
15069       // with the strict_fadd, but we also need uses of the chain output of the
15070       // original strict_fadd to use the chain output of the new strict_fadd as
15071       // otherwise it may not be deleted.
15072       SDValue Ret = DAG.getNode(N0->getOpcode(), DL,
15073                                 {VT, MVT::Other},
15074                                 {N0->getOperand(0), Extract1, Extract2});
15075       DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Ret);
15076       DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Ret.getValue(1));
15077       return SDValue(N, 0);
15078     }
15079   }
15080 
15081   return SDValue();
15082 }
15083 
15084 static SDValue performConcatVectorsCombine(SDNode *N,
15085                                            TargetLowering::DAGCombinerInfo &DCI,
15086                                            SelectionDAG &DAG) {
15087   SDLoc dl(N);
15088   EVT VT = N->getValueType(0);
15089   SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
15090   unsigned N0Opc = N0->getOpcode(), N1Opc = N1->getOpcode();
15091 
15092   if (VT.isScalableVector())
15093     return SDValue();
15094 
15095   // Optimize concat_vectors of truncated vectors, where the intermediate
  // type is illegal, to avoid said illegality, e.g.,
15097   //   (v4i16 (concat_vectors (v2i16 (truncate (v2i64))),
15098   //                          (v2i16 (truncate (v2i64)))))
15099   // ->
15100   //   (v4i16 (truncate (vector_shuffle (v4i32 (bitcast (v2i64))),
15101   //                                    (v4i32 (bitcast (v2i64))),
15102   //                                    <0, 2, 4, 6>)))
15103   // This isn't really target-specific, but ISD::TRUNCATE legality isn't keyed
15104   // on both input and result type, so we might generate worse code.
15105   // On AArch64 we know it's fine for v2i64->v4i16 and v4i32->v8i8.
15106   if (N->getNumOperands() == 2 && N0Opc == ISD::TRUNCATE &&
15107       N1Opc == ISD::TRUNCATE) {
15108     SDValue N00 = N0->getOperand(0);
15109     SDValue N10 = N1->getOperand(0);
15110     EVT N00VT = N00.getValueType();
15111 
15112     if (N00VT == N10.getValueType() &&
15113         (N00VT == MVT::v2i64 || N00VT == MVT::v4i32) &&
15114         N00VT.getScalarSizeInBits() == 4 * VT.getScalarSizeInBits()) {
15115       MVT MidVT = (N00VT == MVT::v2i64 ? MVT::v4i32 : MVT::v8i16);
15116       SmallVector<int, 8> Mask(MidVT.getVectorNumElements());
15117       for (size_t i = 0; i < Mask.size(); ++i)
15118         Mask[i] = i * 2;
15119       return DAG.getNode(ISD::TRUNCATE, dl, VT,
15120                          DAG.getVectorShuffle(
15121                              MidVT, dl,
15122                              DAG.getNode(ISD::BITCAST, dl, MidVT, N00),
15123                              DAG.getNode(ISD::BITCAST, dl, MidVT, N10), Mask));
15124     }
15125   }
15126 
15127   if (N->getOperand(0).getValueType() == MVT::v4i8) {
15128     // If we have a concat of v4i8 loads, convert them to a buildvector of f32
15129     // loads to prevent having to go through the v4i8 load legalization that
15130     // needs to extend each element into a larger type.
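    // For example, concat(v4i8 load A, v4i8 load B) becomes
    // bitcast(v2f32 build_vector(f32 load A, f32 load B)), so each 32-bit
    // chunk is loaded straight into a vector lane.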
15131     if (N->getNumOperands() % 2 == 0 && all_of(N->op_values(), [](SDValue V) {
15132           if (V.getValueType() != MVT::v4i8)
15133             return false;
15134           if (V.isUndef())
15135             return true;
15136           LoadSDNode *LD = dyn_cast<LoadSDNode>(V);
15137           return LD && V.hasOneUse() && LD->isSimple() && !LD->isIndexed() &&
15138                  LD->getExtensionType() == ISD::NON_EXTLOAD;
15139         })) {
15140       EVT NVT =
15141           EVT::getVectorVT(*DAG.getContext(), MVT::f32, N->getNumOperands());
15142       SmallVector<SDValue> Ops;
15143 
15144       for (unsigned i = 0; i < N->getNumOperands(); i++) {
15145         SDValue V = N->getOperand(i);
15146         if (V.isUndef())
15147           Ops.push_back(DAG.getUNDEF(MVT::f32));
15148         else {
15149           LoadSDNode *LD = cast<LoadSDNode>(V);
15150           SDValue NewLoad =
15151               DAG.getLoad(MVT::f32, dl, LD->getChain(), LD->getBasePtr(),
15152                           LD->getMemOperand());
15153           DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLoad.getValue(1));
15154           Ops.push_back(NewLoad);
15155         }
15156       }
15157       return DAG.getBitcast(N->getValueType(0),
15158                             DAG.getBuildVector(NVT, dl, Ops));
15159     }
15160   }
15161 
15163   // Wait 'til after everything is legalized to try this. That way we have
15164   // legal vector types and such.
15165   if (DCI.isBeforeLegalizeOps())
15166     return SDValue();
15167 
15168   // Optimise concat_vectors of two [us]avgceils or [us]avgfloors that use
15169   // extracted subvectors from the same original vectors. Combine these into a
15170   // single avg that operates on the two original vectors.
  // avgceil is the target-independent name for rhadd, avgfloor is a hadd.
15172   // Example:
15173   //  (concat_vectors (v8i8 (avgceils (extract_subvector (v16i8 OpA, <0>),
15174   //                                   extract_subvector (v16i8 OpB, <0>))),
15175   //                  (v8i8 (avgceils (extract_subvector (v16i8 OpA, <8>),
15176   //                                   extract_subvector (v16i8 OpB, <8>)))))
15177   // ->
15178   //  (v16i8(avgceils(v16i8 OpA, v16i8 OpB)))
15179   if (N->getNumOperands() == 2 && N0Opc == N1Opc &&
15180       (N0Opc == ISD::AVGCEILU || N0Opc == ISD::AVGCEILS ||
15181        N0Opc == ISD::AVGFLOORU || N0Opc == ISD::AVGFLOORS)) {
15182     SDValue N00 = N0->getOperand(0);
15183     SDValue N01 = N0->getOperand(1);
15184     SDValue N10 = N1->getOperand(0);
15185     SDValue N11 = N1->getOperand(1);
15186 
15187     EVT N00VT = N00.getValueType();
15188     EVT N10VT = N10.getValueType();
15189 
15190     if (N00->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15191         N01->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15192         N10->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15193         N11->getOpcode() == ISD::EXTRACT_SUBVECTOR && N00VT == N10VT) {
15194       SDValue N00Source = N00->getOperand(0);
15195       SDValue N01Source = N01->getOperand(0);
15196       SDValue N10Source = N10->getOperand(0);
15197       SDValue N11Source = N11->getOperand(0);
15198 
15199       if (N00Source == N10Source && N01Source == N11Source &&
15200           N00Source.getValueType() == VT && N01Source.getValueType() == VT) {
15201         assert(N0.getValueType() == N1.getValueType());
15202 
15203         uint64_t N00Index = N00.getConstantOperandVal(1);
15204         uint64_t N01Index = N01.getConstantOperandVal(1);
15205         uint64_t N10Index = N10.getConstantOperandVal(1);
15206         uint64_t N11Index = N11.getConstantOperandVal(1);
15207 
15208         if (N00Index == N01Index && N10Index == N11Index && N00Index == 0 &&
15209             N10Index == N00VT.getVectorNumElements())
15210           return DAG.getNode(N0Opc, dl, VT, N00Source, N01Source);
15211       }
15212     }
15213   }
15214 
15215   // If we see a (concat_vectors (v1x64 A), (v1x64 A)) it's really a vector
15216   // splat. The indexed instructions are going to be expecting a DUPLANE64, so
15217   // canonicalise to that.
15218   if (N->getNumOperands() == 2 && N0 == N1 && VT.getVectorNumElements() == 2) {
15219     assert(VT.getScalarSizeInBits() == 64);
15220     return DAG.getNode(AArch64ISD::DUPLANE64, dl, VT, WidenVector(N0, DAG),
15221                        DAG.getConstant(0, dl, MVT::i64));
15222   }
15223 
15224   // Canonicalise concat_vectors so that the right-hand vector has as few
15225   // bit-casts as possible before its real operation. The primary matching
15226   // destination for these operations will be the narrowing "2" instructions,
15227   // which depend on the operation being performed on this right-hand vector.
15228   // For example,
15229   //    (concat_vectors LHS,  (v1i64 (bitconvert (v4i16 RHS))))
15230   // becomes
15231   //    (bitconvert (concat_vectors (v4i16 (bitconvert LHS)), RHS))
15232 
15233   if (N->getNumOperands() != 2 || N1Opc != ISD::BITCAST)
15234     return SDValue();
15235   SDValue RHS = N1->getOperand(0);
15236   MVT RHSTy = RHS.getValueType().getSimpleVT();
15237   // If the RHS is not a vector, this is not the pattern we're looking for.
15238   if (!RHSTy.isVector())
15239     return SDValue();
15240 
15241   LLVM_DEBUG(
15242       dbgs() << "aarch64-lower: concat_vectors bitcast simplification\n");
15243 
15244   MVT ConcatTy = MVT::getVectorVT(RHSTy.getVectorElementType(),
15245                                   RHSTy.getVectorNumElements() * 2);
15246   return DAG.getNode(ISD::BITCAST, dl, VT,
15247                      DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatTy,
15248                                  DAG.getNode(ISD::BITCAST, dl, RHSTy, N0),
15249                                  RHS));
15250 }
15251 
15252 static SDValue
15253 performExtractSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
15254                                SelectionDAG &DAG) {
15255   if (DCI.isBeforeLegalizeOps())
15256     return SDValue();
15257 
15258   EVT VT = N->getValueType(0);
15259   if (!VT.isScalableVector() || VT.getVectorElementType() != MVT::i1)
15260     return SDValue();
15261 
15262   SDValue V = N->getOperand(0);
15263 
15264   // NOTE: This combine exists in DAGCombiner, but that version's legality check
15265   // blocks this combine because the non-const case requires custom lowering.
15266   //
15267   // ty1 extract_vector(ty2 splat(const))) -> ty1 splat(const)
15268   if (V.getOpcode() == ISD::SPLAT_VECTOR)
15269     if (isa<ConstantSDNode>(V.getOperand(0)))
15270       return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), VT, V.getOperand(0));
15271 
15272   return SDValue();
15273 }
15274 
15275 static SDValue
15276 performInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
15277                               SelectionDAG &DAG) {
15278   SDLoc DL(N);
15279   SDValue Vec = N->getOperand(0);
15280   SDValue SubVec = N->getOperand(1);
15281   uint64_t IdxVal = N->getConstantOperandVal(2);
15282   EVT VecVT = Vec.getValueType();
15283   EVT SubVT = SubVec.getValueType();
15284 
15285   // Only do this for legal fixed vector types.
15286   if (!VecVT.isFixedLengthVector() ||
15287       !DAG.getTargetLoweringInfo().isTypeLegal(VecVT) ||
15288       !DAG.getTargetLoweringInfo().isTypeLegal(SubVT))
15289     return SDValue();
15290 
15291   // Ignore widening patterns.
15292   if (IdxVal == 0 && Vec.isUndef())
15293     return SDValue();
15294 
15295   // Subvector must be half the width and an "aligned" insertion.
15296   unsigned NumSubElts = SubVT.getVectorNumElements();
15297   if ((SubVT.getSizeInBits() * 2) != VecVT.getSizeInBits() ||
15298       (IdxVal != 0 && IdxVal != NumSubElts))
15299     return SDValue();
15300 
15301   // Fold insert_subvector -> concat_vectors
15302   // insert_subvector(Vec,Sub,lo) -> concat_vectors(Sub,extract(Vec,hi))
15303   // insert_subvector(Vec,Sub,hi) -> concat_vectors(extract(Vec,lo),Sub)
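        // Illustrative example (types chosen for exposition): with VecVT = v4i32
        // and SubVT = v2i32,
        //   insert_subvector(V, S, 0) -> concat_vectors(S, extract_subvector(V, 2))
        //   insert_subvector(V, S, 2) -> concat_vectors(extract_subvector(V, 0), S)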
15304   SDValue Lo, Hi;
15305   if (IdxVal == 0) {
15306     Lo = SubVec;
15307     Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec,
15308                      DAG.getVectorIdxConstant(NumSubElts, DL));
15309   } else {
15310     Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec,
15311                      DAG.getVectorIdxConstant(0, DL));
15312     Hi = SubVec;
15313   }
15314   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT, Lo, Hi);
15315 }
15316 
15317 static SDValue tryCombineFixedPointConvert(SDNode *N,
15318                                            TargetLowering::DAGCombinerInfo &DCI,
15319                                            SelectionDAG &DAG) {
15320   // Wait until after everything is legalized to try this. That way we have
15321   // legal vector types and such.
15322   if (DCI.isBeforeLegalizeOps())
15323     return SDValue();
15324   // Transform a scalar conversion of a value from a lane extract into a
15325   // lane extract of a vector conversion. E.g., from foo1 to foo2:
15326   // double foo1(int64x2_t a) { return vcvtd_n_f64_s64(a[1], 9); }
15327   // double foo2(int64x2_t a) { return vcvtq_n_f64_s64(a, 9)[1]; }
15328   //
15329   // The second form interacts better with instruction selection and the
15330   // register allocator to avoid cross-class register copies that aren't
15331   // coalescable due to a lane reference.
15332 
15333   // Check the operand and see if it originates from a lane extract.
15334   SDValue Op1 = N->getOperand(1);
15335   if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
15336     return SDValue();
15337 
15338   // Yep, no additional predication needed. Perform the transform.
15339   SDValue IID = N->getOperand(0);
15340   SDValue Shift = N->getOperand(2);
15341   SDValue Vec = Op1.getOperand(0);
15342   SDValue Lane = Op1.getOperand(1);
15343   EVT ResTy = N->getValueType(0);
15344   EVT VecResTy;
15345   SDLoc DL(N);
15346 
15347   // The vector width should be 128 bits by the time we get here, even
15348   // if it started as 64 bits (the extract_vector handling will have
15349   // done so). Bail if it is not.
15350   if (Vec.getValueSizeInBits() != 128)
15351     return SDValue();
15352 
15353   if (Vec.getValueType() == MVT::v4i32)
15354     VecResTy = MVT::v4f32;
15355   else if (Vec.getValueType() == MVT::v2i64)
15356     VecResTy = MVT::v2f64;
15357   else
15358     return SDValue();
15359 
15360   SDValue Convert =
15361       DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VecResTy, IID, Vec, Shift);
15362   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResTy, Convert, Lane);
15363 }
15364 
15365 // AArch64 high-vector "long" operations are formed by performing the non-high
15366 // version on an extract_subvector of each operand which gets the high half:
15367 //
15368 //  (longop2 LHS, RHS) == (longop (extract_high LHS), (extract_high RHS))
15369 //
15370 // However, there are cases which don't have an extract_high explicitly, but
15371 // have another operation that can be made compatible with one for free. For
15372 // example:
15373 //
15374 //  (dupv64 scalar) --> (extract_high (dup128 scalar))
15375 //
15376 // This routine does the actual conversion of such DUPs, once outer routines
15377 // have determined that everything else is in order.
15378 // It also supports immediate DUP-like nodes (MOVI/MVNi), which we can fold
15379 // similarly here.
15380 static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) {
15381   MVT VT = N.getSimpleValueType();
15382   if (N.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15383       N.getConstantOperandVal(1) == 0)
15384     N = N.getOperand(0);
15385 
15386   switch (N.getOpcode()) {
15387   case AArch64ISD::DUP:
15388   case AArch64ISD::DUPLANE8:
15389   case AArch64ISD::DUPLANE16:
15390   case AArch64ISD::DUPLANE32:
15391   case AArch64ISD::DUPLANE64:
15392   case AArch64ISD::MOVI:
15393   case AArch64ISD::MOVIshift:
15394   case AArch64ISD::MOVIedit:
15395   case AArch64ISD::MOVImsl:
15396   case AArch64ISD::MVNIshift:
15397   case AArch64ISD::MVNImsl:
15398     break;
15399   default:
15400     // FMOV could be supported, but isn't very useful, as it would only occur
15401     // if you passed a bitcasted floating point immediate to an eligible long
15402     // integer op (addl, smull, ...).
15403     return SDValue();
15404   }
15405 
15406   if (!VT.is64BitVector())
15407     return SDValue();
15408 
15409   SDLoc DL(N);
15410   unsigned NumElems = VT.getVectorNumElements();
15411   if (N.getValueType().is64BitVector()) {
15412     MVT ElementTy = VT.getVectorElementType();
15413     MVT NewVT = MVT::getVectorVT(ElementTy, NumElems * 2);
15414     N = DAG.getNode(N->getOpcode(), DL, NewVT, N->ops());
15415   }
15416 
15417   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, N,
15418                      DAG.getConstant(NumElems, DL, MVT::i64));
15419 }
15420 
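      // Returns true if N is (possibly a bitcast of) an extract_subvector that
      // takes the high half of a fixed-length source vector, e.g.
      // (extract_subvector (v16i8 X), 8).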
15421 static bool isEssentiallyExtractHighSubvector(SDValue N) {
15422   if (N.getOpcode() == ISD::BITCAST)
15423     N = N.getOperand(0);
15424   if (N.getOpcode() != ISD::EXTRACT_SUBVECTOR)
15425     return false;
15426   if (N.getOperand(0).getValueType().isScalableVector())
15427     return false;
15428   return cast<ConstantSDNode>(N.getOperand(1))->getAPIntValue() ==
15429          N.getOperand(0).getValueType().getVectorNumElements() / 2;
15430 }
15431 
15432 /// Helper structure to keep track of ISD::SET_CC operands.
15433 struct GenericSetCCInfo {
15434   const SDValue *Opnd0;
15435   const SDValue *Opnd1;
15436   ISD::CondCode CC;
15437 };
15438 
15439 /// Helper structure to keep track of a SET_CC lowered into AArch64 code.
15440 struct AArch64SetCCInfo {
15441   const SDValue *Cmp;
15442   AArch64CC::CondCode CC;
15443 };
15444 
15445 /// Helper structure to keep track of SetCC information.
15446 union SetCCInfo {
15447   GenericSetCCInfo Generic;
15448   AArch64SetCCInfo AArch64;
15449 };
15450 
15451 /// Helper structure to be able to read SetCC information. If the IsAArch64
15452 /// field is set to true, Info is an AArch64SetCCInfo; otherwise Info is a
15453 /// GenericSetCCInfo.
15454 struct SetCCInfoAndKind {
15455   SetCCInfo Info;
15456   bool IsAArch64;
15457 };
15458 
15459 /// Check whether or not \p Op is a SET_CC operation, either a generic or an
15460 /// AArch64 lowered one.
15461 ///
15462 /// \p SetCCInfo is filled accordingly.
15463 /// \post SetCCInfo is meaningful only when this function returns true.
15464 /// \return True when Op is a kind of SET_CC operation.
15465 static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo) {
15466   // If this is a setcc, this is straightforward.
15467   if (Op.getOpcode() == ISD::SETCC) {
15468     SetCCInfo.Info.Generic.Opnd0 = &Op.getOperand(0);
15469     SetCCInfo.Info.Generic.Opnd1 = &Op.getOperand(1);
15470     SetCCInfo.Info.Generic.CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
15471     SetCCInfo.IsAArch64 = false;
15472     return true;
15473   }
15474   // Otherwise, check if this is a matching csel instruction.
15475   // In other words:
15476   // - csel 1, 0, cc
15477   // - csel 0, 1, !cc
15478   if (Op.getOpcode() != AArch64ISD::CSEL)
15479     return false;
15480   // Set the information about the operands.
15481   // TODO: we want the operands of the Cmp not the csel
15482   SetCCInfo.Info.AArch64.Cmp = &Op.getOperand(3);
15483   SetCCInfo.IsAArch64 = true;
15484   SetCCInfo.Info.AArch64.CC = static_cast<AArch64CC::CondCode>(
15485       cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
15486 
15487   // Check that the operands match the constraints:
15488   // (1) Both operands must be constants.
15489   // (2) One must be 1 and the other must be 0.
15490   ConstantSDNode *TValue = dyn_cast<ConstantSDNode>(Op.getOperand(0));
15491   ConstantSDNode *FValue = dyn_cast<ConstantSDNode>(Op.getOperand(1));
15492 
15493   // Check (1).
15494   if (!TValue || !FValue)
15495     return false;
15496 
15497   // Check (2).
15498   if (!TValue->isOne()) {
15499     // Update the comparison when we are interested in !cc.
15500     std::swap(TValue, FValue);
15501     SetCCInfo.Info.AArch64.CC =
15502         AArch64CC::getInvertedCondCode(SetCCInfo.Info.AArch64.CC);
15503   }
15504   return TValue->isOne() && FValue->isZero();
15505 }
15506 
15507 // Returns true if Op is setcc or zext of setcc.
15508 static bool isSetCCOrZExtSetCC(const SDValue& Op, SetCCInfoAndKind &Info) {
15509   if (isSetCC(Op, Info))
15510     return true;
15511   return ((Op.getOpcode() == ISD::ZERO_EXTEND) &&
15512     isSetCC(Op->getOperand(0), Info));
15513 }
15514 
15515 // The folding we want to perform is:
15516 // (add x, [zext] (setcc cc ...) )
15517 //   -->
15518 // (csel x, (add x, 1), !cc ...)
15519 //
15520 // The latter will get matched to a CSINC instruction.
15521 static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG) {
15522   assert(Op && Op->getOpcode() == ISD::ADD && "Unexpected operation!");
15523   SDValue LHS = Op->getOperand(0);
15524   SDValue RHS = Op->getOperand(1);
15525   SetCCInfoAndKind InfoAndKind;
15526 
15527   // If both operands are a SET_CC, then we don't want to perform this
15528   // folding and create another csel as this results in more instructions
15529   // (and higher register usage).
15530   if (isSetCCOrZExtSetCC(LHS, InfoAndKind) &&
15531       isSetCCOrZExtSetCC(RHS, InfoAndKind))
15532     return SDValue();
15533 
15534   // If neither operand is a SET_CC, give up.
15535   if (!isSetCCOrZExtSetCC(LHS, InfoAndKind)) {
15536     std::swap(LHS, RHS);
15537     if (!isSetCCOrZExtSetCC(LHS, InfoAndKind))
15538       return SDValue();
15539   }
15540 
15541   // FIXME: This could be generalized to work for FP comparisons.
15542   EVT CmpVT = InfoAndKind.IsAArch64
15543                   ? InfoAndKind.Info.AArch64.Cmp->getOperand(0).getValueType()
15544                   : InfoAndKind.Info.Generic.Opnd0->getValueType();
15545   if (CmpVT != MVT::i32 && CmpVT != MVT::i64)
15546     return SDValue();
15547 
15548   SDValue CCVal;
15549   SDValue Cmp;
15550   SDLoc dl(Op);
15551   if (InfoAndKind.IsAArch64) {
15552     CCVal = DAG.getConstant(
15553         AArch64CC::getInvertedCondCode(InfoAndKind.Info.AArch64.CC), dl,
15554         MVT::i32);
15555     Cmp = *InfoAndKind.Info.AArch64.Cmp;
15556   } else
15557     Cmp = getAArch64Cmp(
15558         *InfoAndKind.Info.Generic.Opnd0, *InfoAndKind.Info.Generic.Opnd1,
15559         ISD::getSetCCInverse(InfoAndKind.Info.Generic.CC, CmpVT), CCVal, DAG,
15560         dl);
15561 
15562   EVT VT = Op->getValueType(0);
15563   LHS = DAG.getNode(ISD::ADD, dl, VT, RHS, DAG.getConstant(1, dl, VT));
15564   return DAG.getNode(AArch64ISD::CSEL, dl, VT, RHS, LHS, CCVal, Cmp);
15565 }
15566 
15567 // ADD(UADDV a, UADDV b) -->  UADDV(ADD a, b)
15568 static SDValue performAddUADDVCombine(SDNode *N, SelectionDAG &DAG) {
15569   EVT VT = N->getValueType(0);
15570   // Only handle scalar integer results.
15571   if (N->getOpcode() != ISD::ADD || !VT.isScalarInteger())
15572     return SDValue();
15573 
15574   SDValue LHS = N->getOperand(0);
15575   SDValue RHS = N->getOperand(1);
15576   if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
15577       RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT || LHS.getValueType() != VT)
15578     return SDValue();
15579 
15580   auto *LHSN1 = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
15581   auto *RHSN1 = dyn_cast<ConstantSDNode>(RHS->getOperand(1));
15582   if (!LHSN1 || LHSN1 != RHSN1 || !RHSN1->isZero())
15583     return SDValue();
15584 
15585   SDValue Op1 = LHS->getOperand(0);
15586   SDValue Op2 = RHS->getOperand(0);
15587   EVT OpVT1 = Op1.getValueType();
15588   EVT OpVT2 = Op2.getValueType();
15589   if (Op1.getOpcode() != AArch64ISD::UADDV || OpVT1 != OpVT2 ||
15590       Op2.getOpcode() != AArch64ISD::UADDV ||
15591       OpVT1.getVectorElementType() != VT)
15592     return SDValue();
15593 
15594   SDValue Val1 = Op1.getOperand(0);
15595   SDValue Val2 = Op2.getOperand(0);
15596   EVT ValVT = Val1->getValueType(0);
15597   SDLoc DL(N);
15598   SDValue AddVal = DAG.getNode(ISD::ADD, DL, ValVT, Val1, Val2);
15599   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
15600                      DAG.getNode(AArch64ISD::UADDV, DL, ValVT, AddVal),
15601                      DAG.getConstant(0, DL, MVT::i64));
15602 }
15603 
15604 /// Perform the scalar expression combine in the form of:
15605 ///   CSEL(c, 1, cc) + b => CSINC(b+c, b, cc)
15606 ///   CSNEG(c, -1, cc) + b => CSINC(b+c, b, cc)
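      /// Illustrative example (constants chosen for exposition):
      ///   add(CSEL(5, 1, cc, cmp), b) --> CSINC(add(b, 5), b, cc, cmp)
      /// i.e. "cc ? b+5 : b+1", which matches a single ADD feeding a CSINC.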
15607 static SDValue performAddCSelIntoCSinc(SDNode *N, SelectionDAG &DAG) {
15608   EVT VT = N->getValueType(0);
15609   if (!VT.isScalarInteger() || N->getOpcode() != ISD::ADD)
15610     return SDValue();
15611 
15612   SDValue LHS = N->getOperand(0);
15613   SDValue RHS = N->getOperand(1);
15614 
15615   // Handle commutativity.
15616   if (LHS.getOpcode() != AArch64ISD::CSEL &&
15617       LHS.getOpcode() != AArch64ISD::CSNEG) {
15618     std::swap(LHS, RHS);
15619     if (LHS.getOpcode() != AArch64ISD::CSEL &&
15620         LHS.getOpcode() != AArch64ISD::CSNEG) {
15621       return SDValue();
15622     }
15623   }
15624 
15625   if (!LHS.hasOneUse())
15626     return SDValue();
15627 
15628   AArch64CC::CondCode AArch64CC =
15629       static_cast<AArch64CC::CondCode>(LHS.getConstantOperandVal(2));
15630 
15631   // The CSEL should include a constant one operand, and the CSNEG should
15632   // include a one or negative-one operand.
15633   ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(LHS.getOperand(0));
15634   ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
15635   if (!CTVal || !CFVal)
15636     return SDValue();
15637 
15638   if (!(LHS.getOpcode() == AArch64ISD::CSEL &&
15639         (CTVal->isOne() || CFVal->isOne())) &&
15640       !(LHS.getOpcode() == AArch64ISD::CSNEG &&
15641         (CTVal->isOne() || CFVal->isAllOnes())))
15642     return SDValue();
15643 
15644   // Switch CSEL(1, c, cc) to CSEL(c, 1, !cc)
15645   if (LHS.getOpcode() == AArch64ISD::CSEL && CTVal->isOne() &&
15646       !CFVal->isOne()) {
15647     std::swap(CTVal, CFVal);
15648     AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
15649   }
15650 
15651   SDLoc DL(N);
15652   // Switch CSNEG(1, c, cc) to CSNEG(-c, -1, !cc)
15653   if (LHS.getOpcode() == AArch64ISD::CSNEG && CTVal->isOne() &&
15654       !CFVal->isAllOnes()) {
15655     APInt C = -1 * CFVal->getAPIntValue();
15656     CTVal = cast<ConstantSDNode>(DAG.getConstant(C, DL, VT));
15657     CFVal = cast<ConstantSDNode>(DAG.getAllOnesConstant(DL, VT));
15658     AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
15659   }
15660 
15661   // It might be neutral for larger constants, as the immediate needs to be
15662   // materialized in a register.
15663   APInt ADDC = CTVal->getAPIntValue();
15664   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15665   if (!TLI.isLegalAddImmediate(ADDC.getSExtValue()))
15666     return SDValue();
15667 
15668   assert(((LHS.getOpcode() == AArch64ISD::CSEL && CFVal->isOne()) ||
15669           (LHS.getOpcode() == AArch64ISD::CSNEG && CFVal->isAllOnes())) &&
15670          "Unexpected constant value");
15671 
15672   SDValue NewNode = DAG.getNode(ISD::ADD, DL, VT, RHS, SDValue(CTVal, 0));
15673   SDValue CCVal = DAG.getConstant(AArch64CC, DL, MVT::i32);
15674   SDValue Cmp = LHS.getOperand(3);
15675 
15676   return DAG.getNode(AArch64ISD::CSINC, DL, VT, NewNode, RHS, CCVal, Cmp);
15677 }
15678 
15679 // ADD(UDOT(zero, x, y), A) -->  UDOT(A, x, y)
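      // This relies on UDOT/SDOT accumulating into their first operand, i.e.
      // UDOT(acc, x, y) == acc + dot(x, y), so a zero accumulator can be replaced
      // by the other addend (explanatory note, assuming the usual accumulate
      // semantics of these dot-product nodes).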
15680 static SDValue performAddDotCombine(SDNode *N, SelectionDAG &DAG) {
15681   EVT VT = N->getValueType(0);
15682   if (N->getOpcode() != ISD::ADD)
15683     return SDValue();
15684 
15685   SDValue Dot = N->getOperand(0);
15686   SDValue A = N->getOperand(1);
15687   // Handle commutativity.
15688   auto isZeroDot = [](SDValue Dot) {
15689     return (Dot.getOpcode() == AArch64ISD::UDOT ||
15690             Dot.getOpcode() == AArch64ISD::SDOT) &&
15691            isZerosVector(Dot.getOperand(0).getNode());
15692   };
15693   if (!isZeroDot(Dot))
15694     std::swap(Dot, A);
15695   if (!isZeroDot(Dot))
15696     return SDValue();
15697 
15698   return DAG.getNode(Dot.getOpcode(), SDLoc(N), VT, A, Dot.getOperand(1),
15699                      Dot.getOperand(2));
15700 }
15701 
15702 static bool isNegatedInteger(SDValue Op) {
15703   return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0));
15704 }
15705 
15706 static SDValue getNegatedInteger(SDValue Op, SelectionDAG &DAG) {
15707   SDLoc DL(Op);
15708   EVT VT = Op.getValueType();
15709   SDValue Zero = DAG.getConstant(0, DL, VT);
15710   return DAG.getNode(ISD::SUB, DL, VT, Zero, Op);
15711 }
15712 
15713 // Try to fold
15714 //
15715 // (neg (csel X, Y)) -> (csel (neg X), (neg Y))
15716 //
15717 // The folding helps csel to be matched with csneg without generating
15718 // redundant neg instruction, which includes negation of the csel expansion
15719 // of abs node lowered by lowerABS.
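      // Illustrative example: neg(csel(neg(X), Y, cc, cmp)) becomes
      // csel(X, neg(Y), cc, cmp), after which the remaining negation can fold into
      // a csneg during selection.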
15720 static SDValue performNegCSelCombine(SDNode *N, SelectionDAG &DAG) {
15721   if (!isNegatedInteger(SDValue(N, 0)))
15722     return SDValue();
15723 
15724   SDValue CSel = N->getOperand(1);
15725   if (CSel.getOpcode() != AArch64ISD::CSEL || !CSel->hasOneUse())
15726     return SDValue();
15727 
15728   SDValue N0 = CSel.getOperand(0);
15729   SDValue N1 = CSel.getOperand(1);
15730 
15731   // If neither of them is a negation, the fold is not worthwhile: it would
15732   // introduce two additional negations while removing only one.
15733   if (!isNegatedInteger(N0) && !isNegatedInteger(N1))
15734     return SDValue();
15735 
15736   SDValue N0N = getNegatedInteger(N0, DAG);
15737   SDValue N1N = getNegatedInteger(N1, DAG);
15738 
15739   SDLoc DL(N);
15740   EVT VT = CSel.getValueType();
15741   return DAG.getNode(AArch64ISD::CSEL, DL, VT, N0N, N1N, CSel.getOperand(2),
15742                      CSel.getOperand(3));
15743 }
15744 
15745 // The basic add/sub long vector instructions have variants with "2" on the end
15746 // which act on the high-half of their inputs. They are normally matched by
15747 // patterns like:
15748 //
15749 // (add (zeroext (extract_high LHS)),
15750 //      (zeroext (extract_high RHS)))
15751 // -> uaddl2 vD, vN, vM
15752 //
15753 // However, if one of the extracts is something like a duplicate, this
15754 // instruction can still be used profitably. This function puts the DAG into a
15755 // more appropriate form for those patterns to trigger.
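      // Illustrative example: in
      //   (add (zext (extract_high LHS)), (zext (v8i8 (dup X))))
      // the dup is rewritten as (extract_high (v16i8 (dup X))) so the uaddl2
      // pattern above can match.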
15756 static SDValue performAddSubLongCombine(SDNode *N,
15757                                         TargetLowering::DAGCombinerInfo &DCI,
15758                                         SelectionDAG &DAG) {
15759   if (DCI.isBeforeLegalizeOps())
15760     return SDValue();
15761 
15762   MVT VT = N->getSimpleValueType(0);
15763   if (!VT.is128BitVector()) {
15764     if (N->getOpcode() == ISD::ADD)
15765       return performSetccAddFolding(N, DAG);
15766     return SDValue();
15767   }
15768 
15769   // Make sure both branches are extended in the same way.
15770   SDValue LHS = N->getOperand(0);
15771   SDValue RHS = N->getOperand(1);
15772   if ((LHS.getOpcode() != ISD::ZERO_EXTEND &&
15773        LHS.getOpcode() != ISD::SIGN_EXTEND) ||
15774       LHS.getOpcode() != RHS.getOpcode())
15775     return SDValue();
15776 
15777   unsigned ExtType = LHS.getOpcode();
15778 
15779   // It's only worth doing if at least one of the inputs is already an
15780   // extract, but we don't know which it'll be so we have to try both.
15781   if (isEssentiallyExtractHighSubvector(LHS.getOperand(0))) {
15782     RHS = tryExtendDUPToExtractHigh(RHS.getOperand(0), DAG);
15783     if (!RHS.getNode())
15784       return SDValue();
15785 
15786     RHS = DAG.getNode(ExtType, SDLoc(N), VT, RHS);
15787   } else if (isEssentiallyExtractHighSubvector(RHS.getOperand(0))) {
15788     LHS = tryExtendDUPToExtractHigh(LHS.getOperand(0), DAG);
15789     if (!LHS.getNode())
15790       return SDValue();
15791 
15792     LHS = DAG.getNode(ExtType, SDLoc(N), VT, LHS);
15793   }
15794 
15795   return DAG.getNode(N->getOpcode(), SDLoc(N), VT, LHS, RHS);
15796 }
15797 
15798 static bool isCMP(SDValue Op) {
15799   return Op.getOpcode() == AArch64ISD::SUBS &&
15800          !Op.getNode()->hasAnyUseOfValue(0);
15801 }
15802 
15803 // (CSEL 1 0 CC Cond) => CC
15804 // (CSEL 0 1 CC Cond) => !CC
15805 static Optional<AArch64CC::CondCode> getCSETCondCode(SDValue Op) {
15806   if (Op.getOpcode() != AArch64ISD::CSEL)
15807     return None;
15808   auto CC = static_cast<AArch64CC::CondCode>(Op.getConstantOperandVal(2));
15809   if (CC == AArch64CC::AL || CC == AArch64CC::NV)
15810     return None;
15811   SDValue OpLHS = Op.getOperand(0);
15812   SDValue OpRHS = Op.getOperand(1);
15813   if (isOneConstant(OpLHS) && isNullConstant(OpRHS))
15814     return CC;
15815   if (isNullConstant(OpLHS) && isOneConstant(OpRHS))
15816     return getInvertedCondCode(CC);
15817 
15818   return None;
15819 }
15820 
15821 // (ADC{S} l r (CMP (CSET HS carry) 1)) => (ADC{S} l r carry)
15822 // (SBC{S} l r (CMP 0 (CSET LO carry))) => (SBC{S} l r carry)
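      // In both cases the CSET/CMP pair just round-trips the carry flag through a
      // GPR and back into NZCV, so the ADC/SBC can consume the original flag value
      // directly (explanatory note on why the fold is sound).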
15823 static SDValue foldOverflowCheck(SDNode *Op, SelectionDAG &DAG, bool IsAdd) {
15824   SDValue CmpOp = Op->getOperand(2);
15825   if (!isCMP(CmpOp))
15826     return SDValue();
15827 
15828   if (IsAdd) {
15829     if (!isOneConstant(CmpOp.getOperand(1)))
15830       return SDValue();
15831   } else {
15832     if (!isNullConstant(CmpOp.getOperand(0)))
15833       return SDValue();
15834   }
15835 
15836   SDValue CsetOp = CmpOp->getOperand(IsAdd ? 0 : 1);
15837   auto CC = getCSETCondCode(CsetOp);
15838   if (CC != (IsAdd ? AArch64CC::HS : AArch64CC::LO))
15839     return SDValue();
15840 
15841   return DAG.getNode(Op->getOpcode(), SDLoc(Op), Op->getVTList(),
15842                      Op->getOperand(0), Op->getOperand(1),
15843                      CsetOp.getOperand(3));
15844 }
15845 
15846 // (ADC x 0 cond) => (CINC x HS cond)
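      // With a zero addend the ADC computes "x + carry", which is a conditional
      // increment on the carry flag; it is emitted below as CSINC with the
      // inverted condition (LO), per the equivalence noted in the function body.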
15847 static SDValue foldADCToCINC(SDNode *N, SelectionDAG &DAG) {
15848   SDValue LHS = N->getOperand(0);
15849   SDValue RHS = N->getOperand(1);
15850   SDValue Cond = N->getOperand(2);
15851 
15852   if (!isNullConstant(RHS))
15853     return SDValue();
15854 
15855   EVT VT = N->getValueType(0);
15856   SDLoc DL(N);
15857 
15858   // (CINC x cc cond) <=> (CSINC x x !cc cond)
15859   SDValue CC = DAG.getConstant(AArch64CC::LO, DL, MVT::i32);
15860   return DAG.getNode(AArch64ISD::CSINC, DL, VT, LHS, LHS, CC, Cond);
15861 }
15862 
15863 // Transform vector add(zext i8 to i32, zext i8 to i32)
15864 //  into sext(add(zext(i8 to i16), zext(i8 to i16)) to i32)
15865 // This allows extra uses of saddl/uaddl at the lower vector widths, and fewer
15866 // extends.
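      // Illustrative example: a v8i32 add of two (zext v8i8) operands becomes
      //   (sext (v8i16 add (zext v8i8 A), (zext v8i8 B)))
      // which lets the add use the narrower uaddl form; the sign-extend is safe
      // because the i16 sum of two zero-extended i8 values never sets bit 15.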
15867 static SDValue performVectorAddSubExtCombine(SDNode *N, SelectionDAG &DAG) {
15868   EVT VT = N->getValueType(0);
15869   if (!VT.isFixedLengthVector() || VT.getSizeInBits() <= 128 ||
15870       (N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
15871        N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND) ||
15872       (N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
15873        N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND) ||
15874       N->getOperand(0).getOperand(0).getValueType() !=
15875           N->getOperand(1).getOperand(0).getValueType())
15876     return SDValue();
15877 
15878   SDValue N0 = N->getOperand(0).getOperand(0);
15879   SDValue N1 = N->getOperand(1).getOperand(0);
15880   EVT InVT = N0.getValueType();
15881 
15882   EVT S1 = InVT.getScalarType();
15883   EVT S2 = VT.getScalarType();
15884   if ((S2 == MVT::i32 && S1 == MVT::i8) ||
15885       (S2 == MVT::i64 && (S1 == MVT::i8 || S1 == MVT::i16))) {
15886     SDLoc DL(N);
15887     EVT HalfVT = EVT::getVectorVT(*DAG.getContext(),
15888                                   S2.getHalfSizedIntegerVT(*DAG.getContext()),
15889                                   VT.getVectorElementCount());
15890     SDValue NewN0 = DAG.getNode(N->getOperand(0).getOpcode(), DL, HalfVT, N0);
15891     SDValue NewN1 = DAG.getNode(N->getOperand(1).getOpcode(), DL, HalfVT, N1);
15892     SDValue NewOp = DAG.getNode(N->getOpcode(), DL, HalfVT, NewN0, NewN1);
15893     return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, NewOp);
15894   }
15895   return SDValue();
15896 }
15897 
15898 static SDValue performAddSubCombine(SDNode *N,
15899                                     TargetLowering::DAGCombinerInfo &DCI,
15900                                     SelectionDAG &DAG) {
15901   // Try to change sum of two reductions.
15902   if (SDValue Val = performAddUADDVCombine(N, DAG))
15903     return Val;
15904   if (SDValue Val = performAddDotCombine(N, DAG))
15905     return Val;
15906   if (SDValue Val = performAddCSelIntoCSinc(N, DAG))
15907     return Val;
15908   if (SDValue Val = performNegCSelCombine(N, DAG))
15909     return Val;
15910   if (SDValue Val = performVectorAddSubExtCombine(N, DAG))
15911     return Val;
15912 
15913   return performAddSubLongCombine(N, DCI, DAG);
15914 }
15915 
15916 // Massage DAGs which we can use the high-half "long" operations on into
15917 // something isel will recognize better. E.g.
15918 //
15919 // (aarch64_neon_umull (extract_high vec) (dupv64 scalar)) -->
15920 //   (aarch64_neon_umull (extract_high (v2i64 vec))
15921 //                       (extract_high (v2i64 (dup128 scalar))))
15922 //
15923 static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N,
15924                                        TargetLowering::DAGCombinerInfo &DCI,
15925                                        SelectionDAG &DAG) {
15926   if (DCI.isBeforeLegalizeOps())
15927     return SDValue();
15928 
15929   SDValue LHS = N->getOperand((IID == Intrinsic::not_intrinsic) ? 0 : 1);
15930   SDValue RHS = N->getOperand((IID == Intrinsic::not_intrinsic) ? 1 : 2);
15931   assert(LHS.getValueType().is64BitVector() &&
15932          RHS.getValueType().is64BitVector() &&
15933          "unexpected shape for long operation");
15934 
15935   // Either node could be a DUP, but it's not worth doing both of them (you'd
15936   // just as well use the non-high version) so look for a corresponding extract
15937   // operation on the other "wing".
15938   if (isEssentiallyExtractHighSubvector(LHS)) {
15939     RHS = tryExtendDUPToExtractHigh(RHS, DAG);
15940     if (!RHS.getNode())
15941       return SDValue();
15942   } else if (isEssentiallyExtractHighSubvector(RHS)) {
15943     LHS = tryExtendDUPToExtractHigh(LHS, DAG);
15944     if (!LHS.getNode())
15945       return SDValue();
15946   }
15947 
15948   if (IID == Intrinsic::not_intrinsic)
15949     return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), LHS, RHS);
15950 
15951   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), N->getValueType(0),
15952                      N->getOperand(0), LHS, RHS);
15953 }
15954 
15955 static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) {
15956   MVT ElemTy = N->getSimpleValueType(0).getScalarType();
15957   unsigned ElemBits = ElemTy.getSizeInBits();
15958 
15959   int64_t ShiftAmount;
15960   if (BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(2))) {
15961     APInt SplatValue, SplatUndef;
15962     unsigned SplatBitSize;
15963     bool HasAnyUndefs;
15964     if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
15965                               HasAnyUndefs, ElemBits) ||
15966         SplatBitSize != ElemBits)
15967       return SDValue();
15968 
15969     ShiftAmount = SplatValue.getSExtValue();
15970   } else if (ConstantSDNode *CVN = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
15971     ShiftAmount = CVN->getSExtValue();
15972   } else
15973     return SDValue();
15974 
15975   unsigned Opcode;
15976   bool IsRightShift;
15977   switch (IID) {
15978   default:
15979     llvm_unreachable("Unknown shift intrinsic");
15980   case Intrinsic::aarch64_neon_sqshl:
15981     Opcode = AArch64ISD::SQSHL_I;
15982     IsRightShift = false;
15983     break;
15984   case Intrinsic::aarch64_neon_uqshl:
15985     Opcode = AArch64ISD::UQSHL_I;
15986     IsRightShift = false;
15987     break;
15988   case Intrinsic::aarch64_neon_srshl:
15989     Opcode = AArch64ISD::SRSHR_I;
15990     IsRightShift = true;
15991     break;
15992   case Intrinsic::aarch64_neon_urshl:
15993     Opcode = AArch64ISD::URSHR_I;
15994     IsRightShift = true;
15995     break;
15996   case Intrinsic::aarch64_neon_sqshlu:
15997     Opcode = AArch64ISD::SQSHLU_I;
15998     IsRightShift = false;
15999     break;
16000   case Intrinsic::aarch64_neon_sshl:
16001   case Intrinsic::aarch64_neon_ushl:
16002     // For positive shift amounts we can use SHL, as ushl/sshl perform a regular
16003     // left shift for positive shift amounts. Below, we only replace the current
16004     // node with VSHL if this condition is met.
16005     Opcode = AArch64ISD::VSHL;
16006     IsRightShift = false;
16007     break;
16008   }
16009 
16010   if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(int)ElemBits) {
16011     SDLoc dl(N);
16012     return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1),
16013                        DAG.getConstant(-ShiftAmount, dl, MVT::i32));
16014   } else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits) {
16015     SDLoc dl(N);
16016     return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1),
16017                        DAG.getConstant(ShiftAmount, dl, MVT::i32));
16018   }
16019 
16020   return SDValue();
16021 }
16022 
16023 // The CRC32[BH] instructions ignore the high bits of their data operand. Since
16024 // the intrinsics must be legal and take an i32, there's almost certainly a
16025 // zext (matched below as an AND mask) in the DAG which we can eliminate.
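      // Illustrative example: crc32b only reads the low 8 bits of its data operand,
      // so (crc32b crc, (and x, 0xff)) can drop the AND and use x directly.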
16026 static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG) {
16027   SDValue AndN = N->getOperand(2);
16028   if (AndN.getOpcode() != ISD::AND)
16029     return SDValue();
16030 
16031   ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(AndN.getOperand(1));
16032   if (!CMask || CMask->getZExtValue() != Mask)
16033     return SDValue();
16034 
16035   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), MVT::i32,
16036                      N->getOperand(0), N->getOperand(1), AndN.getOperand(0));
16037 }
16038 
16039 static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N,
16040                                            SelectionDAG &DAG) {
16041   SDLoc dl(N);
16042   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0),
16043                      DAG.getNode(Opc, dl,
16044                                  N->getOperand(1).getSimpleValueType(),
16045                                  N->getOperand(1)),
16046                      DAG.getConstant(0, dl, MVT::i64));
16047 }
16048 
16049 static SDValue LowerSVEIntrinsicIndex(SDNode *N, SelectionDAG &DAG) {
16050   SDLoc DL(N);
16051   SDValue Op1 = N->getOperand(1);
16052   SDValue Op2 = N->getOperand(2);
16053   EVT ScalarTy = Op2.getValueType();
16054   if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
16055     ScalarTy = MVT::i32;
16056 
16057   // Lower index_vector(base, step) to mul(step_vector(1), splat(step)) + splat(base).
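        // Illustrative example for nxv4i32 index(10, 3):
        //   step_vector(1) = <0,1,2,3,...>, so the result is
        //   <0,1,2,3,...> * splat(3) + splat(10) = <10,13,16,19,...>.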
16058   SDValue StepVector = DAG.getStepVector(DL, N->getValueType(0));
16059   SDValue Step = DAG.getNode(ISD::SPLAT_VECTOR, DL, N->getValueType(0), Op2);
16060   SDValue Mul = DAG.getNode(ISD::MUL, DL, N->getValueType(0), StepVector, Step);
16061   SDValue Base = DAG.getNode(ISD::SPLAT_VECTOR, DL, N->getValueType(0), Op1);
16062   return DAG.getNode(ISD::ADD, DL, N->getValueType(0), Mul, Base);
16063 }
16064 
16065 static SDValue LowerSVEIntrinsicDUP(SDNode *N, SelectionDAG &DAG) {
16066   SDLoc dl(N);
16067   SDValue Scalar = N->getOperand(3);
16068   EVT ScalarTy = Scalar.getValueType();
16069 
16070   if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
16071     Scalar = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Scalar);
16072 
16073   SDValue Passthru = N->getOperand(1);
16074   SDValue Pred = N->getOperand(2);
16075   return DAG.getNode(AArch64ISD::DUP_MERGE_PASSTHRU, dl, N->getValueType(0),
16076                      Pred, Scalar, Passthru);
16077 }
16078 
16079 static SDValue LowerSVEIntrinsicEXT(SDNode *N, SelectionDAG &DAG) {
16080   SDLoc dl(N);
16081   LLVMContext &Ctx = *DAG.getContext();
16082   EVT VT = N->getValueType(0);
16083 
16084   assert(VT.isScalableVector() && "Expected a scalable vector.");
16085 
16086   // Current lowering only supports the SVE-ACLE types.
16087   if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock)
16088     return SDValue();
16089 
16090   unsigned ElemSize = VT.getVectorElementType().getSizeInBits() / 8;
16091   unsigned ByteSize = VT.getSizeInBits().getKnownMinSize() / 8;
16092   EVT ByteVT =
16093       EVT::getVectorVT(Ctx, MVT::i8, ElementCount::getScalable(ByteSize));
16094 
16095   // Convert everything to the domain of EXT (i.e. bytes).
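        // Illustrative example: for nxv4i32 each element is ElemSize = 4 bytes, so
        // an element index of 3 becomes a byte-domain EXT amount of 3 * 4 = 12 via
        // the multiply below.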
16096   SDValue Op0 = DAG.getNode(ISD::BITCAST, dl, ByteVT, N->getOperand(1));
16097   SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, ByteVT, N->getOperand(2));
16098   SDValue Op2 = DAG.getNode(ISD::MUL, dl, MVT::i32, N->getOperand(3),
16099                             DAG.getConstant(ElemSize, dl, MVT::i32));
16100 
16101   SDValue EXT = DAG.getNode(AArch64ISD::EXT, dl, ByteVT, Op0, Op1, Op2);
16102   return DAG.getNode(ISD::BITCAST, dl, VT, EXT);
16103 }
16104 
16105 static SDValue tryConvertSVEWideCompare(SDNode *N, ISD::CondCode CC,
16106                                         TargetLowering::DAGCombinerInfo &DCI,
16107                                         SelectionDAG &DAG) {
16108   if (DCI.isBeforeLegalize())
16109     return SDValue();
16110 
16111   SDValue Comparator = N->getOperand(3);
16112   if (Comparator.getOpcode() == AArch64ISD::DUP ||
16113       Comparator.getOpcode() == ISD::SPLAT_VECTOR) {
16114     unsigned IID = getIntrinsicID(N);
16115     EVT VT = N->getValueType(0);
16116     EVT CmpVT = N->getOperand(2).getValueType();
16117     SDValue Pred = N->getOperand(1);
16118     SDValue Imm;
16119     SDLoc DL(N);
16120 
16121     switch (IID) {
16122     default:
16123       llvm_unreachable("Called with wrong intrinsic!");
16124       break;
16125 
16126     // Signed comparisons
16127     case Intrinsic::aarch64_sve_cmpeq_wide:
16128     case Intrinsic::aarch64_sve_cmpne_wide:
16129     case Intrinsic::aarch64_sve_cmpge_wide:
16130     case Intrinsic::aarch64_sve_cmpgt_wide:
16131     case Intrinsic::aarch64_sve_cmplt_wide:
16132     case Intrinsic::aarch64_sve_cmple_wide: {
16133       if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) {
16134         int64_t ImmVal = CN->getSExtValue();
16135         if (ImmVal >= -16 && ImmVal <= 15)
16136           Imm = DAG.getConstant(ImmVal, DL, MVT::i32);
16137         else
16138           return SDValue();
16139       }
16140       break;
16141     }
16142     // Unsigned comparisons
16143     case Intrinsic::aarch64_sve_cmphs_wide:
16144     case Intrinsic::aarch64_sve_cmphi_wide:
16145     case Intrinsic::aarch64_sve_cmplo_wide:
16146     case Intrinsic::aarch64_sve_cmpls_wide:  {
16147       if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) {
16148         uint64_t ImmVal = CN->getZExtValue();
16149         if (ImmVal <= 127)
16150           Imm = DAG.getConstant(ImmVal, DL, MVT::i32);
16151         else
16152           return SDValue();
16153       }
16154       break;
16155     }
16156     }
16157 
16158     if (!Imm)
16159       return SDValue();
16160 
16161     SDValue Splat = DAG.getNode(ISD::SPLAT_VECTOR, DL, CmpVT, Imm);
16162     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, VT, Pred,
16163                        N->getOperand(2), Splat, DAG.getCondCode(CC));
16164   }
16165 
16166   return SDValue();
16167 }
16168 
16169 static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
16170                         AArch64CC::CondCode Cond) {
16171   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16172 
16173   SDLoc DL(Op);
16174   assert(Op.getValueType().isScalableVector() &&
16175          TLI.isTypeLegal(Op.getValueType()) &&
16176          "Expected legal scalable vector type!");
16177   assert(Op.getValueType() == Pg.getValueType() &&
16178          "Expected same type for PTEST operands");
16179 
16180   // Ensure target specific opcodes are using legal type.
16181   EVT OutVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
16182   SDValue TVal = DAG.getConstant(1, DL, OutVT);
16183   SDValue FVal = DAG.getConstant(0, DL, OutVT);
16184 
16185   // Ensure operands have type nxv16i1.
16186   if (Op.getValueType() != MVT::nxv16i1) {
16187     if ((Cond == AArch64CC::ANY_ACTIVE || Cond == AArch64CC::NONE_ACTIVE) &&
16188         isZeroingInactiveLanes(Op))
16189       Pg = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv16i1, Pg);
16190     else
16191       Pg = getSVEPredicateBitCast(MVT::nxv16i1, Pg, DAG);
16192     Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv16i1, Op);
16193   }
16194 
16195   // Set condition code (CC) flags.
16196   SDValue Test = DAG.getNode(AArch64ISD::PTEST, DL, MVT::Other, Pg, Op);
16197 
16198   // Convert CC to integer based on requested condition.
16199   // NOTE: Cond is inverted to promote CSEL's removal when it feeds a compare.
16200   SDValue CC = DAG.getConstant(getInvertedCondCode(Cond), DL, MVT::i32);
16201   SDValue Res = DAG.getNode(AArch64ISD::CSEL, DL, OutVT, FVal, TVal, CC, Test);
16202   return DAG.getZExtOrTrunc(Res, DL, VT);
16203 }
16204 
16205 static SDValue combineSVEReductionInt(SDNode *N, unsigned Opc,
16206                                       SelectionDAG &DAG) {
16207   SDLoc DL(N);
16208 
16209   SDValue Pred = N->getOperand(1);
16210   SDValue VecToReduce = N->getOperand(2);
16211 
16212   // NOTE: The integer reduction's result type is not always linked to the
16213   // operand's element type so we construct it from the intrinsic's result type.
16214   EVT ReduceVT = getPackedSVEVectorVT(N->getValueType(0));
16215   SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, VecToReduce);
16216 
16217   // SVE reductions set the whole vector register with the first element
16218   // containing the reduction result, which we'll now extract.
16219   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
16220   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
16221                      Zero);
16222 }
16223 
16224 static SDValue combineSVEReductionFP(SDNode *N, unsigned Opc,
16225                                      SelectionDAG &DAG) {
16226   SDLoc DL(N);
16227 
16228   SDValue Pred = N->getOperand(1);
16229   SDValue VecToReduce = N->getOperand(2);
16230 
16231   EVT ReduceVT = VecToReduce.getValueType();
16232   SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, VecToReduce);
16233 
16234   // SVE reductions set the whole vector register with the first element
16235   // containing the reduction result, which we'll now extract.
16236   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
16237   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
16238                      Zero);
16239 }
16240 
16241 static SDValue combineSVEReductionOrderedFP(SDNode *N, unsigned Opc,
16242                                             SelectionDAG &DAG) {
16243   SDLoc DL(N);
16244 
16245   SDValue Pred = N->getOperand(1);
16246   SDValue InitVal = N->getOperand(2);
16247   SDValue VecToReduce = N->getOperand(3);
16248   EVT ReduceVT = VecToReduce.getValueType();
16249 
16250   // Ordered reductions use the first lane of the result vector as the
16251   // reduction's initial value.
16252   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
16253   InitVal = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ReduceVT,
16254                         DAG.getUNDEF(ReduceVT), InitVal, Zero);
16255 
16256   SDValue Reduce = DAG.getNode(Opc, DL, ReduceVT, Pred, InitVal, VecToReduce);
16257 
16258   // SVE reductions set the whole vector register with the first element
16259   // containing the reduction result, which we'll now extract.
16260   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
16261                      Zero);
16262 }
16263 
16264 static bool isAllInactivePredicate(SDValue N) {
16265   // Look through cast.
16266   while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST)
16267     N = N.getOperand(0);
16268 
16269   return ISD::isConstantSplatVectorAllZeros(N.getNode());
16270 }
16271 
16272 static bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) {
16273   unsigned NumElts = N.getValueType().getVectorMinNumElements();
16274 
16275   // Look through cast.
16276   while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST) {
16277     N = N.getOperand(0);
16278     // When reinterpreting from a type with fewer elements the "new" elements
16279     // are not active, so bail if they're likely to be used.
16280     if (N.getValueType().getVectorMinNumElements() < NumElts)
16281       return false;
16282   }
16283 
16284   if (ISD::isConstantSplatVectorAllOnes(N.getNode()))
16285     return true;
16286 
16287   // "ptrue p.<ty>, all" can be considered all active when <ty> is the same size
16288   // or smaller than the implicit element type represented by N.
16289   // NOTE: A larger element count implies a smaller element type.
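        // Illustrative example: a "ptrue all" of type nxv16i1 used where only an
        // nxv4i1's worth of lanes is consumed (NumElts == 4) still has every
        // relevant lane active, so it is treated as all active here.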
16290   if (N.getOpcode() == AArch64ISD::PTRUE &&
16291       N.getConstantOperandVal(0) == AArch64SVEPredPattern::all)
16292     return N.getValueType().getVectorMinNumElements() >= NumElts;
16293 
16294   // If we're compiling for a specific vector-length, we can check if the
16295   // pattern's VL equals that of the scalable vector at runtime.
16296   if (N.getOpcode() == AArch64ISD::PTRUE) {
16297     const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
16298     unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
16299     unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
16300     if (MaxSVESize && MinSVESize == MaxSVESize) {
16301       unsigned VScale = MaxSVESize / AArch64::SVEBitsPerBlock;
16302       unsigned PatNumElts =
16303           getNumElementsFromSVEPredPattern(N.getConstantOperandVal(0));
16304       return PatNumElts == (NumElts * VScale);
16305     }
16306   }
16307 
16308   return false;
16309 }
16310 
16311 // If a merged operation has no inactive lanes we can relax it to a predicated
16312 // or unpredicated operation, which potentially allows better isel (perhaps
16313 // using immediate forms) or relaxing register reuse requirements.
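      // Illustrative example: sve.add(pg, x, y) with an all-active pg is lowered
      // below as a plain ISD::ADD on the scalable vectors (the UnpredOp case),
      // letting generic combines and immediate-form patterns apply.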
16314 static SDValue convertMergedOpToPredOp(SDNode *N, unsigned Opc,
16315                                        SelectionDAG &DAG, bool UnpredOp = false,
16316                                        bool SwapOperands = false) {
16317   assert(N->getOpcode() == ISD::INTRINSIC_WO_CHAIN && "Expected intrinsic!");
16318   assert(N->getNumOperands() == 4 && "Expected 3 operand intrinsic!");
16319   SDValue Pg = N->getOperand(1);
16320   SDValue Op1 = N->getOperand(SwapOperands ? 3 : 2);
16321   SDValue Op2 = N->getOperand(SwapOperands ? 2 : 3);
16322 
16323   // ISD way to specify an all active predicate.
16324   if (isAllActivePredicate(DAG, Pg)) {
16325     if (UnpredOp)
16326       return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Op1, Op2);
16327 
16328     return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Pg, Op1, Op2);
16329   }
16330 
16331   // FUTURE: SplatVector(true)
16332   return SDValue();
16333 }
16334 
16335 static SDValue performIntrinsicCombine(SDNode *N,
16336                                        TargetLowering::DAGCombinerInfo &DCI,
16337                                        const AArch64Subtarget *Subtarget) {
16338   SelectionDAG &DAG = DCI.DAG;
16339   unsigned IID = getIntrinsicID(N);
16340   switch (IID) {
16341   default:
16342     break;
16343   case Intrinsic::get_active_lane_mask: {
16344     SDValue Res = SDValue();
16345     EVT VT = N->getValueType(0);
16346     if (VT.isFixedLengthVector()) {
16347       // We can use the SVE whilelo instruction to lower this intrinsic by
16348       // creating the appropriate sequence of scalable vector operations and
16349       // then extracting a fixed-width subvector from the scalable vector.
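            // Illustrative example for a v4i1 result: whilelo produces an nxv4i1,
            // which is sign-extended to nxv4i32, a fixed v4i32 prefix is extracted,
            // and the result is truncated back to v4i1.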
16350 
16351       SDLoc DL(N);
16352       SDValue ID =
16353           DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, DL, MVT::i64);
16354 
16355       EVT WhileVT = EVT::getVectorVT(
16356           *DAG.getContext(), MVT::i1,
16357           ElementCount::getScalable(VT.getVectorNumElements()));
16358 
16359       // Get promoted scalable vector VT, i.e. promote nxv4i1 -> nxv4i32.
16360       EVT PromVT = getPromotedVTForPredicate(WhileVT);
16361 
16362       // Get the fixed-width equivalent of PromVT for extraction.
16363       EVT ExtVT =
16364           EVT::getVectorVT(*DAG.getContext(), PromVT.getVectorElementType(),
16365                            VT.getVectorElementCount());
16366 
16367       Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WhileVT, ID,
16368                         N->getOperand(1), N->getOperand(2));
16369       Res = DAG.getNode(ISD::SIGN_EXTEND, DL, PromVT, Res);
16370       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtVT, Res,
16371                         DAG.getConstant(0, DL, MVT::i64));
16372       Res = DAG.getNode(ISD::TRUNCATE, DL, VT, Res);
16373     }
16374     return Res;
16375   }
16376   case Intrinsic::aarch64_neon_vcvtfxs2fp:
16377   case Intrinsic::aarch64_neon_vcvtfxu2fp:
16378     return tryCombineFixedPointConvert(N, DCI, DAG);
16379   case Intrinsic::aarch64_neon_saddv:
16380     return combineAcrossLanesIntrinsic(AArch64ISD::SADDV, N, DAG);
16381   case Intrinsic::aarch64_neon_uaddv:
16382     return combineAcrossLanesIntrinsic(AArch64ISD::UADDV, N, DAG);
16383   case Intrinsic::aarch64_neon_sminv:
16384     return combineAcrossLanesIntrinsic(AArch64ISD::SMINV, N, DAG);
16385   case Intrinsic::aarch64_neon_uminv:
16386     return combineAcrossLanesIntrinsic(AArch64ISD::UMINV, N, DAG);
16387   case Intrinsic::aarch64_neon_smaxv:
16388     return combineAcrossLanesIntrinsic(AArch64ISD::SMAXV, N, DAG);
16389   case Intrinsic::aarch64_neon_umaxv:
16390     return combineAcrossLanesIntrinsic(AArch64ISD::UMAXV, N, DAG);
16391   case Intrinsic::aarch64_neon_fmax:
16392     return DAG.getNode(ISD::FMAXIMUM, SDLoc(N), N->getValueType(0),
16393                        N->getOperand(1), N->getOperand(2));
16394   case Intrinsic::aarch64_neon_fmin:
16395     return DAG.getNode(ISD::FMINIMUM, SDLoc(N), N->getValueType(0),
16396                        N->getOperand(1), N->getOperand(2));
16397   case Intrinsic::aarch64_neon_fmaxnm:
16398     return DAG.getNode(ISD::FMAXNUM, SDLoc(N), N->getValueType(0),
16399                        N->getOperand(1), N->getOperand(2));
16400   case Intrinsic::aarch64_neon_fminnm:
16401     return DAG.getNode(ISD::FMINNUM, SDLoc(N), N->getValueType(0),
16402                        N->getOperand(1), N->getOperand(2));
16403   case Intrinsic::aarch64_neon_smull:
16404     return DAG.getNode(AArch64ISD::SMULL, SDLoc(N), N->getValueType(0),
16405                        N->getOperand(1), N->getOperand(2));
16406   case Intrinsic::aarch64_neon_umull:
16407     return DAG.getNode(AArch64ISD::UMULL, SDLoc(N), N->getValueType(0),
16408                        N->getOperand(1), N->getOperand(2));
16409   case Intrinsic::aarch64_neon_pmull:
16410   case Intrinsic::aarch64_neon_sqdmull:
16411     return tryCombineLongOpWithDup(IID, N, DCI, DAG);
16412   case Intrinsic::aarch64_neon_sqshl:
16413   case Intrinsic::aarch64_neon_uqshl:
16414   case Intrinsic::aarch64_neon_sqshlu:
16415   case Intrinsic::aarch64_neon_srshl:
16416   case Intrinsic::aarch64_neon_urshl:
16417   case Intrinsic::aarch64_neon_sshl:
16418   case Intrinsic::aarch64_neon_ushl:
16419     return tryCombineShiftImm(IID, N, DAG);
16420   case Intrinsic::aarch64_crc32b:
16421   case Intrinsic::aarch64_crc32cb:
16422     return tryCombineCRC32(0xff, N, DAG);
16423   case Intrinsic::aarch64_crc32h:
16424   case Intrinsic::aarch64_crc32ch:
16425     return tryCombineCRC32(0xffff, N, DAG);
16426   case Intrinsic::aarch64_sve_saddv:
16427     // There is no i64 version of SADDV because the sign is irrelevant.
16428     if (N->getOperand(2)->getValueType(0).getVectorElementType() == MVT::i64)
16429       return combineSVEReductionInt(N, AArch64ISD::UADDV_PRED, DAG);
16430     else
16431       return combineSVEReductionInt(N, AArch64ISD::SADDV_PRED, DAG);
16432   case Intrinsic::aarch64_sve_uaddv:
16433     return combineSVEReductionInt(N, AArch64ISD::UADDV_PRED, DAG);
16434   case Intrinsic::aarch64_sve_smaxv:
16435     return combineSVEReductionInt(N, AArch64ISD::SMAXV_PRED, DAG);
16436   case Intrinsic::aarch64_sve_umaxv:
16437     return combineSVEReductionInt(N, AArch64ISD::UMAXV_PRED, DAG);
16438   case Intrinsic::aarch64_sve_sminv:
16439     return combineSVEReductionInt(N, AArch64ISD::SMINV_PRED, DAG);
16440   case Intrinsic::aarch64_sve_uminv:
16441     return combineSVEReductionInt(N, AArch64ISD::UMINV_PRED, DAG);
16442   case Intrinsic::aarch64_sve_orv:
16443     return combineSVEReductionInt(N, AArch64ISD::ORV_PRED, DAG);
16444   case Intrinsic::aarch64_sve_eorv:
16445     return combineSVEReductionInt(N, AArch64ISD::EORV_PRED, DAG);
16446   case Intrinsic::aarch64_sve_andv:
16447     return combineSVEReductionInt(N, AArch64ISD::ANDV_PRED, DAG);
16448   case Intrinsic::aarch64_sve_index:
16449     return LowerSVEIntrinsicIndex(N, DAG);
16450   case Intrinsic::aarch64_sve_dup:
16451     return LowerSVEIntrinsicDUP(N, DAG);
16452   case Intrinsic::aarch64_sve_dup_x:
16453     return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), N->getValueType(0),
16454                        N->getOperand(1));
16455   case Intrinsic::aarch64_sve_ext:
16456     return LowerSVEIntrinsicEXT(N, DAG);
16457   case Intrinsic::aarch64_sve_mul:
16458     return convertMergedOpToPredOp(N, AArch64ISD::MUL_PRED, DAG);
16459   case Intrinsic::aarch64_sve_smulh:
16460     return convertMergedOpToPredOp(N, AArch64ISD::MULHS_PRED, DAG);
16461   case Intrinsic::aarch64_sve_umulh:
16462     return convertMergedOpToPredOp(N, AArch64ISD::MULHU_PRED, DAG);
16463   case Intrinsic::aarch64_sve_smin:
16464     return convertMergedOpToPredOp(N, AArch64ISD::SMIN_PRED, DAG);
16465   case Intrinsic::aarch64_sve_umin:
16466     return convertMergedOpToPredOp(N, AArch64ISD::UMIN_PRED, DAG);
16467   case Intrinsic::aarch64_sve_smax:
16468     return convertMergedOpToPredOp(N, AArch64ISD::SMAX_PRED, DAG);
16469   case Intrinsic::aarch64_sve_umax:
16470     return convertMergedOpToPredOp(N, AArch64ISD::UMAX_PRED, DAG);
16471   case Intrinsic::aarch64_sve_lsl:
16472     return convertMergedOpToPredOp(N, AArch64ISD::SHL_PRED, DAG);
16473   case Intrinsic::aarch64_sve_lsr:
16474     return convertMergedOpToPredOp(N, AArch64ISD::SRL_PRED, DAG);
16475   case Intrinsic::aarch64_sve_asr:
16476     return convertMergedOpToPredOp(N, AArch64ISD::SRA_PRED, DAG);
16477   case Intrinsic::aarch64_sve_fadd:
16478     return convertMergedOpToPredOp(N, AArch64ISD::FADD_PRED, DAG);
16479   case Intrinsic::aarch64_sve_fsub:
16480     return convertMergedOpToPredOp(N, AArch64ISD::FSUB_PRED, DAG);
16481   case Intrinsic::aarch64_sve_fmul:
16482     return convertMergedOpToPredOp(N, AArch64ISD::FMUL_PRED, DAG);
16483   case Intrinsic::aarch64_sve_add:
16484     return convertMergedOpToPredOp(N, ISD::ADD, DAG, true);
16485   case Intrinsic::aarch64_sve_sub:
16486     return convertMergedOpToPredOp(N, ISD::SUB, DAG, true);
16487   case Intrinsic::aarch64_sve_subr:
16488     return convertMergedOpToPredOp(N, ISD::SUB, DAG, true, true);
16489   case Intrinsic::aarch64_sve_and:
16490     return convertMergedOpToPredOp(N, ISD::AND, DAG, true);
16491   case Intrinsic::aarch64_sve_bic:
16492     return convertMergedOpToPredOp(N, AArch64ISD::BIC, DAG, true);
16493   case Intrinsic::aarch64_sve_eor:
16494     return convertMergedOpToPredOp(N, ISD::XOR, DAG, true);
16495   case Intrinsic::aarch64_sve_orr:
16496     return convertMergedOpToPredOp(N, ISD::OR, DAG, true);
16497   case Intrinsic::aarch64_sve_sabd:
16498     return convertMergedOpToPredOp(N, ISD::ABDS, DAG, true);
16499   case Intrinsic::aarch64_sve_uabd:
16500     return convertMergedOpToPredOp(N, ISD::ABDU, DAG, true);
16501   case Intrinsic::aarch64_sve_sqadd:
16502     return convertMergedOpToPredOp(N, ISD::SADDSAT, DAG, true);
16503   case Intrinsic::aarch64_sve_sqsub:
16504     return convertMergedOpToPredOp(N, ISD::SSUBSAT, DAG, true);
16505   case Intrinsic::aarch64_sve_uqadd:
16506     return convertMergedOpToPredOp(N, ISD::UADDSAT, DAG, true);
16507   case Intrinsic::aarch64_sve_uqsub:
16508     return convertMergedOpToPredOp(N, ISD::USUBSAT, DAG, true);
16509   case Intrinsic::aarch64_sve_sqadd_x:
16510     return DAG.getNode(ISD::SADDSAT, SDLoc(N), N->getValueType(0),
16511                        N->getOperand(1), N->getOperand(2));
16512   case Intrinsic::aarch64_sve_sqsub_x:
16513     return DAG.getNode(ISD::SSUBSAT, SDLoc(N), N->getValueType(0),
16514                        N->getOperand(1), N->getOperand(2));
16515   case Intrinsic::aarch64_sve_uqadd_x:
16516     return DAG.getNode(ISD::UADDSAT, SDLoc(N), N->getValueType(0),
16517                        N->getOperand(1), N->getOperand(2));
16518   case Intrinsic::aarch64_sve_uqsub_x:
16519     return DAG.getNode(ISD::USUBSAT, SDLoc(N), N->getValueType(0),
16520                        N->getOperand(1), N->getOperand(2));
16521   case Intrinsic::aarch64_sve_asrd:
16522     return DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, SDLoc(N), N->getValueType(0),
16523                        N->getOperand(1), N->getOperand(2), N->getOperand(3));
16524   case Intrinsic::aarch64_sve_cmphs:
16525     if (!N->getOperand(2).getValueType().isFloatingPoint())
16526       return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16527                          N->getValueType(0), N->getOperand(1), N->getOperand(2),
16528                          N->getOperand(3), DAG.getCondCode(ISD::SETUGE));
16529     break;
16530   case Intrinsic::aarch64_sve_cmphi:
16531     if (!N->getOperand(2).getValueType().isFloatingPoint())
16532       return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16533                          N->getValueType(0), N->getOperand(1), N->getOperand(2),
16534                          N->getOperand(3), DAG.getCondCode(ISD::SETUGT));
16535     break;
16536   case Intrinsic::aarch64_sve_fcmpge:
16537   case Intrinsic::aarch64_sve_cmpge:
16538     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16539                        N->getValueType(0), N->getOperand(1), N->getOperand(2),
16540                        N->getOperand(3), DAG.getCondCode(ISD::SETGE));
16541     break;
16542   case Intrinsic::aarch64_sve_fcmpgt:
16543   case Intrinsic::aarch64_sve_cmpgt:
16544     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16545                        N->getValueType(0), N->getOperand(1), N->getOperand(2),
16546                        N->getOperand(3), DAG.getCondCode(ISD::SETGT));
16547     break;
16548   case Intrinsic::aarch64_sve_fcmpeq:
16549   case Intrinsic::aarch64_sve_cmpeq:
16550     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16551                        N->getValueType(0), N->getOperand(1), N->getOperand(2),
16552                        N->getOperand(3), DAG.getCondCode(ISD::SETEQ));
16553     break;
16554   case Intrinsic::aarch64_sve_fcmpne:
16555   case Intrinsic::aarch64_sve_cmpne:
16556     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16557                        N->getValueType(0), N->getOperand(1), N->getOperand(2),
16558                        N->getOperand(3), DAG.getCondCode(ISD::SETNE));
16559     break;
16560   case Intrinsic::aarch64_sve_fcmpuo:
16561     return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, SDLoc(N),
16562                        N->getValueType(0), N->getOperand(1), N->getOperand(2),
16563                        N->getOperand(3), DAG.getCondCode(ISD::SETUO));
16564     break;
16565   case Intrinsic::aarch64_sve_fadda:
16566     return combineSVEReductionOrderedFP(N, AArch64ISD::FADDA_PRED, DAG);
16567   case Intrinsic::aarch64_sve_faddv:
16568     return combineSVEReductionFP(N, AArch64ISD::FADDV_PRED, DAG);
16569   case Intrinsic::aarch64_sve_fmaxnmv:
16570     return combineSVEReductionFP(N, AArch64ISD::FMAXNMV_PRED, DAG);
16571   case Intrinsic::aarch64_sve_fmaxv:
16572     return combineSVEReductionFP(N, AArch64ISD::FMAXV_PRED, DAG);
16573   case Intrinsic::aarch64_sve_fminnmv:
16574     return combineSVEReductionFP(N, AArch64ISD::FMINNMV_PRED, DAG);
16575   case Intrinsic::aarch64_sve_fminv:
16576     return combineSVEReductionFP(N, AArch64ISD::FMINV_PRED, DAG);
16577   case Intrinsic::aarch64_sve_sel:
16578     return DAG.getNode(ISD::VSELECT, SDLoc(N), N->getValueType(0),
16579                        N->getOperand(1), N->getOperand(2), N->getOperand(3));
16580   case Intrinsic::aarch64_sve_cmpeq_wide:
16581     return tryConvertSVEWideCompare(N, ISD::SETEQ, DCI, DAG);
16582   case Intrinsic::aarch64_sve_cmpne_wide:
16583     return tryConvertSVEWideCompare(N, ISD::SETNE, DCI, DAG);
16584   case Intrinsic::aarch64_sve_cmpge_wide:
16585     return tryConvertSVEWideCompare(N, ISD::SETGE, DCI, DAG);
16586   case Intrinsic::aarch64_sve_cmpgt_wide:
16587     return tryConvertSVEWideCompare(N, ISD::SETGT, DCI, DAG);
16588   case Intrinsic::aarch64_sve_cmplt_wide:
16589     return tryConvertSVEWideCompare(N, ISD::SETLT, DCI, DAG);
16590   case Intrinsic::aarch64_sve_cmple_wide:
16591     return tryConvertSVEWideCompare(N, ISD::SETLE, DCI, DAG);
16592   case Intrinsic::aarch64_sve_cmphs_wide:
16593     return tryConvertSVEWideCompare(N, ISD::SETUGE, DCI, DAG);
16594   case Intrinsic::aarch64_sve_cmphi_wide:
16595     return tryConvertSVEWideCompare(N, ISD::SETUGT, DCI, DAG);
16596   case Intrinsic::aarch64_sve_cmplo_wide:
16597     return tryConvertSVEWideCompare(N, ISD::SETULT, DCI, DAG);
16598   case Intrinsic::aarch64_sve_cmpls_wide:
16599     return tryConvertSVEWideCompare(N, ISD::SETULE, DCI, DAG);
16600   case Intrinsic::aarch64_sve_ptest_any:
16601     return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2),
16602                     AArch64CC::ANY_ACTIVE);
16603   case Intrinsic::aarch64_sve_ptest_first:
16604     return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2),
16605                     AArch64CC::FIRST_ACTIVE);
16606   case Intrinsic::aarch64_sve_ptest_last:
16607     return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2),
16608                     AArch64CC::LAST_ACTIVE);
16609   }
16610   return SDValue();
16611 }
16612 
16613 static bool isCheapToExtend(const SDValue &N) {
16614   unsigned OC = N->getOpcode();
16615   return OC == ISD::LOAD || OC == ISD::MLOAD ||
16616          ISD::isConstantSplatVectorAllZeros(N.getNode());
16617 }
16618 
16619 static SDValue
16620 performSignExtendSetCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
16621                               SelectionDAG &DAG) {
16622   // If we have (sext (setcc A B)) and A and B are cheap to extend,
16623   // we can move the sext into the arguments and have the same result. For
16624   // example, if A and B are both loads, we can make those extending loads and
16625   // avoid an extra instruction. This pattern appears often in VLS code
16626   // generation where the inputs to the setcc have a different size to the
16627   // instruction that wants to use the result of the setcc.
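  // Illustrative rewrite (assuming A and B are loads):
  //   (sext (setcc A, B, cc)) -> (setcc (sext A), (sext B), cc)
  // where the operand extension is a sign extension for signed condition codes
  // and a zero extension otherwise, and the extensions can fold into the loads.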
16628   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
16629          N->getOperand(0)->getOpcode() == ISD::SETCC);
16630   const SDValue SetCC = N->getOperand(0);
16631 
16632   const SDValue CCOp0 = SetCC.getOperand(0);
16633   const SDValue CCOp1 = SetCC.getOperand(1);
16634   if (!CCOp0->getValueType(0).isInteger() ||
16635       !CCOp1->getValueType(0).isInteger())
16636     return SDValue();
16637 
16638   ISD::CondCode Code =
16639       cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get();
16640 
16641   ISD::NodeType ExtType =
16642       isSignedIntSetCC(Code) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
16643 
16644   if (isCheapToExtend(SetCC.getOperand(0)) &&
16645       isCheapToExtend(SetCC.getOperand(1))) {
16646     const SDValue Ext1 =
16647         DAG.getNode(ExtType, SDLoc(N), N->getValueType(0), CCOp0);
16648     const SDValue Ext2 =
16649         DAG.getNode(ExtType, SDLoc(N), N->getValueType(0), CCOp1);
16650 
16651     return DAG.getSetCC(
16652         SDLoc(SetCC), N->getValueType(0), Ext1, Ext2,
16653         cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get());
16654   }
16655 
16656   return SDValue();
16657 }
16658 
16659 static SDValue performExtendCombine(SDNode *N,
16660                                     TargetLowering::DAGCombinerInfo &DCI,
16661                                     SelectionDAG &DAG) {
16662   // If we see something like (zext (sabd (extract_high ...), (DUP ...))) then
16663   // we can convert that DUP into another extract_high (of a bigger DUP), which
16664   // helps the backend to decide that an sabdl2 would be useful, saving a real
16665   // extract_high operation.
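  // Roughly: (zext (abd[su] (extract_high X), (dup Y)))
  //   -> (zext (abd[su] (extract_high X), (extract_high (dup' Y))))
  // where dup' is a duplicate into a wider vector, letting isel pick [us]abdl2.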
16666   if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ZERO_EXTEND &&
16667       (N->getOperand(0).getOpcode() == ISD::ABDU ||
16668        N->getOperand(0).getOpcode() == ISD::ABDS)) {
16669     SDNode *ABDNode = N->getOperand(0).getNode();
16670     SDValue NewABD =
16671         tryCombineLongOpWithDup(Intrinsic::not_intrinsic, ABDNode, DCI, DAG);
16672     if (!NewABD.getNode())
16673       return SDValue();
16674 
16675     return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), NewABD);
16676   }
16677 
16678   if (N->getValueType(0).isFixedLengthVector() &&
16679       N->getOpcode() == ISD::SIGN_EXTEND &&
16680       N->getOperand(0)->getOpcode() == ISD::SETCC)
16681     return performSignExtendSetCCCombine(N, DCI, DAG);
16682 
16683   return SDValue();
16684 }
16685 
16686 static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St,
16687                                SDValue SplatVal, unsigned NumVecElts) {
16688   assert(!St.isTruncatingStore() && "cannot split truncating vector store");
16689   Align OrigAlignment = St.getAlign();
16690   unsigned EltOffset = SplatVal.getValueType().getSizeInBits() / 8;
16691 
16692   // Create scalar stores. This is at least as good as the code sequence for a
16693   // split unaligned store which is a dup.s, ext.b, and two stores.
16694   // Most of the time the three stores should be replaced by store pair
16695   // instructions (stp).
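  // Illustrative layout of the generated stores: the first store writes
  // SplatVal at the base pointer, and each following store writes SplatVal at
  // an offset of i * EltOffset for i = 1 .. NumVecElts-1.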
16696   SDLoc DL(&St);
16697   SDValue BasePtr = St.getBasePtr();
16698   uint64_t BaseOffset = 0;
16699 
16700   const MachinePointerInfo &PtrInfo = St.getPointerInfo();
16701   SDValue NewST1 =
16702       DAG.getStore(St.getChain(), DL, SplatVal, BasePtr, PtrInfo,
16703                    OrigAlignment, St.getMemOperand()->getFlags());
16704 
  // As this is in ISel, we will not merge this add, which may degrade results.
16706   if (BasePtr->getOpcode() == ISD::ADD &&
16707       isa<ConstantSDNode>(BasePtr->getOperand(1))) {
16708     BaseOffset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
16709     BasePtr = BasePtr->getOperand(0);
16710   }
16711 
16712   unsigned Offset = EltOffset;
16713   while (--NumVecElts) {
16714     Align Alignment = commonAlignment(OrigAlignment, Offset);
16715     SDValue OffsetPtr =
16716         DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
16717                     DAG.getConstant(BaseOffset + Offset, DL, MVT::i64));
16718     NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr,
16719                           PtrInfo.getWithOffset(Offset), Alignment,
16720                           St.getMemOperand()->getFlags());
16721     Offset += EltOffset;
16722   }
16723   return NewST1;
16724 }
16725 
16726 // Returns an SVE type that ContentTy can be trivially sign or zero extended
16727 // into.
16728 static MVT getSVEContainerType(EVT ContentTy) {
16729   assert(ContentTy.isSimple() && "No SVE containers for extended types");
16730 
16731   switch (ContentTy.getSimpleVT().SimpleTy) {
16732   default:
16733     llvm_unreachable("No known SVE container for this MVT type");
16734   case MVT::nxv2i8:
16735   case MVT::nxv2i16:
16736   case MVT::nxv2i32:
16737   case MVT::nxv2i64:
16738   case MVT::nxv2f32:
16739   case MVT::nxv2f64:
16740     return MVT::nxv2i64;
16741   case MVT::nxv4i8:
16742   case MVT::nxv4i16:
16743   case MVT::nxv4i32:
16744   case MVT::nxv4f32:
16745     return MVT::nxv4i32;
16746   case MVT::nxv8i8:
16747   case MVT::nxv8i16:
16748   case MVT::nxv8f16:
16749   case MVT::nxv8bf16:
16750     return MVT::nxv8i16;
16751   case MVT::nxv16i8:
16752     return MVT::nxv16i8;
16753   }
16754 }
16755 
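// Lower an SVE load intrinsic to the predicated load node Opc, loading the SVE
// container type of the result (see getSVEContainerType above) and truncating
// back to the original type when the container is wider. For example, an
// nxv4i16 load is performed as an nxv4i32 load followed by a truncate.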
16756 static SDValue performLD1Combine(SDNode *N, SelectionDAG &DAG, unsigned Opc) {
16757   SDLoc DL(N);
16758   EVT VT = N->getValueType(0);
16759 
16760   if (VT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
16761     return SDValue();
16762 
16763   EVT ContainerVT = VT;
16764   if (ContainerVT.isInteger())
16765     ContainerVT = getSVEContainerType(ContainerVT);
16766 
16767   SDVTList VTs = DAG.getVTList(ContainerVT, MVT::Other);
16768   SDValue Ops[] = { N->getOperand(0), // Chain
16769                     N->getOperand(2), // Pg
16770                     N->getOperand(3), // Base
16771                     DAG.getValueType(VT) };
16772 
16773   SDValue Load = DAG.getNode(Opc, DL, VTs, Ops);
16774   SDValue LoadChain = SDValue(Load.getNode(), 1);
16775 
16776   if (ContainerVT.isInteger() && (VT != ContainerVT))
16777     Load = DAG.getNode(ISD::TRUNCATE, DL, VT, Load.getValue(0));
16778 
16779   return DAG.getMergeValues({ Load, LoadChain }, DL);
16780 }
16781 
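// Lower a non-temporal load intrinsic to a zeroing masked load. Floating-point
// results are loaded as the equivalent integer type and bitcast back.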
16782 static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) {
16783   SDLoc DL(N);
16784   EVT VT = N->getValueType(0);
16785   EVT PtrTy = N->getOperand(3).getValueType();
16786 
16787   EVT LoadVT = VT;
16788   if (VT.isFloatingPoint())
16789     LoadVT = VT.changeTypeToInteger();
16790 
16791   auto *MINode = cast<MemIntrinsicSDNode>(N);
16792   SDValue PassThru = DAG.getConstant(0, DL, LoadVT);
16793   SDValue L = DAG.getMaskedLoad(LoadVT, DL, MINode->getChain(),
16794                                 MINode->getOperand(3), DAG.getUNDEF(PtrTy),
16795                                 MINode->getOperand(2), PassThru,
16796                                 MINode->getMemoryVT(), MINode->getMemOperand(),
16797                                 ISD::UNINDEXED, ISD::NON_EXTLOAD, false);
16798 
  if (VT.isFloatingPoint()) {
    SDValue Ops[] = { DAG.getNode(ISD::BITCAST, DL, VT, L), L.getValue(1) };
    return DAG.getMergeValues(Ops, DL);
  }
16803 
16804   return L;
16805 }
16806 
16807 template <unsigned Opcode>
16808 static SDValue performLD1ReplicateCombine(SDNode *N, SelectionDAG &DAG) {
16809   static_assert(Opcode == AArch64ISD::LD1RQ_MERGE_ZERO ||
16810                     Opcode == AArch64ISD::LD1RO_MERGE_ZERO,
16811                 "Unsupported opcode.");
16812   SDLoc DL(N);
16813   EVT VT = N->getValueType(0);
16814 
16815   EVT LoadVT = VT;
16816   if (VT.isFloatingPoint())
16817     LoadVT = VT.changeTypeToInteger();
16818 
16819   SDValue Ops[] = {N->getOperand(0), N->getOperand(2), N->getOperand(3)};
16820   SDValue Load = DAG.getNode(Opcode, DL, {LoadVT, MVT::Other}, Ops);
16821   SDValue LoadChain = SDValue(Load.getNode(), 1);
16822 
16823   if (VT.isFloatingPoint())
16824     Load = DAG.getNode(ISD::BITCAST, DL, VT, Load.getValue(0));
16825 
16826   return DAG.getMergeValues({Load, LoadChain}, DL);
16827 }
16828 
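// Lower an SVE store intrinsic to an ST1_PRED node. The data is bitcast (for
// floating point) or any-extended (for integers) into its SVE container type,
// and the element type to store is passed along as a VT operand (the original
// type for integers, the container type for floating point).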
16829 static SDValue performST1Combine(SDNode *N, SelectionDAG &DAG) {
16830   SDLoc DL(N);
16831   SDValue Data = N->getOperand(2);
16832   EVT DataVT = Data.getValueType();
16833   EVT HwSrcVt = getSVEContainerType(DataVT);
16834   SDValue InputVT = DAG.getValueType(DataVT);
16835 
16836   if (DataVT.isFloatingPoint())
16837     InputVT = DAG.getValueType(HwSrcVt);
16838 
16839   SDValue SrcNew;
16840   if (Data.getValueType().isFloatingPoint())
16841     SrcNew = DAG.getNode(ISD::BITCAST, DL, HwSrcVt, Data);
16842   else
16843     SrcNew = DAG.getNode(ISD::ANY_EXTEND, DL, HwSrcVt, Data);
16844 
16845   SDValue Ops[] = { N->getOperand(0), // Chain
16846                     SrcNew,
16847                     N->getOperand(4), // Base
16848                     N->getOperand(3), // Pg
16849                     InputVT
16850                   };
16851 
16852   return DAG.getNode(AArch64ISD::ST1_PRED, DL, N->getValueType(0), Ops);
16853 }
16854 
16855 static SDValue performSTNT1Combine(SDNode *N, SelectionDAG &DAG) {
16856   SDLoc DL(N);
16857 
16858   SDValue Data = N->getOperand(2);
16859   EVT DataVT = Data.getValueType();
16860   EVT PtrTy = N->getOperand(4).getValueType();
16861 
16862   if (DataVT.isFloatingPoint())
16863     Data = DAG.getNode(ISD::BITCAST, DL, DataVT.changeTypeToInteger(), Data);
16864 
16865   auto *MINode = cast<MemIntrinsicSDNode>(N);
16866   return DAG.getMaskedStore(MINode->getChain(), DL, Data, MINode->getOperand(4),
16867                             DAG.getUNDEF(PtrTy), MINode->getOperand(3),
16868                             MINode->getMemoryVT(), MINode->getMemOperand(),
16869                             ISD::UNINDEXED, false, false);
16870 }
16871 
/// Replace a vector store of a splat of zeros by scalar stores of WZR/XZR. The
/// load/store optimizer pass will merge them into store pair stores. This
/// should be better than a movi to create the vector zero followed by a vector
/// store if the zero constant is not re-used, since one instruction and one
/// register live range will be removed.
16877 ///
16878 /// For example, the final generated code should be:
16879 ///
16880 ///   stp xzr, xzr, [x0]
16881 ///
16882 /// instead of:
16883 ///
16884 ///   movi v0.2d, #0
16885 ///   str q0, [x0]
16886 ///
16887 static SDValue replaceZeroVectorStore(SelectionDAG &DAG, StoreSDNode &St) {
16888   SDValue StVal = St.getValue();
16889   EVT VT = StVal.getValueType();
16890 
16891   // Avoid scalarizing zero splat stores for scalable vectors.
16892   if (VT.isScalableVector())
16893     return SDValue();
16894 
16895   // It is beneficial to scalarize a zero splat store for 2 or 3 i64 elements or
16896   // 2, 3 or 4 i32 elements.
16897   int NumVecElts = VT.getVectorNumElements();
16898   if (!(((NumVecElts == 2 || NumVecElts == 3) &&
16899          VT.getVectorElementType().getSizeInBits() == 64) ||
16900         ((NumVecElts == 2 || NumVecElts == 3 || NumVecElts == 4) &&
16901          VT.getVectorElementType().getSizeInBits() == 32)))
16902     return SDValue();
16903 
16904   if (StVal.getOpcode() != ISD::BUILD_VECTOR)
16905     return SDValue();
16906 
16907   // If the zero constant has more than one use then the vector store could be
16908   // better since the constant mov will be amortized and stp q instructions
16909   // should be able to be formed.
16910   if (!StVal.hasOneUse())
16911     return SDValue();
16912 
16913   // If the store is truncating then it's going down to i16 or smaller, which
16914   // means it can be implemented in a single store anyway.
16915   if (St.isTruncatingStore())
16916     return SDValue();
16917 
16918   // If the immediate offset of the address operand is too large for the stp
16919   // instruction, then bail out.
16920   if (DAG.isBaseWithConstantOffset(St.getBasePtr())) {
16921     int64_t Offset = St.getBasePtr()->getConstantOperandVal(1);
16922     if (Offset < -512 || Offset > 504)
16923       return SDValue();
16924   }
16925 
16926   for (int I = 0; I < NumVecElts; ++I) {
16927     SDValue EltVal = StVal.getOperand(I);
16928     if (!isNullConstant(EltVal) && !isNullFPConstant(EltVal))
16929       return SDValue();
16930   }
16931 
16932   // Use a CopyFromReg WZR/XZR here to prevent
16933   // DAGCombiner::MergeConsecutiveStores from undoing this transformation.
16934   SDLoc DL(&St);
16935   unsigned ZeroReg;
16936   EVT ZeroVT;
16937   if (VT.getVectorElementType().getSizeInBits() == 32) {
16938     ZeroReg = AArch64::WZR;
16939     ZeroVT = MVT::i32;
16940   } else {
16941     ZeroReg = AArch64::XZR;
16942     ZeroVT = MVT::i64;
16943   }
16944   SDValue SplatVal =
16945       DAG.getCopyFromReg(DAG.getEntryNode(), DL, ZeroReg, ZeroVT);
16946   return splitStoreSplat(DAG, St, SplatVal, NumVecElts);
16947 }
16948 
/// Replace a vector store of a splatted scalar by scalar stores of that scalar
/// value. The load/store optimizer pass will merge them into store pair stores.
/// This has better performance than a splat of the scalar followed by a split
/// vector store. Even if the stores are not merged, it is four stores vs. a
/// dup followed by an ext.b and two stores.
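///
/// For example (illustrative, for a v2i64 splat of x1 stored at [x0]), instead
/// of
///
///   dup v0.2d, x1
///   str q0, [x0]
///
/// we emit two scalar stores of x1 that the load/store optimizer can merge
/// into:
///
///   stp x1, x1, [x0]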
16954 static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode &St) {
16955   SDValue StVal = St.getValue();
16956   EVT VT = StVal.getValueType();
16957 
  // Don't replace floating-point stores; they possibly won't be transformed to
  // stp because of the store pair suppress pass.
16960   if (VT.isFloatingPoint())
16961     return SDValue();
16962 
16963   // We can express a splat as store pair(s) for 2 or 4 elements.
16964   unsigned NumVecElts = VT.getVectorNumElements();
16965   if (NumVecElts != 4 && NumVecElts != 2)
16966     return SDValue();
16967 
16968   // If the store is truncating then it's going down to i16 or smaller, which
16969   // means it can be implemented in a single store anyway.
16970   if (St.isTruncatingStore())
16971     return SDValue();
16972 
16973   // Check that this is a splat.
16974   // Make sure that each of the relevant vector element locations are inserted
16975   // to, i.e. 0 and 1 for v2i64 and 0, 1, 2, 3 for v4i32.
16976   std::bitset<4> IndexNotInserted((1 << NumVecElts) - 1);
16977   SDValue SplatVal;
16978   for (unsigned I = 0; I < NumVecElts; ++I) {
16979     // Check for insert vector elements.
16980     if (StVal.getOpcode() != ISD::INSERT_VECTOR_ELT)
16981       return SDValue();
16982 
16983     // Check that same value is inserted at each vector element.
16984     if (I == 0)
16985       SplatVal = StVal.getOperand(1);
16986     else if (StVal.getOperand(1) != SplatVal)
16987       return SDValue();
16988 
16989     // Check insert element index.
16990     ConstantSDNode *CIndex = dyn_cast<ConstantSDNode>(StVal.getOperand(2));
16991     if (!CIndex)
16992       return SDValue();
16993     uint64_t IndexVal = CIndex->getZExtValue();
16994     if (IndexVal >= NumVecElts)
16995       return SDValue();
16996     IndexNotInserted.reset(IndexVal);
16997 
16998     StVal = StVal.getOperand(0);
16999   }
17000   // Check that all vector element locations were inserted to.
17001   if (IndexNotInserted.any())
    return SDValue();
17003 
17004   return splitStoreSplat(DAG, St, SplatVal, NumVecElts);
17005 }
17006 
17007 static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
17008                            SelectionDAG &DAG,
17009                            const AArch64Subtarget *Subtarget) {
17010 
17011   StoreSDNode *S = cast<StoreSDNode>(N);
17012   if (S->isVolatile() || S->isIndexed())
17013     return SDValue();
17014 
17015   SDValue StVal = S->getValue();
17016   EVT VT = StVal.getValueType();
17017 
17018   if (!VT.isFixedLengthVector())
17019     return SDValue();
17020 
  // If we get a splat of zeros, convert this vector store to a store of
  // scalars. They will be merged into store pairs of xzr, thereby removing one
  // instruction and one register.
17024   if (SDValue ReplacedZeroSplat = replaceZeroVectorStore(DAG, *S))
17025     return ReplacedZeroSplat;
17026 
17027   // FIXME: The logic for deciding if an unaligned store should be split should
17028   // be included in TLI.allowsMisalignedMemoryAccesses(), and there should be
17029   // a call to that function here.
17030 
17031   if (!Subtarget->isMisaligned128StoreSlow())
17032     return SDValue();
17033 
17034   // Don't split at -Oz.
17035   if (DAG.getMachineFunction().getFunction().hasMinSize())
17036     return SDValue();
17037 
  // Don't split v2i64 vectors. Memcpy lowering produces those, and splitting
  // them up regresses performance on micro-benchmarks and olden/bh.
17040   if (VT.getVectorNumElements() < 2 || VT == MVT::v2i64)
17041     return SDValue();
17042 
17043   // Split unaligned 16B stores. They are terrible for performance.
17044   // Don't split stores with alignment of 1 or 2. Code that uses clang vector
17045   // extensions can use this to mark that it does not want splitting to happen
17046   // (by underspecifying alignment to be 1 or 2). Furthermore, the chance of
17047   // eliminating alignment hazards is only 1 in 8 for alignment of 2.
17048   if (VT.getSizeInBits() != 128 || S->getAlign() >= Align(16) ||
17049       S->getAlign() <= Align(2))
17050     return SDValue();
17051 
  // If we get a splat of a scalar, convert this vector store to a store of
  // scalars. They will be merged into store pairs, thereby removing two
  // instructions.
17055   if (SDValue ReplacedSplat = replaceSplatVectorStore(DAG, *S))
17056     return ReplacedSplat;
17057 
17058   SDLoc DL(S);
17059 
17060   // Split VT into two.
17061   EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
17062   unsigned NumElts = HalfVT.getVectorNumElements();
17063   SDValue SubVector0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
17064                                    DAG.getConstant(0, DL, MVT::i64));
17065   SDValue SubVector1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
17066                                    DAG.getConstant(NumElts, DL, MVT::i64));
17067   SDValue BasePtr = S->getBasePtr();
17068   SDValue NewST1 =
17069       DAG.getStore(S->getChain(), DL, SubVector0, BasePtr, S->getPointerInfo(),
17070                    S->getAlign(), S->getMemOperand()->getFlags());
17071   SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
17072                                   DAG.getConstant(8, DL, MVT::i64));
17073   return DAG.getStore(NewST1.getValue(0), DL, SubVector1, OffsetPtr,
17074                       S->getPointerInfo(), S->getAlign(),
17075                       S->getMemOperand()->getFlags());
17076 }
17077 
17078 static SDValue performSpliceCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexpected Opcode!");
17080 
17081   // splice(pg, op1, undef) -> op1
17082   if (N->getOperand(2).isUndef())
17083     return N->getOperand(1);
17084 
17085   return SDValue();
17086 }
17087 
17088 static SDValue performUnpackCombine(SDNode *N, SelectionDAG &DAG) {
17089   assert((N->getOpcode() == AArch64ISD::UUNPKHI ||
17090           N->getOpcode() == AArch64ISD::UUNPKLO) &&
17091          "Unexpected Opcode!");
17092 
17093   // uunpklo/hi undef -> undef
17094   if (N->getOperand(0).isUndef())
17095     return DAG.getUNDEF(N->getValueType(0));
17096 
17097   return SDValue();
17098 }
17099 
17100 static SDValue performUzpCombine(SDNode *N, SelectionDAG &DAG) {
17101   SDLoc DL(N);
17102   SDValue Op0 = N->getOperand(0);
17103   SDValue Op1 = N->getOperand(1);
17104   EVT ResVT = N->getValueType(0);
17105 
17106   // uzp1(x, undef) -> concat(truncate(x), undef)
17107   if (Op1.getOpcode() == ISD::UNDEF) {
17108     EVT BCVT = MVT::Other, HalfVT = MVT::Other;
17109     switch (ResVT.getSimpleVT().SimpleTy) {
17110     default:
17111       break;
17112     case MVT::v16i8:
17113       BCVT = MVT::v8i16;
17114       HalfVT = MVT::v8i8;
17115       break;
17116     case MVT::v8i16:
17117       BCVT = MVT::v4i32;
17118       HalfVT = MVT::v4i16;
17119       break;
17120     case MVT::v4i32:
17121       BCVT = MVT::v2i64;
17122       HalfVT = MVT::v2i32;
17123       break;
17124     }
17125     if (BCVT != MVT::Other) {
17126       SDValue BC = DAG.getBitcast(BCVT, Op0);
17127       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, HalfVT, BC);
17128       return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResVT, Trunc,
17129                          DAG.getUNDEF(HalfVT));
17130     }
17131   }
17132 
17133   // uzp1(unpklo(uzp1(x, y)), z) => uzp1(x, z)
17134   if (Op0.getOpcode() == AArch64ISD::UUNPKLO) {
17135     if (Op0.getOperand(0).getOpcode() == AArch64ISD::UZP1) {
17136       SDValue X = Op0.getOperand(0).getOperand(0);
17137       return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, X, Op1);
17138     }
17139   }
17140 
17141   // uzp1(x, unpkhi(uzp1(y, z))) => uzp1(x, z)
17142   if (Op1.getOpcode() == AArch64ISD::UUNPKHI) {
17143     if (Op1.getOperand(0).getOpcode() == AArch64ISD::UZP1) {
17144       SDValue Z = Op1.getOperand(0).getOperand(1);
17145       return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, Op0, Z);
17146     }
17147   }
17148 
17149   return SDValue();
17150 }
17151 
17152 static SDValue performGLD1Combine(SDNode *N, SelectionDAG &DAG) {
17153   unsigned Opc = N->getOpcode();
17154 
17155   assert(((Opc >= AArch64ISD::GLD1_MERGE_ZERO && // unsigned gather loads
17156            Opc <= AArch64ISD::GLD1_IMM_MERGE_ZERO) ||
17157           (Opc >= AArch64ISD::GLD1S_MERGE_ZERO && // signed gather loads
17158            Opc <= AArch64ISD::GLD1S_IMM_MERGE_ZERO)) &&
17159          "Invalid opcode.");
17160 
17161   const bool Scaled = Opc == AArch64ISD::GLD1_SCALED_MERGE_ZERO ||
17162                       Opc == AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
17163   const bool Signed = Opc == AArch64ISD::GLD1S_MERGE_ZERO ||
17164                       Opc == AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
17165   const bool Extended = Opc == AArch64ISD::GLD1_SXTW_MERGE_ZERO ||
17166                         Opc == AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO ||
17167                         Opc == AArch64ISD::GLD1_UXTW_MERGE_ZERO ||
17168                         Opc == AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO;
17169 
17170   SDLoc DL(N);
17171   SDValue Chain = N->getOperand(0);
17172   SDValue Pg = N->getOperand(1);
17173   SDValue Base = N->getOperand(2);
17174   SDValue Offset = N->getOperand(3);
17175   SDValue Ty = N->getOperand(4);
17176 
17177   EVT ResVT = N->getValueType(0);
17178 
17179   const auto OffsetOpc = Offset.getOpcode();
17180   const bool OffsetIsZExt =
17181       OffsetOpc == AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU;
17182   const bool OffsetIsSExt =
17183       OffsetOpc == AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU;
17184 
17185   // Fold sign/zero extensions of vector offsets into GLD1 nodes where possible.
17186   if (!Extended && (OffsetIsSExt || OffsetIsZExt)) {
17187     SDValue ExtPg = Offset.getOperand(0);
17188     VTSDNode *ExtFrom = cast<VTSDNode>(Offset.getOperand(2).getNode());
17189     EVT ExtFromEVT = ExtFrom->getVT().getVectorElementType();
17190 
    // If the predicate for the sign- or zero-extended offset is the
    // same as the predicate used for this load and the sign-/zero-extension
    // was from a 32-bit value, fold the extension into the gather by using
    // the unextended offset with the extending (SXTW/UXTW) addressing mode.
17194     if (ExtPg == Pg && ExtFromEVT == MVT::i32) {
17195       SDValue UnextendedOffset = Offset.getOperand(1);
17196 
17197       unsigned NewOpc = getGatherVecOpcode(Scaled, OffsetIsSExt, true);
17198       if (Signed)
17199         NewOpc = getSignExtendedGatherOpcode(NewOpc);
17200 
17201       return DAG.getNode(NewOpc, DL, {ResVT, MVT::Other},
17202                          {Chain, Pg, Base, UnextendedOffset, Ty});
17203     }
17204   }
17205 
17206   return SDValue();
17207 }
17208 
17209 /// Optimize a vector shift instruction and its operand if shifted out
17210 /// bits are not used.
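/// For example, for (VLSHR X, #8) the low 8 bits of X are shifted out of the
/// result, so they are not demanded from X and the computation of X may be
/// simplified accordingly.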
17211 static SDValue performVectorShiftCombine(SDNode *N,
17212                                          const AArch64TargetLowering &TLI,
17213                                          TargetLowering::DAGCombinerInfo &DCI) {
17214   assert(N->getOpcode() == AArch64ISD::VASHR ||
17215          N->getOpcode() == AArch64ISD::VLSHR);
17216 
17217   SDValue Op = N->getOperand(0);
17218   unsigned OpScalarSize = Op.getScalarValueSizeInBits();
17219 
17220   unsigned ShiftImm = N->getConstantOperandVal(1);
17221   assert(OpScalarSize > ShiftImm && "Invalid shift imm");
17222 
17223   APInt ShiftedOutBits = APInt::getLowBitsSet(OpScalarSize, ShiftImm);
17224   APInt DemandedMask = ~ShiftedOutBits;
17225 
17226   if (TLI.SimplifyDemandedBits(Op, DemandedMask, DCI))
17227     return SDValue(N, 0);
17228 
17229   return SDValue();
17230 }
17231 
17232 static SDValue performSunpkloCombine(SDNode *N, SelectionDAG &DAG) {
17233   // sunpklo(sext(pred)) -> sext(extract_low_half(pred))
17234   // This transform works in partnership with performSetCCPunpkCombine to
17235   // remove unnecessary transfer of predicates into standard registers and back
17236   if (N->getOperand(0).getOpcode() == ISD::SIGN_EXTEND &&
17237       N->getOperand(0)->getOperand(0)->getValueType(0).getScalarType() ==
17238           MVT::i1) {
17239     SDValue CC = N->getOperand(0)->getOperand(0);
17240     auto VT = CC->getValueType(0).getHalfNumVectorElementsVT(*DAG.getContext());
17241     SDValue Unpk = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT, CC,
17242                                DAG.getVectorIdxConstant(0, SDLoc(N)));
17243     return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), N->getValueType(0), Unpk);
17244   }
17245 
17246   return SDValue();
17247 }
17248 
17249 /// Target-specific DAG combine function for post-increment LD1 (lane) and
17250 /// post-increment LD1R.
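/// For example, a scalar load feeding an insert_vector_elt, together with an
/// add of the load address by the element size, can be combined into a single
/// LD1LANEpost (or LD1DUPpost for the dup case) that also produces the
/// post-incremented address.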
17251 static SDValue performPostLD1Combine(SDNode *N,
17252                                      TargetLowering::DAGCombinerInfo &DCI,
17253                                      bool IsLaneOp) {
17254   if (DCI.isBeforeLegalizeOps())
17255     return SDValue();
17256 
17257   SelectionDAG &DAG = DCI.DAG;
17258   EVT VT = N->getValueType(0);
17259 
17260   if (!VT.is128BitVector() && !VT.is64BitVector())
17261     return SDValue();
17262 
17263   unsigned LoadIdx = IsLaneOp ? 1 : 0;
17264   SDNode *LD = N->getOperand(LoadIdx).getNode();
  // If it is not a LOAD, we cannot do this combine.
17266   if (LD->getOpcode() != ISD::LOAD)
17267     return SDValue();
17268 
17269   // The vector lane must be a constant in the LD1LANE opcode.
17270   SDValue Lane;
17271   if (IsLaneOp) {
17272     Lane = N->getOperand(2);
17273     auto *LaneC = dyn_cast<ConstantSDNode>(Lane);
17274     if (!LaneC || LaneC->getZExtValue() >= VT.getVectorNumElements())
17275       return SDValue();
17276   }
17277 
17278   LoadSDNode *LoadSDN = cast<LoadSDNode>(LD);
17279   EVT MemVT = LoadSDN->getMemoryVT();
17280   // Check if memory operand is the same type as the vector element.
17281   if (MemVT != VT.getVectorElementType())
17282     return SDValue();
17283 
17284   // Check if there are other uses. If so, do not combine as it will introduce
17285   // an extra load.
17286   for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end(); UI != UE;
17287        ++UI) {
17288     if (UI.getUse().getResNo() == 1) // Ignore uses of the chain result.
17289       continue;
17290     if (*UI != N)
17291       return SDValue();
17292   }
17293 
17294   SDValue Addr = LD->getOperand(1);
17295   SDValue Vector = N->getOperand(0);
17296   // Search for a use of the address operand that is an increment.
17297   for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), UE =
17298        Addr.getNode()->use_end(); UI != UE; ++UI) {
17299     SDNode *User = *UI;
17300     if (User->getOpcode() != ISD::ADD
17301         || UI.getUse().getResNo() != Addr.getResNo())
17302       continue;
17303 
17304     // If the increment is a constant, it must match the memory ref size.
17305     SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
17306     if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
17307       uint32_t IncVal = CInc->getZExtValue();
17308       unsigned NumBytes = VT.getScalarSizeInBits() / 8;
17309       if (IncVal != NumBytes)
17310         continue;
17311       Inc = DAG.getRegister(AArch64::XZR, MVT::i64);
17312     }
17313 
    // To avoid cycle construction, make sure that neither the load nor the add
    // is a predecessor of the other or of the Vector.
17316     SmallPtrSet<const SDNode *, 32> Visited;
17317     SmallVector<const SDNode *, 16> Worklist;
17318     Visited.insert(Addr.getNode());
17319     Worklist.push_back(User);
17320     Worklist.push_back(LD);
17321     Worklist.push_back(Vector.getNode());
17322     if (SDNode::hasPredecessorHelper(LD, Visited, Worklist) ||
17323         SDNode::hasPredecessorHelper(User, Visited, Worklist))
17324       continue;
17325 
17326     SmallVector<SDValue, 8> Ops;
17327     Ops.push_back(LD->getOperand(0));  // Chain
17328     if (IsLaneOp) {
17329       Ops.push_back(Vector);           // The vector to be inserted
17330       Ops.push_back(Lane);             // The lane to be inserted in the vector
17331     }
17332     Ops.push_back(Addr);
17333     Ops.push_back(Inc);
17334 
17335     EVT Tys[3] = { VT, MVT::i64, MVT::Other };
17336     SDVTList SDTys = DAG.getVTList(Tys);
17337     unsigned NewOp = IsLaneOp ? AArch64ISD::LD1LANEpost : AArch64ISD::LD1DUPpost;
17338     SDValue UpdN = DAG.getMemIntrinsicNode(NewOp, SDLoc(N), SDTys, Ops,
17339                                            MemVT,
17340                                            LoadSDN->getMemOperand());
17341 
17342     // Update the uses.
17343     SDValue NewResults[] = {
17344         SDValue(LD, 0),            // The result of load
17345         SDValue(UpdN.getNode(), 2) // Chain
17346     };
17347     DCI.CombineTo(LD, NewResults);
17348     DCI.CombineTo(N, SDValue(UpdN.getNode(), 0));     // Dup/Inserted Result
17349     DCI.CombineTo(User, SDValue(UpdN.getNode(), 1));  // Write back register
17350 
17351     break;
17352   }
17353   return SDValue();
17354 }
17355 
17356 /// Simplify ``Addr`` given that the top byte of it is ignored by HW during
17357 /// address translation.
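/// With TBI, bits [63:56] of the address are ignored, so only the low 56 bits
/// are demanded when simplifying the address computation.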
17358 static bool performTBISimplification(SDValue Addr,
17359                                      TargetLowering::DAGCombinerInfo &DCI,
17360                                      SelectionDAG &DAG) {
17361   APInt DemandedMask = APInt::getLowBitsSet(64, 56);
17362   KnownBits Known;
17363   TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
17364                                         !DCI.isBeforeLegalizeOps());
17365   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
17366   if (TLI.SimplifyDemandedBits(Addr, DemandedMask, Known, TLO)) {
17367     DCI.CommitTargetLoweringOpt(TLO);
17368     return true;
17369   }
17370   return false;
17371 }
17372 
17373 static SDValue foldTruncStoreOfExt(SelectionDAG &DAG, SDNode *N) {
17374   assert((N->getOpcode() == ISD::STORE || N->getOpcode() == ISD::MSTORE) &&
17375          "Expected STORE dag node in input!");
17376 
17377   if (auto Store = dyn_cast<StoreSDNode>(N)) {
17378     if (!Store->isTruncatingStore() || Store->isIndexed())
17379       return SDValue();
17380     SDValue Ext = Store->getValue();
17381     auto ExtOpCode = Ext.getOpcode();
17382     if (ExtOpCode != ISD::ZERO_EXTEND && ExtOpCode != ISD::SIGN_EXTEND &&
17383         ExtOpCode != ISD::ANY_EXTEND)
17384       return SDValue();
17385     SDValue Orig = Ext->getOperand(0);
17386     if (Store->getMemoryVT() != Orig.getValueType())
17387       return SDValue();
17388     return DAG.getStore(Store->getChain(), SDLoc(Store), Orig,
17389                         Store->getBasePtr(), Store->getMemOperand());
17390   }
17391 
17392   return SDValue();
17393 }
17394 
17395 static SDValue performSTORECombine(SDNode *N,
17396                                    TargetLowering::DAGCombinerInfo &DCI,
17397                                    SelectionDAG &DAG,
17398                                    const AArch64Subtarget *Subtarget) {
17399   StoreSDNode *ST = cast<StoreSDNode>(N);
17400   SDValue Chain = ST->getChain();
17401   SDValue Value = ST->getValue();
17402   SDValue Ptr = ST->getBasePtr();
17403 
17404   // If this is an FP_ROUND followed by a store, fold this into a truncating
17405   // store. We can do this even if this is already a truncstore.
17406   // We purposefully don't care about legality of the nodes here as we know
17407   // they can be split down into something legal.
17408   if (DCI.isBeforeLegalizeOps() && Value.getOpcode() == ISD::FP_ROUND &&
17409       Value.getNode()->hasOneUse() && ST->isUnindexed() &&
17410       Subtarget->useSVEForFixedLengthVectors() &&
17411       Value.getValueType().isFixedLengthVector() &&
17412       Value.getValueType().getFixedSizeInBits() >=
17413           Subtarget->getMinSVEVectorSizeInBits())
17414     return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
17415                              ST->getMemoryVT(), ST->getMemOperand());
17416 
17417   if (SDValue Split = splitStores(N, DCI, DAG, Subtarget))
17418     return Split;
17419 
17420   if (Subtarget->supportsAddressTopByteIgnored() &&
17421       performTBISimplification(N->getOperand(2), DCI, DAG))
17422     return SDValue(N, 0);
17423 
17424   if (SDValue Store = foldTruncStoreOfExt(DAG, N))
17425     return Store;
17426 
17427   return SDValue();
17428 }
17429 
17430 /// \return true if part of the index was folded into the Base.
17431 static bool foldIndexIntoBase(SDValue &BasePtr, SDValue &Index, SDValue Scale,
17432                               SDLoc DL, SelectionDAG &DAG) {
17433   // This function assumes a vector of i64 indices.
17434   EVT IndexVT = Index.getValueType();
17435   if (!IndexVT.isVector() || IndexVT.getVectorElementType() != MVT::i64)
17436     return false;
17437 
17438   // Simplify:
17439   //   BasePtr = Ptr
17440   //   Index = X + splat(Offset)
17441   // ->
17442   //   BasePtr = Ptr + Offset * scale.
17443   //   Index = X
17444   if (Index.getOpcode() == ISD::ADD) {
17445     if (auto Offset = DAG.getSplatValue(Index.getOperand(1))) {
17446       Offset = DAG.getNode(ISD::MUL, DL, MVT::i64, Offset, Scale);
17447       BasePtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, Offset);
17448       Index = Index.getOperand(0);
17449       return true;
17450     }
17451   }
17452 
17453   // Simplify:
17454   //   BasePtr = Ptr
17455   //   Index = (X + splat(Offset)) << splat(Shift)
17456   // ->
  //   BasePtr = Ptr + (Offset << Shift) * Scale
  //   Index = X << splat(Shift)
17459   if (Index.getOpcode() == ISD::SHL &&
17460       Index.getOperand(0).getOpcode() == ISD::ADD) {
17461     SDValue Add = Index.getOperand(0);
17462     SDValue ShiftOp = Index.getOperand(1);
17463     SDValue OffsetOp = Add.getOperand(1);
17464     if (auto Shift = DAG.getSplatValue(ShiftOp))
17465       if (auto Offset = DAG.getSplatValue(OffsetOp)) {
17466         Offset = DAG.getNode(ISD::SHL, DL, MVT::i64, Offset, Shift);
17467         Offset = DAG.getNode(ISD::MUL, DL, MVT::i64, Offset, Scale);
17468         BasePtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, Offset);
17469         Index = DAG.getNode(ISD::SHL, DL, Index.getValueType(),
17470                             Add.getOperand(0), ShiftOp);
17471         return true;
17472       }
17473   }
17474 
17475   return false;
17476 }
17477 
17478 // Analyse the specified address returning true if a more optimal addressing
17479 // mode is available. When returning true all parameters are updated to reflect
17480 // their recommended values.
17481 static bool findMoreOptimalIndexType(const MaskedGatherScatterSDNode *N,
17482                                      SDValue &BasePtr, SDValue &Index,
17483                                      SelectionDAG &DAG) {
17484   // Try to iteratively fold parts of the index into the base pointer to
17485   // simplify the index as much as possible.
17486   bool Changed = false;
17487   while (foldIndexIntoBase(BasePtr, Index, N->getScale(), SDLoc(N), DAG))
17488     Changed = true;
17489 
17490   // Only consider element types that are pointer sized as smaller types can
17491   // be easily promoted.
17492   EVT IndexVT = Index.getValueType();
17493   if (IndexVT.getVectorElementType() != MVT::i64 || IndexVT == MVT::nxv2i64)
17494     return Changed;
17495 
17496   // Match:
17497   //   Index = step(const)
17498   int64_t Stride = 0;
17499   if (Index.getOpcode() == ISD::STEP_VECTOR)
17500     Stride = cast<ConstantSDNode>(Index.getOperand(0))->getSExtValue();
17501 
17502   // Match:
17503   //   Index = step(const) << shift(const)
17504   else if (Index.getOpcode() == ISD::SHL &&
17505            Index.getOperand(0).getOpcode() == ISD::STEP_VECTOR) {
17506     SDValue RHS = Index.getOperand(1);
17507     if (auto *Shift =
17508             dyn_cast_or_null<ConstantSDNode>(DAG.getSplatValue(RHS))) {
17509       int64_t Step = (int64_t)Index.getOperand(0).getConstantOperandVal(1);
17510       Stride = Step << Shift->getZExtValue();
17511     }
17512   }
17513 
17514   // Return early because no supported pattern is found.
17515   if (Stride == 0)
17516     return Changed;
17517 
17518   if (Stride < std::numeric_limits<int32_t>::min() ||
17519       Stride > std::numeric_limits<int32_t>::max())
17520     return Changed;
17521 
17522   const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
17523   unsigned MaxVScale =
17524       Subtarget.getMaxSVEVectorSizeInBits() / AArch64::SVEBitsPerBlock;
17525   int64_t LastElementOffset =
17526       IndexVT.getVectorMinNumElements() * Stride * MaxVScale;
17527 
17528   if (LastElementOffset < std::numeric_limits<int32_t>::min() ||
17529       LastElementOffset > std::numeric_limits<int32_t>::max())
17530     return Changed;
17531 
17532   EVT NewIndexVT = IndexVT.changeVectorElementType(MVT::i32);
  // The stride is not scaled explicitly by 'Scale' here, because that scaling
  // happens as part of the gather/scatter addressing mode.
17535   Index = DAG.getNode(ISD::STEP_VECTOR, SDLoc(N), NewIndexVT,
17536                       DAG.getTargetConstant(Stride, SDLoc(N), MVT::i32));
17537   return true;
17538 }
17539 
17540 static SDValue performMaskedGatherScatterCombine(
17541     SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG) {
17542   MaskedGatherScatterSDNode *MGS = cast<MaskedGatherScatterSDNode>(N);
17543   assert(MGS && "Can only combine gather load or scatter store nodes");
17544 
17545   if (!DCI.isBeforeLegalize())
17546     return SDValue();
17547 
17548   SDLoc DL(MGS);
17549   SDValue Chain = MGS->getChain();
17550   SDValue Scale = MGS->getScale();
17551   SDValue Index = MGS->getIndex();
17552   SDValue Mask = MGS->getMask();
17553   SDValue BasePtr = MGS->getBasePtr();
17554   ISD::MemIndexType IndexType = MGS->getIndexType();
17555 
17556   if (!findMoreOptimalIndexType(MGS, BasePtr, Index, DAG))
17557     return SDValue();
17558 
17559   // Here we catch such cases early and change MGATHER's IndexType to allow
17560   // the use of an Index that's more legalisation friendly.
17561   if (auto *MGT = dyn_cast<MaskedGatherSDNode>(MGS)) {
17562     SDValue PassThru = MGT->getPassThru();
17563     SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
17564     return DAG.getMaskedGather(
17565         DAG.getVTList(N->getValueType(0), MVT::Other), MGT->getMemoryVT(), DL,
17566         Ops, MGT->getMemOperand(), IndexType, MGT->getExtensionType());
17567   }
17568   auto *MSC = cast<MaskedScatterSDNode>(MGS);
17569   SDValue Data = MSC->getValue();
17570   SDValue Ops[] = {Chain, Data, Mask, BasePtr, Index, Scale};
17571   return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), MSC->getMemoryVT(), DL,
17572                               Ops, MSC->getMemOperand(), IndexType,
17573                               MSC->isTruncatingStore());
17574 }
17575 
17576 /// Target-specific DAG combine function for NEON load/store intrinsics
17577 /// to merge base address updates.
17578 static SDValue performNEONPostLDSTCombine(SDNode *N,
17579                                           TargetLowering::DAGCombinerInfo &DCI,
17580                                           SelectionDAG &DAG) {
17581   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
17582     return SDValue();
17583 
17584   unsigned AddrOpIdx = N->getNumOperands() - 1;
17585   SDValue Addr = N->getOperand(AddrOpIdx);
17586 
17587   // Search for a use of the address operand that is an increment.
17588   for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
17589        UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
17590     SDNode *User = *UI;
17591     if (User->getOpcode() != ISD::ADD ||
17592         UI.getUse().getResNo() != Addr.getResNo())
17593       continue;
17594 
17595     // Check that the add is independent of the load/store.  Otherwise, folding
17596     // it would create a cycle.
17597     SmallPtrSet<const SDNode *, 32> Visited;
17598     SmallVector<const SDNode *, 16> Worklist;
17599     Visited.insert(Addr.getNode());
17600     Worklist.push_back(N);
17601     Worklist.push_back(User);
17602     if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
17603         SDNode::hasPredecessorHelper(User, Visited, Worklist))
17604       continue;
17605 
17606     // Find the new opcode for the updating load/store.
17607     bool IsStore = false;
17608     bool IsLaneOp = false;
17609     bool IsDupOp = false;
17610     unsigned NewOpc = 0;
17611     unsigned NumVecs = 0;
17612     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
17613     switch (IntNo) {
17614     default: llvm_unreachable("unexpected intrinsic for Neon base update");
17615     case Intrinsic::aarch64_neon_ld2:       NewOpc = AArch64ISD::LD2post;
17616       NumVecs = 2; break;
17617     case Intrinsic::aarch64_neon_ld3:       NewOpc = AArch64ISD::LD3post;
17618       NumVecs = 3; break;
17619     case Intrinsic::aarch64_neon_ld4:       NewOpc = AArch64ISD::LD4post;
17620       NumVecs = 4; break;
17621     case Intrinsic::aarch64_neon_st2:       NewOpc = AArch64ISD::ST2post;
17622       NumVecs = 2; IsStore = true; break;
17623     case Intrinsic::aarch64_neon_st3:       NewOpc = AArch64ISD::ST3post;
17624       NumVecs = 3; IsStore = true; break;
17625     case Intrinsic::aarch64_neon_st4:       NewOpc = AArch64ISD::ST4post;
17626       NumVecs = 4; IsStore = true; break;
17627     case Intrinsic::aarch64_neon_ld1x2:     NewOpc = AArch64ISD::LD1x2post;
17628       NumVecs = 2; break;
17629     case Intrinsic::aarch64_neon_ld1x3:     NewOpc = AArch64ISD::LD1x3post;
17630       NumVecs = 3; break;
17631     case Intrinsic::aarch64_neon_ld1x4:     NewOpc = AArch64ISD::LD1x4post;
17632       NumVecs = 4; break;
17633     case Intrinsic::aarch64_neon_st1x2:     NewOpc = AArch64ISD::ST1x2post;
17634       NumVecs = 2; IsStore = true; break;
17635     case Intrinsic::aarch64_neon_st1x3:     NewOpc = AArch64ISD::ST1x3post;
17636       NumVecs = 3; IsStore = true; break;
17637     case Intrinsic::aarch64_neon_st1x4:     NewOpc = AArch64ISD::ST1x4post;
17638       NumVecs = 4; IsStore = true; break;
17639     case Intrinsic::aarch64_neon_ld2r:      NewOpc = AArch64ISD::LD2DUPpost;
17640       NumVecs = 2; IsDupOp = true; break;
17641     case Intrinsic::aarch64_neon_ld3r:      NewOpc = AArch64ISD::LD3DUPpost;
17642       NumVecs = 3; IsDupOp = true; break;
17643     case Intrinsic::aarch64_neon_ld4r:      NewOpc = AArch64ISD::LD4DUPpost;
17644       NumVecs = 4; IsDupOp = true; break;
17645     case Intrinsic::aarch64_neon_ld2lane:   NewOpc = AArch64ISD::LD2LANEpost;
17646       NumVecs = 2; IsLaneOp = true; break;
17647     case Intrinsic::aarch64_neon_ld3lane:   NewOpc = AArch64ISD::LD3LANEpost;
17648       NumVecs = 3; IsLaneOp = true; break;
17649     case Intrinsic::aarch64_neon_ld4lane:   NewOpc = AArch64ISD::LD4LANEpost;
17650       NumVecs = 4; IsLaneOp = true; break;
17651     case Intrinsic::aarch64_neon_st2lane:   NewOpc = AArch64ISD::ST2LANEpost;
17652       NumVecs = 2; IsStore = true; IsLaneOp = true; break;
17653     case Intrinsic::aarch64_neon_st3lane:   NewOpc = AArch64ISD::ST3LANEpost;
17654       NumVecs = 3; IsStore = true; IsLaneOp = true; break;
17655     case Intrinsic::aarch64_neon_st4lane:   NewOpc = AArch64ISD::ST4LANEpost;
17656       NumVecs = 4; IsStore = true; IsLaneOp = true; break;
17657     }
17658 
17659     EVT VecTy;
17660     if (IsStore)
17661       VecTy = N->getOperand(2).getValueType();
17662     else
17663       VecTy = N->getValueType(0);
17664 
17665     // If the increment is a constant, it must match the memory ref size.
17666     SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
17667     if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
17668       uint32_t IncVal = CInc->getZExtValue();
17669       unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
17670       if (IsLaneOp || IsDupOp)
17671         NumBytes /= VecTy.getVectorNumElements();
17672       if (IncVal != NumBytes)
17673         continue;
17674       Inc = DAG.getRegister(AArch64::XZR, MVT::i64);
17675     }
17676     SmallVector<SDValue, 8> Ops;
17677     Ops.push_back(N->getOperand(0)); // Incoming chain
    // Load-lane and store operations take a vector list as input.
17679     if (IsLaneOp || IsStore)
17680       for (unsigned i = 2; i < AddrOpIdx; ++i)
17681         Ops.push_back(N->getOperand(i));
17682     Ops.push_back(Addr); // Base register
17683     Ops.push_back(Inc);
17684 
17685     // Return Types.
17686     EVT Tys[6];
17687     unsigned NumResultVecs = (IsStore ? 0 : NumVecs);
17688     unsigned n;
17689     for (n = 0; n < NumResultVecs; ++n)
17690       Tys[n] = VecTy;
17691     Tys[n++] = MVT::i64;  // Type of write back register
17692     Tys[n] = MVT::Other;  // Type of the chain
17693     SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2));
17694 
17695     MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
17696     SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, Ops,
17697                                            MemInt->getMemoryVT(),
17698                                            MemInt->getMemOperand());
17699 
17700     // Update the uses.
17701     std::vector<SDValue> NewResults;
17702     for (unsigned i = 0; i < NumResultVecs; ++i) {
17703       NewResults.push_back(SDValue(UpdN.getNode(), i));
17704     }
17705     NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1));
17706     DCI.CombineTo(N, NewResults);
17707     DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
17708 
17709     break;
17710   }
17711   return SDValue();
17712 }
17713 
17714 // Checks to see if the value is the prescribed width and returns information
17715 // about its extension mode.
17716 static
17717 bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType) {
17718   ExtType = ISD::NON_EXTLOAD;
17719   switch(V.getNode()->getOpcode()) {
17720   default:
17721     return false;
17722   case ISD::LOAD: {
17723     LoadSDNode *LoadNode = cast<LoadSDNode>(V.getNode());
17724     if ((LoadNode->getMemoryVT() == MVT::i8 && width == 8)
17725        || (LoadNode->getMemoryVT() == MVT::i16 && width == 16)) {
17726       ExtType = LoadNode->getExtensionType();
17727       return true;
17728     }
17729     return false;
17730   }
17731   case ISD::AssertSext: {
17732     VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
17733     if ((TypeNode->getVT() == MVT::i8 && width == 8)
17734        || (TypeNode->getVT() == MVT::i16 && width == 16)) {
17735       ExtType = ISD::SEXTLOAD;
17736       return true;
17737     }
17738     return false;
17739   }
17740   case ISD::AssertZext: {
17741     VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
17742     if ((TypeNode->getVT() == MVT::i8 && width == 8)
17743        || (TypeNode->getVT() == MVT::i16 && width == 16)) {
17744       ExtType = ISD::ZEXTLOAD;
17745       return true;
17746     }
17747     return false;
17748   }
17749   case ISD::Constant:
17750   case ISD::TargetConstant: {
17751     return std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) <
17752            1LL << (width - 1);
17753   }
17754   }
17755 
17756   return true;
17757 }
17758 
17759 // This function does a whole lot of voodoo to determine if the tests are
17760 // equivalent without and with a mask. Essentially what happens is that given a
17761 // DAG resembling:
17762 //
17763 //  +-------------+ +-------------+ +-------------+ +-------------+
17764 //  |    Input    | | AddConstant | | CompConstant| |     CC      |
17765 //  +-------------+ +-------------+ +-------------+ +-------------+
17766 //           |           |           |               |
17767 //           V           V           |    +----------+
17768 //          +-------------+  +----+  |    |
17769 //          |     ADD     |  |0xff|  |    |
17770 //          +-------------+  +----+  |    |
17771 //                  |           |    |    |
17772 //                  V           V    |    |
17773 //                 +-------------+   |    |
17774 //                 |     AND     |   |    |
17775 //                 +-------------+   |    |
17776 //                      |            |    |
17777 //                      +-----+      |    |
17778 //                            |      |    |
17779 //                            V      V    V
17780 //                           +-------------+
17781 //                           |     CMP     |
17782 //                           +-------------+
17783 //
// The AND node may be safely removed for some combinations of inputs. In
// particular we need to take into account the extension type of the Input,
// the exact values of AddConstant, CompConstant, and CC, along with the nominal
// width of the input (this can work for inputs of any width; the above graph
// is specific to 8 bits).
17789 //
// The specific equations were worked out by generating output tables for each
// AArch64CC value in terms of AddConstant (w1) and CompConstant (w2). The
// problem was simplified by working with 4 bit inputs, which means we only
// needed to reason about 24 distinct bit patterns: 8 patterns unique to zero
// extension (8,15), 8 patterns unique to sign extensions (-8,-1), and 8
// patterns present in both extensions (0,7). For every distinct set of
// AddConstant and CompConstant bit patterns we can consider the masked and
// unmasked versions to be equivalent if the result of this function is true for
// all 16 distinct bit patterns for the current extension type of Input (w0).
17799 //
17800 //   sub      w8, w0, w1
17801 //   and      w10, w8, #0x0f
17802 //   cmp      w8, w2
17803 //   cset     w9, AArch64CC
17804 //   cmp      w10, w2
17805 //   cset     w11, AArch64CC
17806 //   cmp      w9, w11
17807 //   cset     w0, eq
17808 //   ret
17809 //
// Since the above sequence shows when the outputs are equivalent, it defines
// when it is safe to remove the AND. Unfortunately it only runs on AArch64 and
// would be expensive to execute during compilation. The equations below were
// written in a test harness that confirmed they give outputs equivalent to the
// above sequence for all inputs, so they can be used instead to determine
// whether the removal is legal.
//
// isEquivalentMaskless() is the code for testing whether the AND can be
// removed, factored out of the DAG recognition because the DAG can take
// several forms.
17819 
17820 static bool isEquivalentMaskless(unsigned CC, unsigned width,
17821                                  ISD::LoadExtType ExtType, int AddConstant,
17822                                  int CompConstant) {
  // By being careful about our equations and only writing them in terms of
  // symbolic values and well-known constants (0, 1, -1, MaxUInt) we can
  // make them generally applicable to all bit widths.
17826   int MaxUInt = (1 << width);
17827 
17828   // For the purposes of these comparisons sign extending the type is
17829   // equivalent to zero extending the add and displacing it by half the integer
17830   // width. Provided we are careful and make sure our equations are valid over
17831   // the whole range we can just adjust the input and avoid writing equations
17832   // for sign extended inputs.
17833   if (ExtType == ISD::SEXTLOAD)
17834     AddConstant -= (1 << (width-1));
17835 
17836   switch(CC) {
17837   case AArch64CC::LE:
17838   case AArch64CC::GT:
17839     if ((AddConstant == 0) ||
17840         (CompConstant == MaxUInt - 1 && AddConstant < 0) ||
17841         (AddConstant >= 0 && CompConstant < 0) ||
17842         (AddConstant <= 0 && CompConstant <= 0 && CompConstant < AddConstant))
17843       return true;
17844     break;
17845   case AArch64CC::LT:
17846   case AArch64CC::GE:
17847     if ((AddConstant == 0) ||
17848         (AddConstant >= 0 && CompConstant <= 0) ||
17849         (AddConstant <= 0 && CompConstant <= 0 && CompConstant <= AddConstant))
17850       return true;
17851     break;
17852   case AArch64CC::HI:
17853   case AArch64CC::LS:
17854     if ((AddConstant >= 0 && CompConstant < 0) ||
17855        (AddConstant <= 0 && CompConstant >= -1 &&
17856         CompConstant < AddConstant + MaxUInt))
17857       return true;
17858    break;
17859   case AArch64CC::PL:
17860   case AArch64CC::MI:
17861     if ((AddConstant == 0) ||
17862         (AddConstant > 0 && CompConstant <= 0) ||
17863         (AddConstant < 0 && CompConstant <= AddConstant))
17864       return true;
17865     break;
17866   case AArch64CC::LO:
17867   case AArch64CC::HS:
17868     if ((AddConstant >= 0 && CompConstant <= 0) ||
17869         (AddConstant <= 0 && CompConstant >= 0 &&
17870          CompConstant <= AddConstant + MaxUInt))
17871       return true;
17872     break;
17873   case AArch64CC::EQ:
17874   case AArch64CC::NE:
17875     if ((AddConstant > 0 && CompConstant < 0) ||
17876         (AddConstant < 0 && CompConstant >= 0 &&
17877          CompConstant < AddConstant + MaxUInt) ||
17878         (AddConstant >= 0 && CompConstant >= 0 &&
17879          CompConstant >= AddConstant) ||
17880         (AddConstant <= 0 && CompConstant < 0 && CompConstant < AddConstant))
17881       return true;
17882     break;
17883   case AArch64CC::VS:
17884   case AArch64CC::VC:
17885   case AArch64CC::AL:
17886   case AArch64CC::NV:
17887     return true;
17888   case AArch64CC::Invalid:
17889     break;
17890   }
17891 
17892   return false;
17893 }
17894 
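// performCONDCombine - Given a node whose operand CmpIndex is a SUBS of the
// form (SUBS (AND (ADD Input, AddConstant), 0xff|0xffff), CompConstant) and
// whose operand CCIndex is the condition code, drop the AND (i.e. rebuild the
// SUBS directly on the ADD) when isEquivalentMaskless() proves the condition
// is unaffected by the mask. CCIndex/CmpIndex let the same code serve both
// BRCOND and CSEL users.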
17895 static
17896 SDValue performCONDCombine(SDNode *N,
17897                            TargetLowering::DAGCombinerInfo &DCI,
17898                            SelectionDAG &DAG, unsigned CCIndex,
17899                            unsigned CmpIndex) {
17900   unsigned CC = cast<ConstantSDNode>(N->getOperand(CCIndex))->getSExtValue();
17901   SDNode *SubsNode = N->getOperand(CmpIndex).getNode();
17902   unsigned CondOpcode = SubsNode->getOpcode();
17903 
17904   if (CondOpcode != AArch64ISD::SUBS)
17905     return SDValue();
17906 
17907   // There is a SUBS feeding this condition. Is it fed by a mask we can
17908   // use?
17909 
17910   SDNode *AndNode = SubsNode->getOperand(0).getNode();
17911   unsigned MaskBits = 0;
17912 
17913   if (AndNode->getOpcode() != ISD::AND)
17914     return SDValue();
17915 
17916   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(AndNode->getOperand(1))) {
17917     uint32_t CNV = CN->getZExtValue();
17918     if (CNV == 255)
17919       MaskBits = 8;
17920     else if (CNV == 65535)
17921       MaskBits = 16;
17922   }
17923 
17924   if (!MaskBits)
17925     return SDValue();
17926 
17927   SDValue AddValue = AndNode->getOperand(0);
17928 
17929   if (AddValue.getOpcode() != ISD::ADD)
17930     return SDValue();
17931 
17932   // The basic dag structure is correct, grab the inputs and validate them.
17933 
17934   SDValue AddInputValue1 = AddValue.getNode()->getOperand(0);
17935   SDValue AddInputValue2 = AddValue.getNode()->getOperand(1);
17936   SDValue SubsInputValue = SubsNode->getOperand(1);
17937 
  // The mask is present and the provenance of all the values is a smaller
  // type; let's see if the mask is superfluous.
17940 
17941   if (!isa<ConstantSDNode>(AddInputValue2.getNode()) ||
17942       !isa<ConstantSDNode>(SubsInputValue.getNode()))
17943     return SDValue();
17944 
17945   ISD::LoadExtType ExtType;
17946 
17947   if (!checkValueWidth(SubsInputValue, MaskBits, ExtType) ||
17948       !checkValueWidth(AddInputValue2, MaskBits, ExtType) ||
17949       !checkValueWidth(AddInputValue1, MaskBits, ExtType) )
17950     return SDValue();
17951 
17952   if(!isEquivalentMaskless(CC, MaskBits, ExtType,
17953                 cast<ConstantSDNode>(AddInputValue2.getNode())->getSExtValue(),
17954                 cast<ConstantSDNode>(SubsInputValue.getNode())->getSExtValue()))
17955     return SDValue();
17956 
17957   // The AND is not necessary, remove it.
17958 
17959   SDVTList VTs = DAG.getVTList(SubsNode->getValueType(0),
17960                                SubsNode->getValueType(1));
17961   SDValue Ops[] = { AddValue, SubsNode->getOperand(1) };
17962 
17963   SDValue NewValue = DAG.getNode(CondOpcode, SDLoc(SubsNode), VTs, Ops);
17964   DAG.ReplaceAllUsesWith(SubsNode, NewValue.getNode());
17965 
17966   return SDValue(N, 0);
17967 }
17968 
17969 // Optimize compare with zero and branch.
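// For example, when only the flags of the SUBS are used:
//   (brcond eq, (SUBS x, 0)) => (CBZ x)
//   (brcond ne, (SUBS x, 0)) => (CBNZ x)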
17970 static SDValue performBRCONDCombine(SDNode *N,
17971                                     TargetLowering::DAGCombinerInfo &DCI,
17972                                     SelectionDAG &DAG) {
17973   MachineFunction &MF = DAG.getMachineFunction();
17974   // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z instructions
17975   // will not be produced, as they are conditional branch instructions that do
17976   // not set flags.
17977   if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
17978     return SDValue();
17979 
17980   if (SDValue NV = performCONDCombine(N, DCI, DAG, 2, 3))
17981     N = NV.getNode();
17982   SDValue Chain = N->getOperand(0);
17983   SDValue Dest = N->getOperand(1);
17984   SDValue CCVal = N->getOperand(2);
17985   SDValue Cmp = N->getOperand(3);
17986 
17987   assert(isa<ConstantSDNode>(CCVal) && "Expected a ConstantSDNode here!");
17988   unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue();
17989   if (CC != AArch64CC::EQ && CC != AArch64CC::NE)
17990     return SDValue();
17991 
17992   unsigned CmpOpc = Cmp.getOpcode();
17993   if (CmpOpc != AArch64ISD::ADDS && CmpOpc != AArch64ISD::SUBS)
17994     return SDValue();
17995 
17996   // Only attempt folding if there is only one use of the flag and no use of the
17997   // value.
17998   if (!Cmp->hasNUsesOfValue(0, 0) || !Cmp->hasNUsesOfValue(1, 1))
17999     return SDValue();
18000 
18001   SDValue LHS = Cmp.getOperand(0);
18002   SDValue RHS = Cmp.getOperand(1);
18003 
18004   assert(LHS.getValueType() == RHS.getValueType() &&
18005          "Expected the value type to be the same for both operands!");
18006   if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64)
18007     return SDValue();
18008 
18009   if (isNullConstant(LHS))
18010     std::swap(LHS, RHS);
18011 
18012   if (!isNullConstant(RHS))
18013     return SDValue();
18014 
18015   if (LHS.getOpcode() == ISD::SHL || LHS.getOpcode() == ISD::SRA ||
18016       LHS.getOpcode() == ISD::SRL)
18017     return SDValue();
18018 
18019   // Fold the compare into the branch instruction.
18020   SDValue BR;
18021   if (CC == AArch64CC::EQ)
18022     BR = DAG.getNode(AArch64ISD::CBZ, SDLoc(N), MVT::Other, Chain, LHS, Dest);
18023   else
18024     BR = DAG.getNode(AArch64ISD::CBNZ, SDLoc(N), MVT::Other, Chain, LHS, Dest);
18025 
18026   // Do not add new nodes to DAG combiner worklist.
18027   DCI.CombineTo(N, BR, false);
18028 
18029   return SDValue();
18030 }
18031 
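// Fold a CSEL that implements "X == 0 ? 0 : cttz(X)" (or the equivalent NE
// form) into (AND (cttz X), BitWidth - 1). This is valid because ISD::CTTZ is
// defined to return BitWidth for a zero input, and masking that with
// BitWidth - 1 yields the required 0 while leaving all other cttz results
// (0 .. BitWidth - 1) unchanged.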
18032 static SDValue foldCSELofCTTZ(SDNode *N, SelectionDAG &DAG) {
18033   unsigned CC = N->getConstantOperandVal(2);
18034   SDValue SUBS = N->getOperand(3);
18035   SDValue Zero, CTTZ;
18036 
18037   if (CC == AArch64CC::EQ && SUBS.getOpcode() == AArch64ISD::SUBS) {
18038     Zero = N->getOperand(0);
18039     CTTZ = N->getOperand(1);
18040   } else if (CC == AArch64CC::NE && SUBS.getOpcode() == AArch64ISD::SUBS) {
18041     Zero = N->getOperand(1);
18042     CTTZ = N->getOperand(0);
18043   } else
18044     return SDValue();
18045 
18046   if ((CTTZ.getOpcode() != ISD::CTTZ && CTTZ.getOpcode() != ISD::TRUNCATE) ||
18047       (CTTZ.getOpcode() == ISD::TRUNCATE &&
18048        CTTZ.getOperand(0).getOpcode() != ISD::CTTZ))
18049     return SDValue();
18050 
18051   assert((CTTZ.getValueType() == MVT::i32 || CTTZ.getValueType() == MVT::i64) &&
18052          "Illegal type in CTTZ folding");
18053 
18054   if (!isNullConstant(Zero) || !isNullConstant(SUBS.getOperand(1)))
18055     return SDValue();
18056 
18057   SDValue X = CTTZ.getOpcode() == ISD::TRUNCATE
18058                   ? CTTZ.getOperand(0).getOperand(0)
18059                   : CTTZ.getOperand(0);
18060 
18061   if (X != SUBS.getOperand(0))
18062     return SDValue();
18063 
18064   unsigned BitWidth = CTTZ.getOpcode() == ISD::TRUNCATE
18065                           ? CTTZ.getOperand(0).getValueSizeInBits()
18066                           : CTTZ.getValueSizeInBits();
18067   SDValue BitWidthMinusOne =
18068       DAG.getConstant(BitWidth - 1, SDLoc(N), CTTZ.getValueType());
18069   return DAG.getNode(ISD::AND, SDLoc(N), CTTZ.getValueType(), CTTZ,
18070                      BitWidthMinusOne);
18071 }
18072 
18073 // Optimize CSEL instructions
18074 static SDValue performCSELCombine(SDNode *N,
18075                                   TargetLowering::DAGCombinerInfo &DCI,
18076                                   SelectionDAG &DAG) {
18077   // CSEL x, x, cc -> x
18078   if (N->getOperand(0) == N->getOperand(1))
18079     return N->getOperand(0);
18080 
18081   // CSEL 0, cttz(X), eq(X, 0) -> AND cttz bitwidth-1
18082   // CSEL cttz(X), 0, ne(X, 0) -> AND cttz bitwidth-1
18083   if (SDValue Folded = foldCSELofCTTZ(N, DAG))
    return Folded;
18085 
18086   return performCONDCombine(N, DCI, DAG, 2, 3);
18087 }
18088 
// Try to re-use an already extended operand of a vector SetCC feeding an
// extended select. Doing so avoids requiring another full extension of the
// SET_CC result when lowering the select.
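// For example, if (v8i32 sign_extend X) is already present in the DAG, then a
//   (setcc (v8i16 X), (v8i16 splat C), setgt)
// that only feeds v8i32 vselects is rebuilt to compare the already extended
// v8i32 operands instead, so lowering the vselects does not need to widen the
// compare result a second time.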
18092 static SDValue tryToWidenSetCCOperands(SDNode *Op, SelectionDAG &DAG) {
18093   EVT Op0MVT = Op->getOperand(0).getValueType();
18094   if (!Op0MVT.isVector() || Op->use_empty())
18095     return SDValue();
18096 
18097   // Make sure that all uses of Op are VSELECTs with result matching types where
18098   // the result type has a larger element type than the SetCC operand.
18099   SDNode *FirstUse = *Op->use_begin();
18100   if (FirstUse->getOpcode() != ISD::VSELECT)
18101     return SDValue();
18102   EVT UseMVT = FirstUse->getValueType(0);
18103   if (UseMVT.getScalarSizeInBits() <= Op0MVT.getScalarSizeInBits())
18104     return SDValue();
18105   if (any_of(Op->uses(), [&UseMVT](const SDNode *N) {
18106         return N->getOpcode() != ISD::VSELECT || N->getValueType(0) != UseMVT;
18107       }))
18108     return SDValue();
18109 
18110   APInt V;
18111   if (!ISD::isConstantSplatVector(Op->getOperand(1).getNode(), V))
18112     return SDValue();
18113 
18114   SDLoc DL(Op);
18115   SDValue Op0ExtV;
18116   SDValue Op1ExtV;
18117   ISD::CondCode CC = cast<CondCodeSDNode>(Op->getOperand(2))->get();
18118   // Check if the first operand of the SET_CC is already extended. If it is,
18119   // split the SET_CC and re-use the extended version of the operand.
18120   SDNode *Op0SExt = DAG.getNodeIfExists(ISD::SIGN_EXTEND, DAG.getVTList(UseMVT),
18121                                         Op->getOperand(0));
18122   SDNode *Op0ZExt = DAG.getNodeIfExists(ISD::ZERO_EXTEND, DAG.getVTList(UseMVT),
18123                                         Op->getOperand(0));
18124   if (Op0SExt && (isSignedIntSetCC(CC) || isIntEqualitySetCC(CC))) {
18125     Op0ExtV = SDValue(Op0SExt, 0);
18126     Op1ExtV = DAG.getNode(ISD::SIGN_EXTEND, DL, UseMVT, Op->getOperand(1));
18127   } else if (Op0ZExt && (isUnsignedIntSetCC(CC) || isIntEqualitySetCC(CC))) {
18128     Op0ExtV = SDValue(Op0ZExt, 0);
18129     Op1ExtV = DAG.getNode(ISD::ZERO_EXTEND, DL, UseMVT, Op->getOperand(1));
18130   } else
18131     return SDValue();
18132 
18133   return DAG.getNode(ISD::SETCC, DL, UseMVT.changeVectorElementType(MVT::i1),
18134                      Op0ExtV, Op1ExtV, Op->getOperand(2));
18135 }
18136 
18137 static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG) {
18138   assert(N->getOpcode() == ISD::SETCC && "Unexpected opcode!");
18139   SDValue LHS = N->getOperand(0);
18140   SDValue RHS = N->getOperand(1);
18141   ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
18142   SDLoc DL(N);
18143   EVT VT = N->getValueType(0);
18144 
18145   if (SDValue V = tryToWidenSetCCOperands(N, DAG))
18146     return V;
18147 
18148   // setcc (csel 0, 1, cond, X), 1, ne ==> csel 0, 1, !cond, X
18149   if (Cond == ISD::SETNE && isOneConstant(RHS) &&
18150       LHS->getOpcode() == AArch64ISD::CSEL &&
18151       isNullConstant(LHS->getOperand(0)) && isOneConstant(LHS->getOperand(1)) &&
18152       LHS->hasOneUse()) {
18153     // Invert CSEL's condition.
18154     auto *OpCC = cast<ConstantSDNode>(LHS.getOperand(2));
18155     auto OldCond = static_cast<AArch64CC::CondCode>(OpCC->getZExtValue());
18156     auto NewCond = getInvertedCondCode(OldCond);
18157 
18158     // csel 0, 1, !cond, X
18159     SDValue CSEL =
18160         DAG.getNode(AArch64ISD::CSEL, DL, LHS.getValueType(), LHS.getOperand(0),
18161                     LHS.getOperand(1), DAG.getConstant(NewCond, DL, MVT::i32),
18162                     LHS.getOperand(3));
18163     return DAG.getZExtOrTrunc(CSEL, DL, VT);
18164   }
18165 
18166   // setcc (srl x, imm), 0, ne ==> setcc (and x, (-1 << imm)), 0, ne
18167   if (Cond == ISD::SETNE && isNullConstant(RHS) &&
18168       LHS->getOpcode() == ISD::SRL && isa<ConstantSDNode>(LHS->getOperand(1)) &&
18169       LHS->hasOneUse()) {
18170     EVT TstVT = LHS->getValueType(0);
18171     if (TstVT.isScalarInteger() && TstVT.getFixedSizeInBits() <= 64) {
18172       // this pattern will get better opt in emitComparison
18173       uint64_t TstImm = -1ULL << LHS->getConstantOperandVal(1);
18174       SDValue TST = DAG.getNode(ISD::AND, DL, TstVT, LHS->getOperand(0),
18175                                 DAG.getConstant(TstImm, DL, TstVT));
18176       return DAG.getNode(ISD::SETCC, DL, VT, TST, RHS, N->getOperand(2));
18177     }
18178   }
18179 
18180   return SDValue();
18181 }
18182 
// Replace a flag-setting operator (e.g. ANDS) with the generic version
// (e.g. AND) if the flag is unused.
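// For example, (ANDS x, y) whose flag result has no users becomes (AND x, y).
// Conversely, if a plain (AND x, y) with the same operands already exists, its
// uses are redirected to the value result of the flag-setting node.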
18185 static SDValue performFlagSettingCombine(SDNode *N,
18186                                          TargetLowering::DAGCombinerInfo &DCI,
18187                                          unsigned GenericOpcode) {
18188   SDLoc DL(N);
18189   SDValue LHS = N->getOperand(0);
18190   SDValue RHS = N->getOperand(1);
18191   EVT VT = N->getValueType(0);
18192 
18193   // If the flag result isn't used, convert back to a generic opcode.
18194   if (!N->hasAnyUseOfValue(1)) {
18195     SDValue Res = DCI.DAG.getNode(GenericOpcode, DL, VT, N->ops());
18196     return DCI.DAG.getMergeValues({Res, DCI.DAG.getConstant(0, DL, MVT::i32)},
18197                                   DL);
18198   }
18199 
18200   // Combine identical generic nodes into this node, re-using the result.
18201   if (SDNode *Generic = DCI.DAG.getNodeIfExists(
18202           GenericOpcode, DCI.DAG.getVTList(VT), {LHS, RHS}))
18203     DCI.CombineTo(Generic, SDValue(N, 0));
18204 
18205   return SDValue();
18206 }
18207 
18208 static SDValue performSetCCPunpkCombine(SDNode *N, SelectionDAG &DAG) {
18209   // setcc_merge_zero pred
18210   //   (sign_extend (extract_subvector (setcc_merge_zero ... pred ...))), 0, ne
18211   //   => extract_subvector (inner setcc_merge_zero)
18212   SDValue Pred = N->getOperand(0);
18213   SDValue LHS = N->getOperand(1);
18214   SDValue RHS = N->getOperand(2);
18215   ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(3))->get();
18216 
18217   if (Cond != ISD::SETNE || !isZerosVector(RHS.getNode()) ||
18218       LHS->getOpcode() != ISD::SIGN_EXTEND)
18219     return SDValue();
18220 
18221   SDValue Extract = LHS->getOperand(0);
18222   if (Extract->getOpcode() != ISD::EXTRACT_SUBVECTOR ||
18223       Extract->getValueType(0) != N->getValueType(0) ||
18224       Extract->getConstantOperandVal(1) != 0)
18225     return SDValue();
18226 
18227   SDValue InnerSetCC = Extract->getOperand(0);
18228   if (InnerSetCC->getOpcode() != AArch64ISD::SETCC_MERGE_ZERO)
18229     return SDValue();
18230 
18231   // By this point we've effectively got
18232   // zero_inactive_lanes_and_trunc_i1(sext_i1(A)). If we can prove A's inactive
18233   // lanes are already zero then the trunc(sext()) sequence is redundant and we
18234   // can operate on A directly.
18235   SDValue InnerPred = InnerSetCC.getOperand(0);
18236   if (Pred.getOpcode() == AArch64ISD::PTRUE &&
18237       InnerPred.getOpcode() == AArch64ISD::PTRUE &&
18238       Pred.getConstantOperandVal(0) == InnerPred.getConstantOperandVal(0) &&
18239       Pred->getConstantOperandVal(0) >= AArch64SVEPredPattern::vl1 &&
18240       Pred->getConstantOperandVal(0) <= AArch64SVEPredPattern::vl256)
18241     return Extract;
18242 
18243   return SDValue();
18244 }
18245 
18246 static SDValue
18247 performSetccMergeZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
18248   assert(N->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
18249          "Unexpected opcode!");
18250 
18251   SelectionDAG &DAG = DCI.DAG;
18252   SDValue Pred = N->getOperand(0);
18253   SDValue LHS = N->getOperand(1);
18254   SDValue RHS = N->getOperand(2);
18255   ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(3))->get();
18256 
18257   if (SDValue V = performSetCCPunpkCombine(N, DAG))
18258     return V;
18259 
18260   if (Cond == ISD::SETNE && isZerosVector(RHS.getNode()) &&
18261       LHS->getOpcode() == ISD::SIGN_EXTEND &&
18262       LHS->getOperand(0)->getValueType(0) == N->getValueType(0)) {
18263     //    setcc_merge_zero(
18264     //       pred, extend(setcc_merge_zero(pred, ...)), != splat(0))
18265     // => setcc_merge_zero(pred, ...)
18266     if (LHS->getOperand(0)->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO &&
18267         LHS->getOperand(0)->getOperand(0) == Pred)
18268       return LHS->getOperand(0);
18269 
18270     //    setcc_merge_zero(
18271     //        all_active, extend(nxvNi1 ...), != splat(0))
18272     // -> nxvNi1 ...
18273     if (isAllActivePredicate(DAG, Pred))
18274       return LHS->getOperand(0);
18275 
18276     //    setcc_merge_zero(
18277     //        pred, extend(nxvNi1 ...), != splat(0))
18278     // -> nxvNi1 and(pred, ...)
18279     if (DCI.isAfterLegalizeDAG())
18280       // Do this after legalization to allow more folds on setcc_merge_zero
18281       // to be recognized.
18282       return DAG.getNode(ISD::AND, SDLoc(N), N->getValueType(0),
18283                          LHS->getOperand(0), Pred);
18284   }
18285 
18286   return SDValue();
18287 }
18288 
18289 // Optimize some simple tbz/tbnz cases.  Returns the new operand and bit to test
18290 // as well as whether the test should be inverted.  This code is required to
18291 // catch these cases (as opposed to standard dag combines) because
18292 // AArch64ISD::TBZ is matched during legalization.
18293 static SDValue getTestBitOperand(SDValue Op, unsigned &Bit, bool &Invert,
18294                                  SelectionDAG &DAG) {
18295 
18296   if (!Op->hasOneUse())
18297     return Op;
18298 
18299   // We don't handle undef/constant-fold cases below, as they should have
18300   // already been taken care of (e.g. and of 0, test of undefined shifted bits,
18301   // etc.)
18302 
18303   // (tbz (trunc x), b) -> (tbz x, b)
18304   // This case is just here to enable more of the below cases to be caught.
18305   if (Op->getOpcode() == ISD::TRUNCATE &&
18306       Bit < Op->getValueType(0).getSizeInBits()) {
18307     return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18308   }
18309 
18310   // (tbz (any_ext x), b) -> (tbz x, b) if we don't use the extended bits.
18311   if (Op->getOpcode() == ISD::ANY_EXTEND &&
18312       Bit < Op->getOperand(0).getValueSizeInBits()) {
18313     return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18314   }
18315 
18316   if (Op->getNumOperands() != 2)
18317     return Op;
18318 
18319   auto *C = dyn_cast<ConstantSDNode>(Op->getOperand(1));
18320   if (!C)
18321     return Op;
18322 
18323   switch (Op->getOpcode()) {
18324   default:
18325     return Op;
18326 
18327   // (tbz (and x, m), b) -> (tbz x, b)
18328   case ISD::AND:
18329     if ((C->getZExtValue() >> Bit) & 1)
18330       return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18331     return Op;
18332 
18333   // (tbz (shl x, c), b) -> (tbz x, b-c)
18334   case ISD::SHL:
18335     if (C->getZExtValue() <= Bit &&
18336         (Bit - C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
18337       Bit = Bit - C->getZExtValue();
18338       return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18339     }
18340     return Op;
18341 
18342   // (tbz (sra x, c), b) -> (tbz x, b+c) or (tbz x, msb) if b+c is > # bits in x
18343   case ISD::SRA:
18344     Bit = Bit + C->getZExtValue();
18345     if (Bit >= Op->getValueType(0).getSizeInBits())
18346       Bit = Op->getValueType(0).getSizeInBits() - 1;
18347     return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18348 
18349   // (tbz (srl x, c), b) -> (tbz x, b+c)
18350   case ISD::SRL:
18351     if ((Bit + C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
18352       Bit = Bit + C->getZExtValue();
18353       return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18354     }
18355     return Op;
18356 
18357   // (tbz (xor x, -1), b) -> (tbnz x, b)
18358   case ISD::XOR:
18359     if ((C->getZExtValue() >> Bit) & 1)
18360       Invert = !Invert;
18361     return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG);
18362   }
18363 }
18364 
18365 // Optimize test single bit zero/non-zero and branch.
18366 static SDValue performTBZCombine(SDNode *N,
18367                                  TargetLowering::DAGCombinerInfo &DCI,
18368                                  SelectionDAG &DAG) {
18369   unsigned Bit = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
18370   bool Invert = false;
18371   SDValue TestSrc = N->getOperand(1);
18372   SDValue NewTestSrc = getTestBitOperand(TestSrc, Bit, Invert, DAG);
18373 
18374   if (TestSrc == NewTestSrc)
18375     return SDValue();
18376 
18377   unsigned NewOpc = N->getOpcode();
18378   if (Invert) {
18379     if (NewOpc == AArch64ISD::TBZ)
18380       NewOpc = AArch64ISD::TBNZ;
18381     else {
18382       assert(NewOpc == AArch64ISD::TBNZ);
18383       NewOpc = AArch64ISD::TBZ;
18384     }
18385   }
18386 
18387   SDLoc DL(N);
18388   return DAG.getNode(NewOpc, DL, MVT::Other, N->getOperand(0), NewTestSrc,
18389                      DAG.getConstant(Bit, DL, MVT::i64), N->getOperand(3));
18390 }
18391 
// Swap vselect operands where doing so may allow a predicated operation to
// perform the `sel` itself.
18394 //
18395 //     (vselect (setcc ( condcode) (_) (_)) (a)          (op (a) (b)))
18396 //  => (vselect (setcc (!condcode) (_) (_)) (op (a) (b)) (a))
18397 static SDValue trySwapVSelectOperands(SDNode *N, SelectionDAG &DAG) {
18398   auto SelectA = N->getOperand(1);
18399   auto SelectB = N->getOperand(2);
18400   auto NTy = N->getValueType(0);
18401 
18402   if (!NTy.isScalableVector())
18403     return SDValue();
18404   SDValue SetCC = N->getOperand(0);
18405   if (SetCC.getOpcode() != ISD::SETCC || !SetCC.hasOneUse())
18406     return SDValue();
18407 
18408   switch (SelectB.getOpcode()) {
18409   default:
18410     return SDValue();
18411   case ISD::FMUL:
18412   case ISD::FSUB:
18413   case ISD::FADD:
18414     break;
18415   }
18416   if (SelectA != SelectB.getOperand(0))
18417     return SDValue();
18418 
18419   ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
18420   ISD::CondCode InverseCC =
18421       ISD::getSetCCInverse(CC, SetCC.getOperand(0).getValueType());
18422   auto InverseSetCC =
18423       DAG.getSetCC(SDLoc(SetCC), SetCC.getValueType(), SetCC.getOperand(0),
18424                    SetCC.getOperand(1), InverseCC);
18425 
18426   return DAG.getNode(ISD::VSELECT, SDLoc(N), NTy,
18427                      {InverseSetCC, SelectB, SelectA});
18428 }
18429 
18430 // vselect (v1i1 setcc) ->
18431 //     vselect (v1iXX setcc)  (XX is the size of the compared operand type)
18432 // FIXME: Currently the type legalizer can't handle VSELECT having v1i1 as
18433 // condition. If it can legalize "VSELECT v1i1" correctly, no need to combine
18434 // such VSELECT.
18435 static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG) {
18436   if (auto SwapResult = trySwapVSelectOperands(N, DAG))
18437     return SwapResult;
18438 
18439   SDValue N0 = N->getOperand(0);
18440   EVT CCVT = N0.getValueType();
18441 
18442   if (isAllActivePredicate(DAG, N0))
18443     return N->getOperand(1);
18444 
18445   if (isAllInactivePredicate(N0))
18446     return N->getOperand(2);
18447 
  // Check for sign pattern (VSELECT setgt, iN lhs, -1, 1, -1) and transform
  // into (OR (ASR lhs, N-1), 1), which requires fewer instructions for the
  // supported types.
18451   SDValue SetCC = N->getOperand(0);
18452   if (SetCC.getOpcode() == ISD::SETCC &&
18453       SetCC.getOperand(2) == DAG.getCondCode(ISD::SETGT)) {
18454     SDValue CmpLHS = SetCC.getOperand(0);
18455     EVT VT = CmpLHS.getValueType();
18456     SDNode *CmpRHS = SetCC.getOperand(1).getNode();
18457     SDNode *SplatLHS = N->getOperand(1).getNode();
18458     SDNode *SplatRHS = N->getOperand(2).getNode();
18459     APInt SplatLHSVal;
18460     if (CmpLHS.getValueType() == N->getOperand(1).getValueType() &&
18461         VT.isSimple() &&
18462         is_contained(
18463             makeArrayRef({MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
18464                           MVT::v2i32, MVT::v4i32, MVT::v2i64}),
18465             VT.getSimpleVT().SimpleTy) &&
18466         ISD::isConstantSplatVector(SplatLHS, SplatLHSVal) &&
18467         SplatLHSVal.isOne() && ISD::isConstantSplatVectorAllOnes(CmpRHS) &&
18468         ISD::isConstantSplatVectorAllOnes(SplatRHS)) {
18469       unsigned NumElts = VT.getVectorNumElements();
18470       SmallVector<SDValue, 8> Ops(
18471           NumElts, DAG.getConstant(VT.getScalarSizeInBits() - 1, SDLoc(N),
18472                                    VT.getScalarType()));
18473       SDValue Val = DAG.getBuildVector(VT, SDLoc(N), Ops);
18474 
18475       auto Shift = DAG.getNode(ISD::SRA, SDLoc(N), VT, CmpLHS, Val);
18476       auto Or = DAG.getNode(ISD::OR, SDLoc(N), VT, Shift, N->getOperand(1));
18477       return Or;
18478     }
18479   }
18480 
18481   if (N0.getOpcode() != ISD::SETCC ||
18482       CCVT.getVectorElementCount() != ElementCount::getFixed(1) ||
18483       CCVT.getVectorElementType() != MVT::i1)
18484     return SDValue();
18485 
18486   EVT ResVT = N->getValueType(0);
18487   EVT CmpVT = N0.getOperand(0).getValueType();
18488   // Only combine when the result type is of the same size as the compared
18489   // operands.
18490   if (ResVT.getSizeInBits() != CmpVT.getSizeInBits())
18491     return SDValue();
18492 
18493   SDValue IfTrue = N->getOperand(1);
18494   SDValue IfFalse = N->getOperand(2);
18495   SetCC = DAG.getSetCC(SDLoc(N), CmpVT.changeVectorElementTypeToInteger(),
18496                        N0.getOperand(0), N0.getOperand(1),
18497                        cast<CondCodeSDNode>(N0.getOperand(2))->get());
18498   return DAG.getNode(ISD::VSELECT, SDLoc(N), ResVT, SetCC,
18499                      IfTrue, IfFalse);
18500 }
18501 
18502 /// A vector select: "(select vL, vR, (setcc LHS, RHS))" is best performed with
18503 /// the compare-mask instructions rather than going via NZCV, even if LHS and
18504 /// RHS are really scalar. This replaces any scalar setcc in the above pattern
18505 /// with a vector one followed by a DUP shuffle on the result.
18506 static SDValue performSelectCombine(SDNode *N,
18507                                     TargetLowering::DAGCombinerInfo &DCI) {
18508   SelectionDAG &DAG = DCI.DAG;
18509   SDValue N0 = N->getOperand(0);
18510   EVT ResVT = N->getValueType(0);
18511 
18512   if (N0.getOpcode() != ISD::SETCC)
18513     return SDValue();
18514 
18515   if (ResVT.isScalableVector())
18516     return SDValue();
18517 
18518   // Make sure the SETCC result is either i1 (initial DAG), or i32, the lowered
18519   // scalar SetCCResultType. We also don't expect vectors, because we assume
18520   // that selects fed by vector SETCCs are canonicalized to VSELECT.
18521   assert((N0.getValueType() == MVT::i1 || N0.getValueType() == MVT::i32) &&
18522          "Scalar-SETCC feeding SELECT has unexpected result type!");
18523 
  // If NumMaskElts == 0, the comparison is larger than the select result. The
  // largest real NEON comparison is 64 bits per lane, which means the result
  // is at most 32 bits and an illegal vector. Just bail out for now.
18527   EVT SrcVT = N0.getOperand(0).getValueType();
18528 
18529   // Don't try to do this optimization when the setcc itself has i1 operands.
18530   // There are no legal vectors of i1, so this would be pointless.
18531   if (SrcVT == MVT::i1)
18532     return SDValue();
18533 
18534   int NumMaskElts = ResVT.getSizeInBits() / SrcVT.getSizeInBits();
18535   if (!ResVT.isVector() || NumMaskElts == 0)
18536     return SDValue();
18537 
18538   SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumMaskElts);
18539   EVT CCVT = SrcVT.changeVectorElementTypeToInteger();
18540 
18541   // Also bail out if the vector CCVT isn't the same size as ResVT.
18542   // This can happen if the SETCC operand size doesn't divide the ResVT size
18543   // (e.g., f64 vs v3f32).
18544   if (CCVT.getSizeInBits() != ResVT.getSizeInBits())
18545     return SDValue();
18546 
18547   // Make sure we didn't create illegal types, if we're not supposed to.
18548   assert(DCI.isBeforeLegalize() ||
18549          DAG.getTargetLoweringInfo().isTypeLegal(SrcVT));
18550 
18551   // First perform a vector comparison, where lane 0 is the one we're interested
18552   // in.
18553   SDLoc DL(N0);
18554   SDValue LHS =
18555       DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(0));
18556   SDValue RHS =
18557       DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(1));
18558   SDValue SetCC = DAG.getNode(ISD::SETCC, DL, CCVT, LHS, RHS, N0.getOperand(2));
18559 
18560   // Now duplicate the comparison mask we want across all other lanes.
18561   SmallVector<int, 8> DUPMask(CCVT.getVectorNumElements(), 0);
18562   SDValue Mask = DAG.getVectorShuffle(CCVT, DL, SetCC, SetCC, DUPMask);
18563   Mask = DAG.getNode(ISD::BITCAST, DL,
18564                      ResVT.changeVectorElementTypeToInteger(), Mask);
18565 
18566   return DAG.getSelect(DL, ResVT, Mask, N->getOperand(1), N->getOperand(2));
18567 }
18568 
18569 static SDValue performDUPCombine(SDNode *N,
18570                                  TargetLowering::DAGCombinerInfo &DCI) {
18571   EVT VT = N->getValueType(0);
18572   // If "v2i32 DUP(x)" and "v4i32 DUP(x)" both exist, use an extract from the
18573   // 128bit vector version.
18574   if (VT.is64BitVector() && DCI.isAfterLegalizeDAG()) {
18575     EVT LVT = VT.getDoubleNumVectorElementsVT(*DCI.DAG.getContext());
18576     if (SDNode *LN = DCI.DAG.getNodeIfExists(
18577             N->getOpcode(), DCI.DAG.getVTList(LVT), {N->getOperand(0)})) {
18578       SDLoc DL(N);
18579       return DCI.DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SDValue(LN, 0),
18580                              DCI.DAG.getConstant(0, DL, MVT::i64));
18581     }
18582   }
18583 
18584   return performPostLD1Combine(N, DCI, false);
18585 }
18586 
18587 /// Get rid of unnecessary NVCASTs (that don't change the type).
18588 static SDValue performNVCASTCombine(SDNode *N) {
18589   if (N->getValueType(0) == N->getOperand(0).getValueType())
18590     return N->getOperand(0);
18591 
18592   return SDValue();
18593 }
18594 
18595 // If all users of the globaladdr are of the form (globaladdr + constant), find
18596 // the smallest constant, fold it into the globaladdr's offset and rewrite the
18597 // globaladdr as (globaladdr + constant) - constant.
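// For example, if the uses are (add G, 24) and (add G, 40), G is rewritten as
// ((G + 24) - 24); constant folding then turns the uses into (G + 24) and
// ((G + 24) + 16), so the smallest offset is carried by the global address
// itself.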
18598 static SDValue performGlobalAddressCombine(SDNode *N, SelectionDAG &DAG,
18599                                            const AArch64Subtarget *Subtarget,
18600                                            const TargetMachine &TM) {
18601   auto *GN = cast<GlobalAddressSDNode>(N);
18602   if (Subtarget->ClassifyGlobalReference(GN->getGlobal(), TM) !=
18603       AArch64II::MO_NO_FLAG)
18604     return SDValue();
18605 
18606   uint64_t MinOffset = -1ull;
18607   for (SDNode *N : GN->uses()) {
18608     if (N->getOpcode() != ISD::ADD)
18609       return SDValue();
18610     auto *C = dyn_cast<ConstantSDNode>(N->getOperand(0));
18611     if (!C)
18612       C = dyn_cast<ConstantSDNode>(N->getOperand(1));
18613     if (!C)
18614       return SDValue();
18615     MinOffset = std::min(MinOffset, C->getZExtValue());
18616   }
18617   uint64_t Offset = MinOffset + GN->getOffset();
18618 
18619   // Require that the new offset is larger than the existing one. Otherwise, we
18620   // can end up oscillating between two possible DAGs, for example,
18621   // (add (add globaladdr + 10, -1), 1) and (add globaladdr + 9, 1).
18622   if (Offset <= uint64_t(GN->getOffset()))
18623     return SDValue();
18624 
18625   // Check whether folding this offset is legal. It must not go out of bounds of
18626   // the referenced object to avoid violating the code model, and must be
18627   // smaller than 2^20 because this is the largest offset expressible in all
18628   // object formats. (The IMAGE_REL_ARM64_PAGEBASE_REL21 relocation in COFF
18629   // stores an immediate signed 21 bit offset.)
18630   //
18631   // This check also prevents us from folding negative offsets, which will end
18632   // up being treated in the same way as large positive ones. They could also
18633   // cause code model violations, and aren't really common enough to matter.
18634   if (Offset >= (1 << 20))
18635     return SDValue();
18636 
18637   const GlobalValue *GV = GN->getGlobal();
18638   Type *T = GV->getValueType();
18639   if (!T->isSized() ||
18640       Offset > GV->getParent()->getDataLayout().getTypeAllocSize(T))
18641     return SDValue();
18642 
18643   SDLoc DL(GN);
18644   SDValue Result = DAG.getGlobalAddress(GV, DL, MVT::i64, Offset);
18645   return DAG.getNode(ISD::SUB, DL, MVT::i64, Result,
18646                      DAG.getConstant(MinOffset, DL, MVT::i64));
18647 }
18648 
// Turns the vector of indices into a vector of byte offsets by scaling Offset
// by (BitWidth / 8).
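// For example, with 32-bit elements each index is shifted left by 2, i.e.
// multiplied by 4 bytes.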
18651 static SDValue getScaledOffsetForBitWidth(SelectionDAG &DAG, SDValue Offset,
18652                                           SDLoc DL, unsigned BitWidth) {
18653   assert(Offset.getValueType().isScalableVector() &&
18654          "This method is only for scalable vectors of offsets");
18655 
18656   SDValue Shift = DAG.getConstant(Log2_32(BitWidth / 8), DL, MVT::i64);
18657   SDValue SplatShift = DAG.getNode(ISD::SPLAT_VECTOR, DL, MVT::nxv2i64, Shift);
18658 
18659   return DAG.getNode(ISD::SHL, DL, MVT::nxv2i64, Offset, SplatShift);
18660 }
18661 
18662 /// Check if the value of \p OffsetInBytes can be used as an immediate for
18663 /// the gather load/prefetch and scatter store instructions with vector base and
18664 /// immediate offset addressing mode:
18665 ///
18666 ///      [<Zn>.[S|D]{, #<imm>}]
18667 ///
18668 /// where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.
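/// For example, with 4-byte elements the valid immediates are 0, 4, 8, ..., 124.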
18669 inline static bool isValidImmForSVEVecImmAddrMode(unsigned OffsetInBytes,
18670                                                   unsigned ScalarSizeInBytes) {
18671   // The immediate is not a multiple of the scalar size.
18672   if (OffsetInBytes % ScalarSizeInBytes)
18673     return false;
18674 
18675   // The immediate is out of range.
18676   if (OffsetInBytes / ScalarSizeInBytes > 31)
18677     return false;
18678 
18679   return true;
18680 }
18681 
18682 /// Check if the value of \p Offset represents a valid immediate for the SVE
/// gather load/prefetch and scatter store instructions with vector base and
18684 /// immediate offset addressing mode:
18685 ///
18686 ///      [<Zn>.[S|D]{, #<imm>}]
18687 ///
18688 /// where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.
18689 static bool isValidImmForSVEVecImmAddrMode(SDValue Offset,
18690                                            unsigned ScalarSizeInBytes) {
18691   ConstantSDNode *OffsetConst = dyn_cast<ConstantSDNode>(Offset.getNode());
18692   return OffsetConst && isValidImmForSVEVecImmAddrMode(
18693                             OffsetConst->getZExtValue(), ScalarSizeInBytes);
18694 }
18695 
18696 static SDValue performScatterStoreCombine(SDNode *N, SelectionDAG &DAG,
18697                                           unsigned Opcode,
18698                                           bool OnlyPackedOffsets = true) {
18699   const SDValue Src = N->getOperand(2);
18700   const EVT SrcVT = Src->getValueType(0);
18701   assert(SrcVT.isScalableVector() &&
18702          "Scatter stores are only possible for SVE vectors");
18703 
18704   SDLoc DL(N);
18705   MVT SrcElVT = SrcVT.getVectorElementType().getSimpleVT();
18706 
18707   // Make sure that source data will fit into an SVE register
18708   if (SrcVT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
18709     return SDValue();
18710 
18711   // For FPs, ACLE only supports _packed_ single and double precision types.
18712   if (SrcElVT.isFloatingPoint())
18713     if ((SrcVT != MVT::nxv4f32) && (SrcVT != MVT::nxv2f64))
18714       return SDValue();
18715 
18716   // Depending on the addressing mode, this is either a pointer or a vector of
18717   // pointers (that fits into one register)
18718   SDValue Base = N->getOperand(4);
18719   // Depending on the addressing mode, this is either a single offset or a
18720   // vector of offsets  (that fits into one register)
18721   SDValue Offset = N->getOperand(5);
18722 
18723   // For "scalar + vector of indices", just scale the indices. This only
18724   // applies to non-temporal scatters because there's no instruction that takes
  // indices.
18726   if (Opcode == AArch64ISD::SSTNT1_INDEX_PRED) {
18727     Offset =
18728         getScaledOffsetForBitWidth(DAG, Offset, DL, SrcElVT.getSizeInBits());
18729     Opcode = AArch64ISD::SSTNT1_PRED;
18730   }
18731 
  // In the case of non-temporal scatter stores there's only one SVE instruction
  // per data-size: "scalar + vector", i.e.
  //    * stnt1{b|h|w|d} { z0.s }, p0, [z0.s, x0]
18735   // Since we do have intrinsics that allow the arguments to be in a different
18736   // order, we may need to swap them to match the spec.
18737   if (Opcode == AArch64ISD::SSTNT1_PRED && Offset.getValueType().isVector())
18738     std::swap(Base, Offset);
18739 
18740   // SST1_IMM requires that the offset is an immediate that is:
18741   //    * a multiple of #SizeInBytes,
18742   //    * in the range [0, 31 x #SizeInBytes],
18743   // where #SizeInBytes is the size in bytes of the stored items. For
18744   // immediates outside that range and non-immediate scalar offsets use SST1 or
18745   // SST1_UXTW instead.
18746   if (Opcode == AArch64ISD::SST1_IMM_PRED) {
18747     if (!isValidImmForSVEVecImmAddrMode(Offset,
18748                                         SrcVT.getScalarSizeInBits() / 8)) {
18749       if (MVT::nxv4i32 == Base.getValueType().getSimpleVT().SimpleTy)
18750         Opcode = AArch64ISD::SST1_UXTW_PRED;
18751       else
18752         Opcode = AArch64ISD::SST1_PRED;
18753 
18754       std::swap(Base, Offset);
18755     }
18756   }
18757 
18758   auto &TLI = DAG.getTargetLoweringInfo();
18759   if (!TLI.isTypeLegal(Base.getValueType()))
18760     return SDValue();
18761 
  // Some scatter store variants allow unpacked offsets, but only as nxv2i32
  // vectors. These are implicitly sign- (sxtw) or zero- (uxtw) extended to
  // nxv2i64. Legalize accordingly.
18765   if (!OnlyPackedOffsets &&
18766       Offset.getValueType().getSimpleVT().SimpleTy == MVT::nxv2i32)
18767     Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset).getValue(0);
18768 
18769   if (!TLI.isTypeLegal(Offset.getValueType()))
18770     return SDValue();
18771 
18772   // Source value type that is representable in hardware
18773   EVT HwSrcVt = getSVEContainerType(SrcVT);
18774 
18775   // Keep the original type of the input data to store - this is needed to be
18776   // able to select the correct instruction, e.g. ST1B, ST1H, ST1W and ST1D. For
18777   // FP values we want the integer equivalent, so just use HwSrcVt.
18778   SDValue InputVT = DAG.getValueType(SrcVT);
18779   if (SrcVT.isFloatingPoint())
18780     InputVT = DAG.getValueType(HwSrcVt);
18781 
18782   SDVTList VTs = DAG.getVTList(MVT::Other);
18783   SDValue SrcNew;
18784 
18785   if (Src.getValueType().isFloatingPoint())
18786     SrcNew = DAG.getNode(ISD::BITCAST, DL, HwSrcVt, Src);
18787   else
18788     SrcNew = DAG.getNode(ISD::ANY_EXTEND, DL, HwSrcVt, Src);
18789 
18790   SDValue Ops[] = {N->getOperand(0), // Chain
18791                    SrcNew,
18792                    N->getOperand(3), // Pg
18793                    Base,
18794                    Offset,
18795                    InputVT};
18796 
18797   return DAG.getNode(Opcode, DL, VTs, Ops);
18798 }
18799 
18800 static SDValue performGatherLoadCombine(SDNode *N, SelectionDAG &DAG,
18801                                         unsigned Opcode,
18802                                         bool OnlyPackedOffsets = true) {
18803   const EVT RetVT = N->getValueType(0);
18804   assert(RetVT.isScalableVector() &&
18805          "Gather loads are only possible for SVE vectors");
18806 
18807   SDLoc DL(N);
18808 
18809   // Make sure that the loaded data will fit into an SVE register
18810   if (RetVT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
18811     return SDValue();
18812 
18813   // Depending on the addressing mode, this is either a pointer or a vector of
18814   // pointers (that fits into one register)
18815   SDValue Base = N->getOperand(3);
18816   // Depending on the addressing mode, this is either a single offset or a
18817   // vector of offsets  (that fits into one register)
18818   SDValue Offset = N->getOperand(4);
18819 
18820   // For "scalar + vector of indices", just scale the indices. This only
18821   // applies to non-temporal gathers because there's no instruction that takes
  // indices.
18823   if (Opcode == AArch64ISD::GLDNT1_INDEX_MERGE_ZERO) {
18824     Offset = getScaledOffsetForBitWidth(DAG, Offset, DL,
18825                                         RetVT.getScalarSizeInBits());
18826     Opcode = AArch64ISD::GLDNT1_MERGE_ZERO;
18827   }
18828 
18829   // In the case of non-temporal gather loads there's only one SVE instruction
18830   // per data-size: "scalar + vector", i.e.
18831   //    * ldnt1{b|h|w|d} { z0.s }, p0/z, [z0.s, x0]
18832   // Since we do have intrinsics that allow the arguments to be in a different
18833   // order, we may need to swap them to match the spec.
18834   if (Opcode == AArch64ISD::GLDNT1_MERGE_ZERO &&
18835       Offset.getValueType().isVector())
18836     std::swap(Base, Offset);
18837 
18838   // GLD{FF}1_IMM requires that the offset is an immediate that is:
18839   //    * a multiple of #SizeInBytes,
18840   //    * in the range [0, 31 x #SizeInBytes],
18841   // where #SizeInBytes is the size in bytes of the loaded items. For
18842   // immediates outside that range and non-immediate scalar offsets use
18843   // GLD1_MERGE_ZERO or GLD1_UXTW_MERGE_ZERO instead.
18844   if (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO ||
18845       Opcode == AArch64ISD::GLDFF1_IMM_MERGE_ZERO) {
18846     if (!isValidImmForSVEVecImmAddrMode(Offset,
18847                                         RetVT.getScalarSizeInBits() / 8)) {
18848       if (MVT::nxv4i32 == Base.getValueType().getSimpleVT().SimpleTy)
18849         Opcode = (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO)
18850                      ? AArch64ISD::GLD1_UXTW_MERGE_ZERO
18851                      : AArch64ISD::GLDFF1_UXTW_MERGE_ZERO;
18852       else
18853         Opcode = (Opcode == AArch64ISD::GLD1_IMM_MERGE_ZERO)
18854                      ? AArch64ISD::GLD1_MERGE_ZERO
18855                      : AArch64ISD::GLDFF1_MERGE_ZERO;
18856 
18857       std::swap(Base, Offset);
18858     }
18859   }
18860 
18861   auto &TLI = DAG.getTargetLoweringInfo();
18862   if (!TLI.isTypeLegal(Base.getValueType()))
18863     return SDValue();
18864 
  // Some gather load variants allow unpacked offsets, but only as nxv2i32
  // vectors. These are implicitly sign- (sxtw) or zero- (uxtw) extended to
  // nxv2i64. Legalize accordingly.
18868   if (!OnlyPackedOffsets &&
18869       Offset.getValueType().getSimpleVT().SimpleTy == MVT::nxv2i32)
18870     Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset).getValue(0);
18871 
18872   // Return value type that is representable in hardware
18873   EVT HwRetVt = getSVEContainerType(RetVT);
18874 
18875   // Keep the original output value type around - this is needed to be able to
18876   // select the correct instruction, e.g. LD1B, LD1H, LD1W and LD1D. For FP
  // values we want the integer equivalent, so just use HwRetVt.
18878   SDValue OutVT = DAG.getValueType(RetVT);
18879   if (RetVT.isFloatingPoint())
18880     OutVT = DAG.getValueType(HwRetVt);
18881 
18882   SDVTList VTs = DAG.getVTList(HwRetVt, MVT::Other);
18883   SDValue Ops[] = {N->getOperand(0), // Chain
18884                    N->getOperand(2), // Pg
18885                    Base, Offset, OutVT};
18886 
18887   SDValue Load = DAG.getNode(Opcode, DL, VTs, Ops);
18888   SDValue LoadChain = SDValue(Load.getNode(), 1);
18889 
18890   if (RetVT.isInteger() && (RetVT != HwRetVt))
18891     Load = DAG.getNode(ISD::TRUNCATE, DL, RetVT, Load.getValue(0));
18892 
18893   // If the original return value was FP, bitcast accordingly. Doing it here
18894   // means that we can avoid adding TableGen patterns for FPs.
18895   if (RetVT.isFloatingPoint())
18896     Load = DAG.getNode(ISD::BITCAST, DL, RetVT, Load.getValue(0));
18897 
18898   return DAG.getMergeValues({Load, LoadChain}, DL);
18899 }
18900 
18901 static SDValue
18902 performSignExtendInRegCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
18903                               SelectionDAG &DAG) {
18904   SDLoc DL(N);
18905   SDValue Src = N->getOperand(0);
18906   unsigned Opc = Src->getOpcode();
18907 
18908   // Sign extend of an unsigned unpack -> signed unpack
18909   if (Opc == AArch64ISD::UUNPKHI || Opc == AArch64ISD::UUNPKLO) {
18910 
18911     unsigned SOpc = Opc == AArch64ISD::UUNPKHI ? AArch64ISD::SUNPKHI
18912                                                : AArch64ISD::SUNPKLO;
18913 
18914     // Push the sign extend to the operand of the unpack
18915     // This is necessary where, for example, the operand of the unpack
18916     // is another unpack:
18917     // 4i32 sign_extend_inreg (4i32 uunpklo(8i16 uunpklo (16i8 opnd)), from 4i8)
18918     // ->
18919     // 4i32 sunpklo (8i16 sign_extend_inreg(8i16 uunpklo (16i8 opnd), from 8i8)
18920     // ->
18921     // 4i32 sunpklo(8i16 sunpklo(16i8 opnd))
18922     SDValue ExtOp = Src->getOperand(0);
18923     auto VT = cast<VTSDNode>(N->getOperand(1))->getVT();
18924     EVT EltTy = VT.getVectorElementType();
18925     (void)EltTy;
18926 
18927     assert((EltTy == MVT::i8 || EltTy == MVT::i16 || EltTy == MVT::i32) &&
18928            "Sign extending from an invalid type");
18929 
18930     EVT ExtVT = VT.getDoubleNumVectorElementsVT(*DAG.getContext());
18931 
18932     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ExtOp.getValueType(),
18933                               ExtOp, DAG.getValueType(ExtVT));
18934 
18935     return DAG.getNode(SOpc, DL, N->getValueType(0), Ext);
18936   }
18937 
18938   if (DCI.isBeforeLegalizeOps())
18939     return SDValue();
18940 
18941   if (!EnableCombineMGatherIntrinsics)
18942     return SDValue();
18943 
18944   // SVE load nodes (e.g. AArch64ISD::GLD1) are straightforward candidates
18945   // for DAG Combine with SIGN_EXTEND_INREG. Bail out for all other nodes.
18946   unsigned NewOpc;
18947   unsigned MemVTOpNum = 4;
18948   switch (Opc) {
18949   case AArch64ISD::LD1_MERGE_ZERO:
18950     NewOpc = AArch64ISD::LD1S_MERGE_ZERO;
18951     MemVTOpNum = 3;
18952     break;
18953   case AArch64ISD::LDNF1_MERGE_ZERO:
18954     NewOpc = AArch64ISD::LDNF1S_MERGE_ZERO;
18955     MemVTOpNum = 3;
18956     break;
18957   case AArch64ISD::LDFF1_MERGE_ZERO:
18958     NewOpc = AArch64ISD::LDFF1S_MERGE_ZERO;
18959     MemVTOpNum = 3;
18960     break;
18961   case AArch64ISD::GLD1_MERGE_ZERO:
18962     NewOpc = AArch64ISD::GLD1S_MERGE_ZERO;
18963     break;
18964   case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
18965     NewOpc = AArch64ISD::GLD1S_SCALED_MERGE_ZERO;
18966     break;
18967   case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
18968     NewOpc = AArch64ISD::GLD1S_SXTW_MERGE_ZERO;
18969     break;
18970   case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
18971     NewOpc = AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO;
18972     break;
18973   case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
18974     NewOpc = AArch64ISD::GLD1S_UXTW_MERGE_ZERO;
18975     break;
18976   case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
18977     NewOpc = AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO;
18978     break;
18979   case AArch64ISD::GLD1_IMM_MERGE_ZERO:
18980     NewOpc = AArch64ISD::GLD1S_IMM_MERGE_ZERO;
18981     break;
18982   case AArch64ISD::GLDFF1_MERGE_ZERO:
18983     NewOpc = AArch64ISD::GLDFF1S_MERGE_ZERO;
18984     break;
18985   case AArch64ISD::GLDFF1_SCALED_MERGE_ZERO:
18986     NewOpc = AArch64ISD::GLDFF1S_SCALED_MERGE_ZERO;
18987     break;
18988   case AArch64ISD::GLDFF1_SXTW_MERGE_ZERO:
18989     NewOpc = AArch64ISD::GLDFF1S_SXTW_MERGE_ZERO;
18990     break;
18991   case AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO:
18992     NewOpc = AArch64ISD::GLDFF1S_SXTW_SCALED_MERGE_ZERO;
18993     break;
18994   case AArch64ISD::GLDFF1_UXTW_MERGE_ZERO:
18995     NewOpc = AArch64ISD::GLDFF1S_UXTW_MERGE_ZERO;
18996     break;
18997   case AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO:
18998     NewOpc = AArch64ISD::GLDFF1S_UXTW_SCALED_MERGE_ZERO;
18999     break;
19000   case AArch64ISD::GLDFF1_IMM_MERGE_ZERO:
19001     NewOpc = AArch64ISD::GLDFF1S_IMM_MERGE_ZERO;
19002     break;
19003   case AArch64ISD::GLDNT1_MERGE_ZERO:
19004     NewOpc = AArch64ISD::GLDNT1S_MERGE_ZERO;
19005     break;
19006   default:
19007     return SDValue();
19008   }
19009 
19010   EVT SignExtSrcVT = cast<VTSDNode>(N->getOperand(1))->getVT();
19011   EVT SrcMemVT = cast<VTSDNode>(Src->getOperand(MemVTOpNum))->getVT();
19012 
19013   if ((SignExtSrcVT != SrcMemVT) || !Src.hasOneUse())
19014     return SDValue();
19015 
19016   EVT DstVT = N->getValueType(0);
19017   SDVTList VTs = DAG.getVTList(DstVT, MVT::Other);
19018 
19019   SmallVector<SDValue, 5> Ops;
19020   for (unsigned I = 0; I < Src->getNumOperands(); ++I)
19021     Ops.push_back(Src->getOperand(I));
19022 
19023   SDValue ExtLoad = DAG.getNode(NewOpc, SDLoc(N), VTs, Ops);
19024   DCI.CombineTo(N, ExtLoad);
19025   DCI.CombineTo(Src.getNode(), ExtLoad, ExtLoad.getValue(1));
19026 
19027   // Return N so it doesn't get rechecked
19028   return SDValue(N, 0);
19029 }
19030 
19031 /// Legalize the gather prefetch (scalar + vector addressing mode) when the
19032 /// offset vector is an unpacked 32-bit scalable vector. The other cases (Offset
19033 /// != nxv2i32) do not need legalization.
19034 static SDValue legalizeSVEGatherPrefetchOffsVec(SDNode *N, SelectionDAG &DAG) {
19035   const unsigned OffsetPos = 4;
19036   SDValue Offset = N->getOperand(OffsetPos);
19037 
19038   // Not an unpacked vector, bail out.
19039   if (Offset.getValueType().getSimpleVT().SimpleTy != MVT::nxv2i32)
19040     return SDValue();
19041 
19042   // Extend the unpacked offset vector to 64-bit lanes.
19043   SDLoc DL(N);
19044   Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset);
19045   SmallVector<SDValue, 5> Ops(N->op_begin(), N->op_end());
19046   // Replace the offset operand with the 64-bit one.
19047   Ops[OffsetPos] = Offset;
19048 
19049   return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops);
19050 }
19051 
19052 /// Combines a node carrying the intrinsic
19053 /// `aarch64_sve_prf<T>_gather_scalar_offset` into a node that uses
19054 /// `aarch64_sve_prfb_gather_uxtw_index` when the scalar offset passed to
19055 /// `aarch64_sve_prf<T>_gather_scalar_offset` is not a valid immediate for the
/// SVE gather prefetch instruction with vector plus immediate addressing mode.
19057 static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG,
19058                                                unsigned ScalarSizeInBytes) {
19059   const unsigned ImmPos = 4, OffsetPos = 3;
19060   // No need to combine the node if the immediate is valid...
19061   if (isValidImmForSVEVecImmAddrMode(N->getOperand(ImmPos), ScalarSizeInBytes))
19062     return SDValue();
19063 
19064   // ...otherwise swap the offset base with the offset...
19065   SmallVector<SDValue, 5> Ops(N->op_begin(), N->op_end());
19066   std::swap(Ops[ImmPos], Ops[OffsetPos]);
19067   // ...and remap the intrinsic `aarch64_sve_prf<T>_gather_scalar_offset` to
19068   // `aarch64_sve_prfb_gather_uxtw_index`.
19069   SDLoc DL(N);
19070   Ops[1] = DAG.getConstant(Intrinsic::aarch64_sve_prfb_gather_uxtw_index, DL,
19071                            MVT::i64);
19072 
19073   return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops);
19074 }
19075 
19076 // Return true if the vector operation can guarantee only the first lane of its
19077 // result contains data, with all bits in other lanes set to zero.
19078 static bool isLanes1toNKnownZero(SDValue Op) {
19079   switch (Op.getOpcode()) {
19080   default:
19081     return false;
19082   case AArch64ISD::ANDV_PRED:
19083   case AArch64ISD::EORV_PRED:
19084   case AArch64ISD::FADDA_PRED:
19085   case AArch64ISD::FADDV_PRED:
19086   case AArch64ISD::FMAXNMV_PRED:
19087   case AArch64ISD::FMAXV_PRED:
19088   case AArch64ISD::FMINNMV_PRED:
19089   case AArch64ISD::FMINV_PRED:
19090   case AArch64ISD::ORV_PRED:
19091   case AArch64ISD::SADDV_PRED:
19092   case AArch64ISD::SMAXV_PRED:
19093   case AArch64ISD::SMINV_PRED:
19094   case AArch64ISD::UADDV_PRED:
19095   case AArch64ISD::UMAXV_PRED:
19096   case AArch64ISD::UMINV_PRED:
19097     return true;
19098   }
19099 }
19100 
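// Remove redundant zeroing of the form:
//   insert_vector_elt (splat 0), (extract_vector_elt V, 0), 0
// when lanes 1-N of V are already known to be zero (see
// isLanes1toNKnownZero); in that case V can be used directly.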
19101 static SDValue removeRedundantInsertVectorElt(SDNode *N) {
19102   assert(N->getOpcode() == ISD::INSERT_VECTOR_ELT && "Unexpected node!");
19103   SDValue InsertVec = N->getOperand(0);
19104   SDValue InsertElt = N->getOperand(1);
19105   SDValue InsertIdx = N->getOperand(2);
19106 
19107   // We only care about inserts into the first element...
19108   if (!isNullConstant(InsertIdx))
19109     return SDValue();
19110   // ...of a zero'd vector...
19111   if (!ISD::isConstantSplatVectorAllZeros(InsertVec.getNode()))
19112     return SDValue();
19113   // ...where the inserted data was previously extracted...
19114   if (InsertElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
19115     return SDValue();
19116 
19117   SDValue ExtractVec = InsertElt.getOperand(0);
19118   SDValue ExtractIdx = InsertElt.getOperand(1);
19119 
19120   // ...from the first element of a vector.
19121   if (!isNullConstant(ExtractIdx))
19122     return SDValue();
19123 
19124   // If we get here we are effectively trying to zero lanes 1-N of a vector.
19125 
19126   // Ensure there's no type conversion going on.
19127   if (N->getValueType(0) != ExtractVec.getValueType())
19128     return SDValue();
19129 
19130   if (!isLanes1toNKnownZero(ExtractVec))
19131     return SDValue();
19132 
19133   // The explicit zeroing is redundant.
19134   return ExtractVec;
19135 }
19136 
19137 static SDValue
19138 performInsertVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
19139   if (SDValue Res = removeRedundantInsertVectorElt(N))
19140     return Res;
19141 
19142   return performPostLD1Combine(N, DCI, true);
19143 }
19144 
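// Lower a floating-point VECTOR_SPLICE by bitcasting the operands to an
// equivalent integer type, any-extending to a packed SVE vector, splicing,
// then truncating and bitcasting back to the original type.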
19145 static SDValue performSVESpliceCombine(SDNode *N, SelectionDAG &DAG) {
19146   EVT Ty = N->getValueType(0);
19147   if (Ty.isInteger())
19148     return SDValue();
19149 
19150   EVT IntTy = Ty.changeVectorElementTypeToInteger();
19151   EVT ExtIntTy = getPackedSVEVectorVT(IntTy.getVectorElementCount());
19152   if (ExtIntTy.getVectorElementType().getScalarSizeInBits() <
19153       IntTy.getVectorElementType().getScalarSizeInBits())
19154     return SDValue();
19155 
19156   SDLoc DL(N);
19157   SDValue LHS = DAG.getAnyExtOrTrunc(DAG.getBitcast(IntTy, N->getOperand(0)),
19158                                      DL, ExtIntTy);
19159   SDValue RHS = DAG.getAnyExtOrTrunc(DAG.getBitcast(IntTy, N->getOperand(1)),
19160                                      DL, ExtIntTy);
19161   SDValue Idx = N->getOperand(2);
19162   SDValue Splice = DAG.getNode(ISD::VECTOR_SPLICE, DL, ExtIntTy, LHS, RHS, Idx);
19163   SDValue Trunc = DAG.getAnyExtOrTrunc(Splice, DL, IntTy);
19164   return DAG.getBitcast(Ty, Trunc);
19165 }
19166 
19167 static SDValue performFPExtendCombine(SDNode *N, SelectionDAG &DAG,
19168                                       TargetLowering::DAGCombinerInfo &DCI,
19169                                       const AArch64Subtarget *Subtarget) {
19170   SDValue N0 = N->getOperand(0);
19171   EVT VT = N->getValueType(0);
19172 
  // If our only user is an fp_round, don't fold this fp_extend; let the
  // fp_round(fp_extend x) pair be folded away instead.
19174   if (N->hasOneUse() && N->use_begin()->getOpcode() == ISD::FP_ROUND)
19175     return SDValue();
19176 
19177   // fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
19178   // We purposefully don't care about legality of the nodes here as we know
19179   // they can be split down into something legal.
19180   if (DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(N0.getNode()) &&
19181       N0.hasOneUse() && Subtarget->useSVEForFixedLengthVectors() &&
19182       VT.isFixedLengthVector() &&
19183       VT.getFixedSizeInBits() >= Subtarget->getMinSVEVectorSizeInBits()) {
19184     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
19185     SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
19186                                      LN0->getChain(), LN0->getBasePtr(),
19187                                      N0.getValueType(), LN0->getMemOperand());
19188     DCI.CombineTo(N, ExtLoad);
19189     DCI.CombineTo(N0.getNode(),
19190                   DAG.getNode(ISD::FP_ROUND, SDLoc(N0), N0.getValueType(),
19191                               ExtLoad, DAG.getIntPtrConstant(1, SDLoc(N0))),
19192                   ExtLoad.getValue(1));
19193     return SDValue(N, 0); // Return N so it doesn't get rechecked!
19194   }
19195 
19196   return SDValue();
19197 }
19198 
19199 static SDValue performBSPExpandForSVE(SDNode *N, SelectionDAG &DAG,
19200                                       const AArch64Subtarget *Subtarget,
                                      bool FixedSVEVectorVT) {
19202   EVT VT = N->getValueType(0);
19203 
  // Don't expand for SVE2 or SME.
19205   if (!VT.isScalableVector() || Subtarget->hasSVE2() || Subtarget->hasSME())
19206     return SDValue();
19207 
19208   // Don't expand for NEON
  if (VT.isFixedLengthVector() && !FixedSVEVectorVT)
19210     return SDValue();
19211 
19212   SDLoc DL(N);
19213 
19214   SDValue Mask = N->getOperand(0);
19215   SDValue In1 = N->getOperand(1);
19216   SDValue In2 = N->getOperand(2);
19217 
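  // Expand the bit-select as plain logic:
  //   bsp(Mask, In1, In2) == (Mask & In1) | (~Mask & In2)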
19218   SDValue InvMask = DAG.getNOT(DL, Mask, VT);
19219   SDValue Sel = DAG.getNode(ISD::AND, DL, VT, Mask, In1);
19220   SDValue SelInv = DAG.getNode(ISD::AND, DL, VT, InvMask, In2);
19221   return DAG.getNode(ISD::OR, DL, VT, Sel, SelInv);
19222 }
19223 
19224 SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
19225                                                  DAGCombinerInfo &DCI) const {
19226   SelectionDAG &DAG = DCI.DAG;
19227   switch (N->getOpcode()) {
19228   default:
19229     LLVM_DEBUG(dbgs() << "Custom combining: skipping\n");
19230     break;
19231   case ISD::ADD:
19232   case ISD::SUB:
19233     return performAddSubCombine(N, DCI, DAG);
19234   case AArch64ISD::ANDS:
19235     return performFlagSettingCombine(N, DCI, ISD::AND);
19236   case AArch64ISD::ADC:
19237     if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ true))
19238       return R;
19239     return foldADCToCINC(N, DAG);
19240   case AArch64ISD::SBC:
19241     return foldOverflowCheck(N, DAG, /* IsAdd */ false);
19242   case AArch64ISD::ADCS:
19243     if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ true))
19244       return R;
19245     return performFlagSettingCombine(N, DCI, AArch64ISD::ADC);
19246   case AArch64ISD::SBCS:
19247     if (auto R = foldOverflowCheck(N, DAG, /* IsAdd */ false))
19248       return R;
19249     return performFlagSettingCombine(N, DCI, AArch64ISD::SBC);
19250   case ISD::XOR:
19251     return performXorCombine(N, DAG, DCI, Subtarget);
19252   case ISD::MUL:
19253     return performMulCombine(N, DAG, DCI, Subtarget);
19254   case ISD::SINT_TO_FP:
19255   case ISD::UINT_TO_FP:
19256     return performIntToFpCombine(N, DAG, Subtarget);
19257   case ISD::FP_TO_SINT:
19258   case ISD::FP_TO_UINT:
19259   case ISD::FP_TO_SINT_SAT:
19260   case ISD::FP_TO_UINT_SAT:
19261     return performFpToIntCombine(N, DAG, DCI, Subtarget);
19262   case ISD::FDIV:
19263     return performFDivCombine(N, DAG, DCI, Subtarget);
19264   case ISD::OR:
19265     return performORCombine(N, DCI, Subtarget);
19266   case ISD::AND:
19267     return performANDCombine(N, DCI);
19268   case ISD::INTRINSIC_WO_CHAIN:
19269     return performIntrinsicCombine(N, DCI, Subtarget);
19270   case ISD::ANY_EXTEND:
19271   case ISD::ZERO_EXTEND:
19272   case ISD::SIGN_EXTEND:
19273     return performExtendCombine(N, DCI, DAG);
19274   case ISD::SIGN_EXTEND_INREG:
19275     return performSignExtendInRegCombine(N, DCI, DAG);
19276   case ISD::CONCAT_VECTORS:
19277     return performConcatVectorsCombine(N, DCI, DAG);
19278   case ISD::EXTRACT_SUBVECTOR:
19279     return performExtractSubvectorCombine(N, DCI, DAG);
19280   case ISD::INSERT_SUBVECTOR:
19281     return performInsertSubvectorCombine(N, DCI, DAG);
19282   case ISD::SELECT:
19283     return performSelectCombine(N, DCI);
19284   case ISD::VSELECT:
19285     return performVSelectCombine(N, DCI.DAG);
19286   case ISD::SETCC:
19287     return performSETCCCombine(N, DAG);
19288   case ISD::LOAD:
19289     if (performTBISimplification(N->getOperand(1), DCI, DAG))
19290       return SDValue(N, 0);
19291     break;
19292   case ISD::STORE:
19293     return performSTORECombine(N, DCI, DAG, Subtarget);
19294   case ISD::MGATHER:
19295   case ISD::MSCATTER:
19296     return performMaskedGatherScatterCombine(N, DCI, DAG);
19297   case ISD::VECTOR_SPLICE:
19298     return performSVESpliceCombine(N, DAG);
19299   case ISD::FP_EXTEND:
19300     return performFPExtendCombine(N, DAG, DCI, Subtarget);
19301   case AArch64ISD::BRCOND:
19302     return performBRCONDCombine(N, DCI, DAG);
19303   case AArch64ISD::TBNZ:
19304   case AArch64ISD::TBZ:
19305     return performTBZCombine(N, DCI, DAG);
19306   case AArch64ISD::CSEL:
19307     return performCSELCombine(N, DCI, DAG);
19308   case AArch64ISD::DUP:
19309     return performDUPCombine(N, DCI);
19310   case AArch64ISD::NVCAST:
19311     return performNVCASTCombine(N);
19312   case AArch64ISD::SPLICE:
19313     return performSpliceCombine(N, DAG);
19314   case AArch64ISD::UUNPKLO:
19315   case AArch64ISD::UUNPKHI:
19316     return performUnpackCombine(N, DAG);
19317   case AArch64ISD::UZP1:
19318     return performUzpCombine(N, DAG);
19319   case AArch64ISD::SETCC_MERGE_ZERO:
19320     return performSetccMergeZeroCombine(N, DCI);
19321   case AArch64ISD::GLD1_MERGE_ZERO:
19322   case AArch64ISD::GLD1_SCALED_MERGE_ZERO:
19323   case AArch64ISD::GLD1_UXTW_MERGE_ZERO:
19324   case AArch64ISD::GLD1_SXTW_MERGE_ZERO:
19325   case AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO:
19326   case AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO:
19327   case AArch64ISD::GLD1_IMM_MERGE_ZERO:
19328   case AArch64ISD::GLD1S_MERGE_ZERO:
19329   case AArch64ISD::GLD1S_SCALED_MERGE_ZERO:
19330   case AArch64ISD::GLD1S_UXTW_MERGE_ZERO:
19331   case AArch64ISD::GLD1S_SXTW_MERGE_ZERO:
19332   case AArch64ISD::GLD1S_UXTW_SCALED_MERGE_ZERO:
19333   case AArch64ISD::GLD1S_SXTW_SCALED_MERGE_ZERO:
19334   case AArch64ISD::GLD1S_IMM_MERGE_ZERO:
19335     return performGLD1Combine(N, DAG);
19336   case AArch64ISD::VASHR:
19337   case AArch64ISD::VLSHR:
19338     return performVectorShiftCombine(N, *this, DCI);
19339   case AArch64ISD::SUNPKLO:
19340     return performSunpkloCombine(N, DAG);
19341   case AArch64ISD::BSP:
19342     return performBSPExpandForSVE(
19343         N, DAG, Subtarget, useSVEForFixedLengthVectorVT(N->getValueType(0)));
19344   case ISD::INSERT_VECTOR_ELT:
19345     return performInsertVectorEltCombine(N, DCI);
19346   case ISD::EXTRACT_VECTOR_ELT:
19347     return performExtractVectorEltCombine(N, DCI, Subtarget);
19348   case ISD::VECREDUCE_ADD:
19349     return performVecReduceAddCombine(N, DCI.DAG, Subtarget);
19350   case AArch64ISD::UADDV:
19351     return performUADDVCombine(N, DAG);
19352   case AArch64ISD::SMULL:
19353   case AArch64ISD::UMULL:
19354     return tryCombineLongOpWithDup(Intrinsic::not_intrinsic, N, DCI, DAG);
19355   case ISD::INTRINSIC_VOID:
19356   case ISD::INTRINSIC_W_CHAIN:
19357     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
19358     case Intrinsic::aarch64_sve_prfb_gather_scalar_offset:
19359       return combineSVEPrefetchVecBaseImmOff(N, DAG, 1 /*=ScalarSizeInBytes*/);
19360     case Intrinsic::aarch64_sve_prfh_gather_scalar_offset:
19361       return combineSVEPrefetchVecBaseImmOff(N, DAG, 2 /*=ScalarSizeInBytes*/);
19362     case Intrinsic::aarch64_sve_prfw_gather_scalar_offset:
19363       return combineSVEPrefetchVecBaseImmOff(N, DAG, 4 /*=ScalarSizeInBytes*/);
19364     case Intrinsic::aarch64_sve_prfd_gather_scalar_offset:
19365       return combineSVEPrefetchVecBaseImmOff(N, DAG, 8 /*=ScalarSizeInBytes*/);
19366     case Intrinsic::aarch64_sve_prfb_gather_uxtw_index:
19367     case Intrinsic::aarch64_sve_prfb_gather_sxtw_index:
19368     case Intrinsic::aarch64_sve_prfh_gather_uxtw_index:
19369     case Intrinsic::aarch64_sve_prfh_gather_sxtw_index:
19370     case Intrinsic::aarch64_sve_prfw_gather_uxtw_index:
19371     case Intrinsic::aarch64_sve_prfw_gather_sxtw_index:
19372     case Intrinsic::aarch64_sve_prfd_gather_uxtw_index:
19373     case Intrinsic::aarch64_sve_prfd_gather_sxtw_index:
19374       return legalizeSVEGatherPrefetchOffsVec(N, DAG);
19375     case Intrinsic::aarch64_neon_ld2:
19376     case Intrinsic::aarch64_neon_ld3:
19377     case Intrinsic::aarch64_neon_ld4:
19378     case Intrinsic::aarch64_neon_ld1x2:
19379     case Intrinsic::aarch64_neon_ld1x3:
19380     case Intrinsic::aarch64_neon_ld1x4:
19381     case Intrinsic::aarch64_neon_ld2lane:
19382     case Intrinsic::aarch64_neon_ld3lane:
19383     case Intrinsic::aarch64_neon_ld4lane:
19384     case Intrinsic::aarch64_neon_ld2r:
19385     case Intrinsic::aarch64_neon_ld3r:
19386     case Intrinsic::aarch64_neon_ld4r:
19387     case Intrinsic::aarch64_neon_st2:
19388     case Intrinsic::aarch64_neon_st3:
19389     case Intrinsic::aarch64_neon_st4:
19390     case Intrinsic::aarch64_neon_st1x2:
19391     case Intrinsic::aarch64_neon_st1x3:
19392     case Intrinsic::aarch64_neon_st1x4:
19393     case Intrinsic::aarch64_neon_st2lane:
19394     case Intrinsic::aarch64_neon_st3lane:
19395     case Intrinsic::aarch64_neon_st4lane:
19396       return performNEONPostLDSTCombine(N, DCI, DAG);
19397     case Intrinsic::aarch64_sve_ldnt1:
19398       return performLDNT1Combine(N, DAG);
19399     case Intrinsic::aarch64_sve_ld1rq:
19400       return performLD1ReplicateCombine<AArch64ISD::LD1RQ_MERGE_ZERO>(N, DAG);
19401     case Intrinsic::aarch64_sve_ld1ro:
19402       return performLD1ReplicateCombine<AArch64ISD::LD1RO_MERGE_ZERO>(N, DAG);
19403     case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset:
19404       return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO);
19405     case Intrinsic::aarch64_sve_ldnt1_gather:
19406       return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO);
19407     case Intrinsic::aarch64_sve_ldnt1_gather_index:
19408       return performGatherLoadCombine(N, DAG,
19409                                       AArch64ISD::GLDNT1_INDEX_MERGE_ZERO);
19410     case Intrinsic::aarch64_sve_ldnt1_gather_uxtw:
19411       return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_MERGE_ZERO);
19412     case Intrinsic::aarch64_sve_ld1:
19413       return performLD1Combine(N, DAG, AArch64ISD::LD1_MERGE_ZERO);
19414     case Intrinsic::aarch64_sve_ldnf1:
19415       return performLD1Combine(N, DAG, AArch64ISD::LDNF1_MERGE_ZERO);
19416     case Intrinsic::aarch64_sve_ldff1:
19417       return performLD1Combine(N, DAG, AArch64ISD::LDFF1_MERGE_ZERO);
19418     case Intrinsic::aarch64_sve_st1:
19419       return performST1Combine(N, DAG);
19420     case Intrinsic::aarch64_sve_stnt1:
19421       return performSTNT1Combine(N, DAG);
19422     case Intrinsic::aarch64_sve_stnt1_scatter_scalar_offset:
19423       return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED);
19424     case Intrinsic::aarch64_sve_stnt1_scatter_uxtw:
19425       return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED);
19426     case Intrinsic::aarch64_sve_stnt1_scatter:
19427       return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_PRED);
19428     case Intrinsic::aarch64_sve_stnt1_scatter_index:
19429       return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1_INDEX_PRED);
19430     case Intrinsic::aarch64_sve_ld1_gather:
19431       return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_MERGE_ZERO);
19432     case Intrinsic::aarch64_sve_ld1_gather_index:
19433       return performGatherLoadCombine(N, DAG,
19434                                       AArch64ISD::GLD1_SCALED_MERGE_ZERO);
19435     case Intrinsic::aarch64_sve_ld1_gather_sxtw:
19436       return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_SXTW_MERGE_ZERO,
19437                                       /*OnlyPackedOffsets=*/false);
19438     case Intrinsic::aarch64_sve_ld1_gather_uxtw:
19439       return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_UXTW_MERGE_ZERO,
19440                                       /*OnlyPackedOffsets=*/false);
19441     case Intrinsic::aarch64_sve_ld1_gather_sxtw_index:
19442       return performGatherLoadCombine(N, DAG,
19443                                       AArch64ISD::GLD1_SXTW_SCALED_MERGE_ZERO,
19444                                       /*OnlyPackedOffsets=*/false);
19445     case Intrinsic::aarch64_sve_ld1_gather_uxtw_index:
19446       return performGatherLoadCombine(N, DAG,
19447                                       AArch64ISD::GLD1_UXTW_SCALED_MERGE_ZERO,
19448                                       /*OnlyPackedOffsets=*/false);
19449     case Intrinsic::aarch64_sve_ld1_gather_scalar_offset:
19450       return performGatherLoadCombine(N, DAG, AArch64ISD::GLD1_IMM_MERGE_ZERO);
19451     case Intrinsic::aarch64_sve_ldff1_gather:
19452       return performGatherLoadCombine(N, DAG, AArch64ISD::GLDFF1_MERGE_ZERO);
19453     case Intrinsic::aarch64_sve_ldff1_gather_index:
19454       return performGatherLoadCombine(N, DAG,
19455                                       AArch64ISD::GLDFF1_SCALED_MERGE_ZERO);
19456     case Intrinsic::aarch64_sve_ldff1_gather_sxtw:
19457       return performGatherLoadCombine(N, DAG,
19458                                       AArch64ISD::GLDFF1_SXTW_MERGE_ZERO,
19459                                       /*OnlyPackedOffsets=*/false);
19460     case Intrinsic::aarch64_sve_ldff1_gather_uxtw:
19461       return performGatherLoadCombine(N, DAG,
19462                                       AArch64ISD::GLDFF1_UXTW_MERGE_ZERO,
19463                                       /*OnlyPackedOffsets=*/false);
19464     case Intrinsic::aarch64_sve_ldff1_gather_sxtw_index:
19465       return performGatherLoadCombine(N, DAG,
19466                                       AArch64ISD::GLDFF1_SXTW_SCALED_MERGE_ZERO,
19467                                       /*OnlyPackedOffsets=*/false);
19468     case Intrinsic::aarch64_sve_ldff1_gather_uxtw_index:
19469       return performGatherLoadCombine(N, DAG,
19470                                       AArch64ISD::GLDFF1_UXTW_SCALED_MERGE_ZERO,
19471                                       /*OnlyPackedOffsets=*/false);
19472     case Intrinsic::aarch64_sve_ldff1_gather_scalar_offset:
19473       return performGatherLoadCombine(N, DAG,
19474                                       AArch64ISD::GLDFF1_IMM_MERGE_ZERO);
19475     case Intrinsic::aarch64_sve_st1_scatter:
19476       return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_PRED);
19477     case Intrinsic::aarch64_sve_st1_scatter_index:
19478       return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_SCALED_PRED);
19479     case Intrinsic::aarch64_sve_st1_scatter_sxtw:
19480       return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_SXTW_PRED,
19481                                         /*OnlyPackedOffsets=*/false);
19482     case Intrinsic::aarch64_sve_st1_scatter_uxtw:
19483       return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_UXTW_PRED,
19484                                         /*OnlyPackedOffsets=*/false);
19485     case Intrinsic::aarch64_sve_st1_scatter_sxtw_index:
19486       return performScatterStoreCombine(N, DAG,
19487                                         AArch64ISD::SST1_SXTW_SCALED_PRED,
19488                                         /*OnlyPackedOffsets=*/false);
19489     case Intrinsic::aarch64_sve_st1_scatter_uxtw_index:
19490       return performScatterStoreCombine(N, DAG,
19491                                         AArch64ISD::SST1_UXTW_SCALED_PRED,
19492                                         /*OnlyPackedOffsets=*/false);
19493     case Intrinsic::aarch64_sve_st1_scatter_scalar_offset:
19494       return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_IMM_PRED);
19495     case Intrinsic::aarch64_sve_tuple_get: {
19496       SDLoc DL(N);
19497       SDValue Chain = N->getOperand(0);
19498       SDValue Src1 = N->getOperand(2);
19499       SDValue Idx = N->getOperand(3);
19500 
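      // A tuple "get" is just an EXTRACT_SUBVECTOR of the flattened tuple,
      // starting at lane IdxConst * NumLanes.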
19501       uint64_t IdxConst = cast<ConstantSDNode>(Idx)->getZExtValue();
19502       EVT ResVT = N->getValueType(0);
19503       uint64_t NumLanes = ResVT.getVectorElementCount().getKnownMinValue();
19504       SDValue ExtIdx = DAG.getVectorIdxConstant(IdxConst * NumLanes, DL);
19505       SDValue Val =
19506           DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResVT, Src1, ExtIdx);
19507       return DAG.getMergeValues({Val, Chain}, DL);
19508     }
19509     case Intrinsic::aarch64_sve_tuple_set: {
19510       SDLoc DL(N);
19511       SDValue Chain = N->getOperand(0);
19512       SDValue Tuple = N->getOperand(2);
19513       SDValue Idx = N->getOperand(3);
19514       SDValue Vec = N->getOperand(4);
19515 
19516       EVT TupleVT = Tuple.getValueType();
19517       uint64_t TupleLanes = TupleVT.getVectorElementCount().getKnownMinValue();
19518 
19519       uint64_t IdxConst = cast<ConstantSDNode>(Idx)->getZExtValue();
19520       uint64_t NumLanes =
19521           Vec.getValueType().getVectorElementCount().getKnownMinValue();
19522 
19523       if ((TupleLanes % NumLanes) != 0)
19524         report_fatal_error("invalid tuple vector!");
19525 
19526       uint64_t NumVecs = TupleLanes / NumLanes;
19527 
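      // Rebuild the tuple as a CONCAT_VECTORS of its sub-vectors, replacing
      // the sub-vector at position IdxConst with Vec.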
19528       SmallVector<SDValue, 4> Opnds;
19529       for (unsigned I = 0; I < NumVecs; ++I) {
19530         if (I == IdxConst)
19531           Opnds.push_back(Vec);
19532         else {
19533           SDValue ExtIdx = DAG.getVectorIdxConstant(I * NumLanes, DL);
19534           Opnds.push_back(DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
19535                                       Vec.getValueType(), Tuple, ExtIdx));
19536         }
19537       }
19538       SDValue Concat =
19539           DAG.getNode(ISD::CONCAT_VECTORS, DL, Tuple.getValueType(), Opnds);
19540       return DAG.getMergeValues({Concat, Chain}, DL);
19541     }
19542     case Intrinsic::aarch64_sve_tuple_create2:
19543     case Intrinsic::aarch64_sve_tuple_create3:
19544     case Intrinsic::aarch64_sve_tuple_create4: {
19545       SDLoc DL(N);
19546       SDValue Chain = N->getOperand(0);
19547 
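      // A tuple "create" is just a CONCAT_VECTORS of its arguments into one
      // wider scalable vector.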
19548       SmallVector<SDValue, 4> Opnds;
19549       for (unsigned I = 2; I < N->getNumOperands(); ++I)
19550         Opnds.push_back(N->getOperand(I));
19551 
19552       EVT VT = Opnds[0].getValueType();
19553       EVT EltVT = VT.getVectorElementType();
19554       EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
19555                                     VT.getVectorElementCount() *
19556                                         (N->getNumOperands() - 2));
19557       SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, DestVT, Opnds);
19558       return DAG.getMergeValues({Concat, Chain}, DL);
19559     }
19560     case Intrinsic::aarch64_sve_ld2:
19561     case Intrinsic::aarch64_sve_ld3:
19562     case Intrinsic::aarch64_sve_ld4: {
19563       SDLoc DL(N);
19564       SDValue Chain = N->getOperand(0);
19565       SDValue Mask = N->getOperand(2);
19566       SDValue BasePtr = N->getOperand(3);
19567       SDValue LoadOps[] = {Chain, Mask, BasePtr};
19568       unsigned IntrinsicID =
19569           cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
19570       SDValue Result =
19571           LowerSVEStructLoad(IntrinsicID, LoadOps, N->getValueType(0), DAG, DL);
19572       return DAG.getMergeValues({Result, Chain}, DL);
19573     }
19574     case Intrinsic::aarch64_rndr:
19575     case Intrinsic::aarch64_rndrrs: {
19576       unsigned IntrinsicID =
19577           cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
19578       auto Register =
19579           (IntrinsicID == Intrinsic::aarch64_rndr ? AArch64SysReg::RNDR
19580                                                   : AArch64SysReg::RNDRRS);
19581       SDLoc DL(N);
19582       SDValue A = DAG.getNode(
19583           AArch64ISD::MRS, DL, DAG.getVTList(MVT::i64, MVT::Glue, MVT::Other),
19584           N->getOperand(0), DAG.getConstant(Register, DL, MVT::i64));
19585       SDValue B = DAG.getNode(
19586           AArch64ISD::CSINC, DL, MVT::i32, DAG.getConstant(0, DL, MVT::i32),
19587           DAG.getConstant(0, DL, MVT::i32),
19588           DAG.getConstant(AArch64CC::NE, DL, MVT::i32), A.getValue(1));
19589       return DAG.getMergeValues(
19590           {A, DAG.getZExtOrTrunc(B, DL, MVT::i1), A.getValue(2)}, DL);
19591     }
19592     default:
19593       break;
19594     }
19595     break;
19596   case ISD::GlobalAddress:
19597     return performGlobalAddressCombine(N, DAG, Subtarget, getTargetMachine());
19598   }
19599   return SDValue();
19600 }
19601 
// Check if the return value is used only as a return value, as otherwise
// we can't perform a tail call. In particular, we need to check for
// target ISD nodes that are returns and any other "odd" constructs
// that the generic analysis code won't necessarily catch.
19606 bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,
19607                                                SDValue &Chain) const {
19608   if (N->getNumValues() != 1)
19609     return false;
19610   if (!N->hasNUsesOfValue(1, 0))
19611     return false;
19612 
19613   SDValue TCChain = Chain;
19614   SDNode *Copy = *N->use_begin();
19615   if (Copy->getOpcode() == ISD::CopyToReg) {
19616     // If the copy has a glue operand, we conservatively assume it isn't safe to
19617     // perform a tail call.
19618     if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() ==
19619         MVT::Glue)
19620       return false;
19621     TCChain = Copy->getOperand(0);
19622   } else if (Copy->getOpcode() != ISD::FP_EXTEND)
19623     return false;
19624 
19625   bool HasRet = false;
19626   for (SDNode *Node : Copy->uses()) {
19627     if (Node->getOpcode() != AArch64ISD::RET_FLAG)
19628       return false;
19629     HasRet = true;
19630   }
19631 
19632   if (!HasRet)
19633     return false;
19634 
19635   Chain = TCChain;
19636   return true;
19637 }
19638 
// Return whether an instruction can potentially be optimized to a tail call.
// This will cause the optimizers to attempt to move, or duplicate, return
// instructions to help enable tail call optimizations for this instruction.
19643 bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
19644   return CI->isTailCall();
19645 }
19646 
19647 bool AArch64TargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base,
19648                                                    SDValue &Offset,
19649                                                    ISD::MemIndexedMode &AM,
19650                                                    bool &IsInc,
19651                                                    SelectionDAG &DAG) const {
19652   if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB)
19653     return false;
19654 
19655   Base = Op->getOperand(0);
19656   // All of the indexed addressing mode instructions take a signed
  // 9-bit immediate offset.
19658   if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
19659     int64_t RHSC = RHS->getSExtValue();
19660     if (Op->getOpcode() == ISD::SUB)
19661       RHSC = -(uint64_t)RHSC;
19662     if (!isInt<9>(RHSC))
19663       return false;
19664     IsInc = (Op->getOpcode() == ISD::ADD);
19665     Offset = Op->getOperand(1);
19666     return true;
19667   }
19668   return false;
19669 }
19670 
19671 bool AArch64TargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
19672                                                       SDValue &Offset,
19673                                                       ISD::MemIndexedMode &AM,
19674                                                       SelectionDAG &DAG) const {
19675   EVT VT;
19676   SDValue Ptr;
19677   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
19678     VT = LD->getMemoryVT();
19679     Ptr = LD->getBasePtr();
19680   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
19681     VT = ST->getMemoryVT();
19682     Ptr = ST->getBasePtr();
19683   } else
19684     return false;
19685 
19686   bool IsInc;
19687   if (!getIndexedAddressParts(Ptr.getNode(), Base, Offset, AM, IsInc, DAG))
19688     return false;
19689   AM = IsInc ? ISD::PRE_INC : ISD::PRE_DEC;
19690   return true;
19691 }
19692 
19693 bool AArch64TargetLowering::getPostIndexedAddressParts(
19694     SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset,
19695     ISD::MemIndexedMode &AM, SelectionDAG &DAG) const {
19696   EVT VT;
19697   SDValue Ptr;
19698   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
19699     VT = LD->getMemoryVT();
19700     Ptr = LD->getBasePtr();
19701   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
19702     VT = ST->getMemoryVT();
19703     Ptr = ST->getBasePtr();
19704   } else
19705     return false;
19706 
19707   bool IsInc;
19708   if (!getIndexedAddressParts(Op, Base, Offset, AM, IsInc, DAG))
19709     return false;
19710   // Post-indexing updates the base, so it's not a valid transform
19711   // if that's not the same as the load's pointer.
19712   if (Ptr != Base)
19713     return false;
19714   AM = IsInc ? ISD::POST_INC : ISD::POST_DEC;
19715   return true;
19716 }
19717 
19718 void AArch64TargetLowering::ReplaceBITCASTResults(
19719     SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
19720   SDLoc DL(N);
19721   SDValue Op = N->getOperand(0);
19722   EVT VT = N->getValueType(0);
19723   EVT SrcVT = Op.getValueType();
19724 
19725   if (VT.isScalableVector() && !isTypeLegal(VT) && isTypeLegal(SrcVT)) {
19726     assert(!VT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
19727            "Expected fp->int bitcast!");
19728 
19729     // Bitcasting between unpacked vector types of different element counts is
19730     // not a NOP because the live elements are laid out differently.
19731     //                01234567
19732     // e.g. nxv2i32 = XX??XX??
19733     //      nxv4f16 = X?X?X?X?
19734     if (VT.getVectorElementCount() != SrcVT.getVectorElementCount())
19735       return;
19736 
19737     SDValue CastResult = getSVESafeBitCast(getSVEContainerType(VT), Op, DAG);
19738     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, CastResult));
19739     return;
19740   }
19741 
19742   if (VT != MVT::i16 || (SrcVT != MVT::f16 && SrcVT != MVT::bf16))
19743     return;
19744 
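  // Bitcasts from f16/bf16 to i16 are expanded by inserting the half into the
  // low 16 bits of an undefined f32 register, bitcasting that to i32 and
  // truncating the result to i16.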
19745   Op = SDValue(
19746       DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f32,
19747                          DAG.getUNDEF(MVT::i32), Op,
19748                          DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
19749       0);
19750   Op = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op);
19751   Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Op));
19752 }
19753 
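// Replace a 256-bit add(X, shuffle(X, <1,0,3,2,...>)), where each result lane
// holds X[i] + X[i^1], with an ADDP of the two halves of X followed by a
// shuffle that duplicates each pairwise sum back into both lanes of its pair.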
19754 static void ReplaceAddWithADDP(SDNode *N, SmallVectorImpl<SDValue> &Results,
19755                                SelectionDAG &DAG,
19756                                const AArch64Subtarget *Subtarget) {
19757   EVT VT = N->getValueType(0);
19758   if (!VT.is256BitVector() ||
19759       (VT.getScalarType().isFloatingPoint() &&
19760        !N->getFlags().hasAllowReassociation()) ||
19761       (VT.getScalarType() == MVT::f16 && !Subtarget->hasFullFP16()))
19762     return;
19763 
19764   SDValue X = N->getOperand(0);
19765   auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N->getOperand(1));
19766   if (!Shuf) {
19767     Shuf = dyn_cast<ShuffleVectorSDNode>(N->getOperand(0));
19768     X = N->getOperand(1);
19769     if (!Shuf)
19770       return;
19771   }
19772 
19773   if (Shuf->getOperand(0) != X || !Shuf->getOperand(1)->isUndef())
19774     return;
19775 
19776   // Check the mask is 1,0,3,2,5,4,...
19777   ArrayRef<int> Mask = Shuf->getMask();
19778   for (int I = 0, E = Mask.size(); I < E; I++)
19779     if (Mask[I] != (I % 2 == 0 ? I + 1 : I - 1))
19780       return;
19781 
19782   SDLoc DL(N);
19783   auto LoHi = DAG.SplitVector(X, DL);
19784   assert(LoHi.first.getValueType() == LoHi.second.getValueType());
19785   SDValue Addp = DAG.getNode(AArch64ISD::ADDP, N, LoHi.first.getValueType(),
19786                              LoHi.first, LoHi.second);
19787 
19788   // Shuffle the elements back into order.
19789   SmallVector<int> NMask;
19790   for (unsigned I = 0, E = VT.getVectorNumElements() / 2; I < E; I++) {
19791     NMask.push_back(I);
19792     NMask.push_back(I);
19793   }
19794   Results.push_back(
19795       DAG.getVectorShuffle(VT, DL,
19796                            DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Addp,
19797                                        DAG.getUNDEF(LoHi.first.getValueType())),
19798                            DAG.getUNDEF(VT), NMask));
19799 }
19800 
19801 static void ReplaceReductionResults(SDNode *N,
19802                                     SmallVectorImpl<SDValue> &Results,
19803                                     SelectionDAG &DAG, unsigned InterOp,
19804                                     unsigned AcrossOp) {
19805   EVT LoVT, HiVT;
19806   SDValue Lo, Hi;
19807   SDLoc dl(N);
19808   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
19809   std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
19810   SDValue InterVal = DAG.getNode(InterOp, dl, LoVT, Lo, Hi);
19811   SDValue SplitVal = DAG.getNode(AcrossOp, dl, LoVT, InterVal);
19812   Results.push_back(SplitVal);
19813 }
19814 
19815 static std::pair<SDValue, SDValue> splitInt128(SDValue N, SelectionDAG &DAG) {
19816   SDLoc DL(N);
19817   SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, N);
19818   SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64,
19819                            DAG.getNode(ISD::SRL, DL, MVT::i128, N,
19820                                        DAG.getConstant(64, DL, MVT::i64)));
19821   return std::make_pair(Lo, Hi);
19822 }
19823 
19824 void AArch64TargetLowering::ReplaceExtractSubVectorResults(
19825     SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
19826   SDValue In = N->getOperand(0);
19827   EVT InVT = In.getValueType();
19828 
19829   // Common code will handle these just fine.
19830   if (!InVT.isScalableVector() || !InVT.isInteger())
19831     return;
19832 
19833   SDLoc DL(N);
19834   EVT VT = N->getValueType(0);
19835 
19836   // The following checks bail if this is not a halving operation.
19837 
19838   ElementCount ResEC = VT.getVectorElementCount();
19839 
19840   if (InVT.getVectorElementCount() != (ResEC * 2))
19841     return;
19842 
19843   auto *CIndex = dyn_cast<ConstantSDNode>(N->getOperand(1));
19844   if (!CIndex)
19845     return;
19846 
19847   unsigned Index = CIndex->getZExtValue();
19848   if ((Index != 0) && (Index != ResEC.getKnownMinValue()))
19849     return;
19850 
19851   unsigned Opcode = (Index == 0) ? AArch64ISD::UUNPKLO : AArch64ISD::UUNPKHI;
19852   EVT ExtendedHalfVT = VT.widenIntegerVectorElementType(*DAG.getContext());
19853 
19854   SDValue Half = DAG.getNode(Opcode, DL, ExtendedHalfVT, N->getOperand(0));
19855   Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Half));
19856 }
19857 
19858 // Create an even/odd pair of X registers holding integer value V.
19859 static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
19860   SDLoc dl(V.getNode());
19861   SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i64);
19862   SDValue VHi = DAG.getAnyExtOrTrunc(
19863       DAG.getNode(ISD::SRL, dl, MVT::i128, V, DAG.getConstant(64, dl, MVT::i64)),
19864       dl, MVT::i64);
19865   if (DAG.getDataLayout().isBigEndian())
    std::swap(VLo, VHi);
19867   SDValue RegClass =
19868       DAG.getTargetConstant(AArch64::XSeqPairsClassRegClassID, dl, MVT::i32);
19869   SDValue SubReg0 = DAG.getTargetConstant(AArch64::sube64, dl, MVT::i32);
19870   SDValue SubReg1 = DAG.getTargetConstant(AArch64::subo64, dl, MVT::i32);
19871   const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
19872   return SDValue(
19873       DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
19874 }
19875 
19876 static void ReplaceCMP_SWAP_128Results(SDNode *N,
19877                                        SmallVectorImpl<SDValue> &Results,
19878                                        SelectionDAG &DAG,
19879                                        const AArch64Subtarget *Subtarget) {
19880   assert(N->getValueType(0) == MVT::i128 &&
         "AtomicCmpSwap on types narrower than 128 bits should be legal");
19882 
19883   MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
19884   if (Subtarget->hasLSE() || Subtarget->outlineAtomics()) {
19885     // LSE has a 128-bit compare and swap (CASP), but i128 is not a legal type,
19886     // so lower it here, wrapped in REG_SEQUENCE and EXTRACT_SUBREG.
19887     SDValue Ops[] = {
19888         createGPRPairNode(DAG, N->getOperand(2)), // Compare value
19889         createGPRPairNode(DAG, N->getOperand(3)), // Store value
19890         N->getOperand(1), // Ptr
19891         N->getOperand(0), // Chain in
19892     };
19893 
19894     unsigned Opcode;
19895     switch (MemOp->getMergedOrdering()) {
19896     case AtomicOrdering::Monotonic:
19897       Opcode = AArch64::CASPX;
19898       break;
19899     case AtomicOrdering::Acquire:
19900       Opcode = AArch64::CASPAX;
19901       break;
19902     case AtomicOrdering::Release:
19903       Opcode = AArch64::CASPLX;
19904       break;
19905     case AtomicOrdering::AcquireRelease:
19906     case AtomicOrdering::SequentiallyConsistent:
19907       Opcode = AArch64::CASPALX;
19908       break;
19909     default:
19910       llvm_unreachable("Unexpected ordering!");
19911     }
19912 
19913     MachineSDNode *CmpSwap = DAG.getMachineNode(
19914         Opcode, SDLoc(N), DAG.getVTList(MVT::Untyped, MVT::Other), Ops);
19915     DAG.setNodeMemRefs(CmpSwap, {MemOp});
19916 
19917     unsigned SubReg1 = AArch64::sube64, SubReg2 = AArch64::subo64;
19918     if (DAG.getDataLayout().isBigEndian())
19919       std::swap(SubReg1, SubReg2);
19920     SDValue Lo = DAG.getTargetExtractSubreg(SubReg1, SDLoc(N), MVT::i64,
19921                                             SDValue(CmpSwap, 0));
19922     SDValue Hi = DAG.getTargetExtractSubreg(SubReg2, SDLoc(N), MVT::i64,
19923                                             SDValue(CmpSwap, 0));
19924     Results.push_back(
19925         DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128, Lo, Hi));
19926     Results.push_back(SDValue(CmpSwap, 1)); // Chain out
19927     return;
19928   }
19929 
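  // Without LSE (or outlined atomics), fall back to the CMP_SWAP_128* pseudos,
  // which are expanded later into an LDXP/STXP loop.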
19930   unsigned Opcode;
19931   switch (MemOp->getMergedOrdering()) {
19932   case AtomicOrdering::Monotonic:
19933     Opcode = AArch64::CMP_SWAP_128_MONOTONIC;
19934     break;
19935   case AtomicOrdering::Acquire:
19936     Opcode = AArch64::CMP_SWAP_128_ACQUIRE;
19937     break;
19938   case AtomicOrdering::Release:
19939     Opcode = AArch64::CMP_SWAP_128_RELEASE;
19940     break;
19941   case AtomicOrdering::AcquireRelease:
19942   case AtomicOrdering::SequentiallyConsistent:
19943     Opcode = AArch64::CMP_SWAP_128;
19944     break;
19945   default:
19946     llvm_unreachable("Unexpected ordering!");
19947   }
19948 
19949   auto Desired = splitInt128(N->getOperand(2), DAG);
19950   auto New = splitInt128(N->getOperand(3), DAG);
19951   SDValue Ops[] = {N->getOperand(1), Desired.first, Desired.second,
19952                    New.first,        New.second,    N->getOperand(0)};
19953   SDNode *CmpSwap = DAG.getMachineNode(
19954       Opcode, SDLoc(N), DAG.getVTList(MVT::i64, MVT::i64, MVT::i32, MVT::Other),
19955       Ops);
19956   DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});
19957 
19958   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128,
19959                                 SDValue(CmpSwap, 0), SDValue(CmpSwap, 1)));
19960   Results.push_back(SDValue(CmpSwap, 3));
19961 }
19962 
19963 void AArch64TargetLowering::ReplaceNodeResults(
19964     SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
19965   switch (N->getOpcode()) {
19966   default:
19967     llvm_unreachable("Don't know how to custom expand this");
19968   case ISD::BITCAST:
19969     ReplaceBITCASTResults(N, Results, DAG);
19970     return;
19971   case ISD::VECREDUCE_ADD:
19972   case ISD::VECREDUCE_SMAX:
19973   case ISD::VECREDUCE_SMIN:
19974   case ISD::VECREDUCE_UMAX:
19975   case ISD::VECREDUCE_UMIN:
19976     Results.push_back(LowerVECREDUCE(SDValue(N, 0), DAG));
19977     return;
19978   case ISD::ADD:
19979   case ISD::FADD:
19980     ReplaceAddWithADDP(N, Results, DAG, Subtarget);
19981     return;
19982 
19983   case ISD::CTPOP:
19984     if (SDValue Result = LowerCTPOP(SDValue(N, 0), DAG))
19985       Results.push_back(Result);
19986     return;
19987   case AArch64ISD::SADDV:
19988     ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::SADDV);
19989     return;
19990   case AArch64ISD::UADDV:
19991     ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::UADDV);
19992     return;
19993   case AArch64ISD::SMINV:
19994     ReplaceReductionResults(N, Results, DAG, ISD::SMIN, AArch64ISD::SMINV);
19995     return;
19996   case AArch64ISD::UMINV:
19997     ReplaceReductionResults(N, Results, DAG, ISD::UMIN, AArch64ISD::UMINV);
19998     return;
19999   case AArch64ISD::SMAXV:
20000     ReplaceReductionResults(N, Results, DAG, ISD::SMAX, AArch64ISD::SMAXV);
20001     return;
20002   case AArch64ISD::UMAXV:
20003     ReplaceReductionResults(N, Results, DAG, ISD::UMAX, AArch64ISD::UMAXV);
20004     return;
20005   case ISD::FP_TO_UINT:
20006   case ISD::FP_TO_SINT:
20007   case ISD::STRICT_FP_TO_SINT:
20008   case ISD::STRICT_FP_TO_UINT:
20009     assert(N->getValueType(0) == MVT::i128 && "unexpected illegal conversion");
20010     // Let normal code take care of it by not adding anything to Results.
20011     return;
20012   case ISD::ATOMIC_CMP_SWAP:
20013     ReplaceCMP_SWAP_128Results(N, Results, DAG, Subtarget);
20014     return;
20015   case ISD::ATOMIC_LOAD:
20016   case ISD::LOAD: {
20017     assert(SDValue(N, 0).getValueType() == MVT::i128 &&
20018            "unexpected load's value type");
20019     MemSDNode *LoadNode = cast<MemSDNode>(N);
20020     if ((!LoadNode->isVolatile() && !LoadNode->isAtomic()) ||
20021         LoadNode->getMemoryVT() != MVT::i128) {
      // Only volatile or atomic i128 loads need to be lowered to LDP here;
      // everything else is handled by common code or optimized later in
      // AArch64's load/store optimizer.
20024       return;
20025     }
20026 
20027     SDValue Result = DAG.getMemIntrinsicNode(
20028         AArch64ISD::LDP, SDLoc(N),
20029         DAG.getVTList({MVT::i64, MVT::i64, MVT::Other}),
20030         {LoadNode->getChain(), LoadNode->getBasePtr()}, LoadNode->getMemoryVT(),
20031         LoadNode->getMemOperand());
20032 
20033     SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i128,
20034                                Result.getValue(0), Result.getValue(1));
20035     Results.append({Pair, Result.getValue(2) /* Chain */});
20036     return;
20037   }
20038   case ISD::EXTRACT_SUBVECTOR:
20039     ReplaceExtractSubVectorResults(N, Results, DAG);
20040     return;
20041   case ISD::INSERT_SUBVECTOR:
20042   case ISD::CONCAT_VECTORS:
20043     // Custom lowering has been requested for INSERT_SUBVECTOR and
20044     // CONCAT_VECTORS -- but delegate to common code for result type
    // legalisation.
20046     return;
20047   case ISD::INTRINSIC_WO_CHAIN: {
20048     EVT VT = N->getValueType(0);
20049     assert((VT == MVT::i8 || VT == MVT::i16) &&
20050            "custom lowering for unexpected type");
20051 
20052     ConstantSDNode *CN = cast<ConstantSDNode>(N->getOperand(0));
20053     Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
20054     switch (IntID) {
20055     default:
20056       return;
20057     case Intrinsic::aarch64_sve_clasta_n: {
20058       SDLoc DL(N);
20059       auto Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, N->getOperand(2));
20060       auto V = DAG.getNode(AArch64ISD::CLASTA_N, DL, MVT::i32,
20061                            N->getOperand(1), Op2, N->getOperand(3));
20062       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
20063       return;
20064     }
20065     case Intrinsic::aarch64_sve_clastb_n: {
20066       SDLoc DL(N);
20067       auto Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, N->getOperand(2));
20068       auto V = DAG.getNode(AArch64ISD::CLASTB_N, DL, MVT::i32,
20069                            N->getOperand(1), Op2, N->getOperand(3));
20070       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
20071       return;
20072     }
20073     case Intrinsic::aarch64_sve_lasta: {
20074       SDLoc DL(N);
20075       auto V = DAG.getNode(AArch64ISD::LASTA, DL, MVT::i32,
20076                            N->getOperand(1), N->getOperand(2));
20077       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
20078       return;
20079     }
20080     case Intrinsic::aarch64_sve_lastb: {
20081       SDLoc DL(N);
20082       auto V = DAG.getNode(AArch64ISD::LASTB, DL, MVT::i32,
20083                            N->getOperand(1), N->getOperand(2));
20084       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
20085       return;
20086     }
20087     }
20088   }
20089   }
20090 }
20091 
20092 bool AArch64TargetLowering::useLoadStackGuardNode() const {
20093   if (Subtarget->isTargetAndroid() || Subtarget->isTargetFuchsia())
20094     return TargetLowering::useLoadStackGuardNode();
20095   return true;
20096 }
20097 
20098 unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const {
20099   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
20100   // reciprocal if there are three or more FDIVs.
20101   return 3;
20102 }
20103 
20104 TargetLoweringBase::LegalizeTypeAction
20105 AArch64TargetLowering::getPreferredVectorAction(MVT VT) const {
  // During type legalization, we prefer to widen v1i8, v1i16, v1i32 and v1f32
  // to v8i8, v4i16, v2i32 and v2f32 instead of promoting.
20108   if (VT == MVT::v1i8 || VT == MVT::v1i16 || VT == MVT::v1i32 ||
20109       VT == MVT::v1f32)
20110     return TypeWidenVector;
20111 
20112   return TargetLoweringBase::getPreferredVectorAction(VT);
20113 }
20114 
20115 // In v8.4a, ldp and stp instructions are guaranteed to be single-copy atomic
20116 // provided the address is 16-byte aligned.
20117 bool AArch64TargetLowering::isOpSuitableForLDPSTP(const Instruction *I) const {
20118   if (!Subtarget->hasLSE2())
20119     return false;
20120 
20121   if (auto LI = dyn_cast<LoadInst>(I))
20122     return LI->getType()->getPrimitiveSizeInBits() == 128 &&
20123            LI->getAlign() >= Align(16);
20124 
20125   if (auto SI = dyn_cast<StoreInst>(I))
20126     return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() == 128 &&
20127            SI->getAlign() >= Align(16);
20128 
20129   return false;
20130 }
20131 
20132 bool AArch64TargetLowering::shouldInsertFencesForAtomic(
20133     const Instruction *I) const {
20134   return isOpSuitableForLDPSTP(I);
20135 }
20136 
// Loads and stores less than 128 bits are already atomic; ones above that
20138 // are doomed anyway, so defer to the default libcall and blame the OS when
20139 // things go wrong.
20140 TargetLoweringBase::AtomicExpansionKind
20141 AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
20142   unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
20143   if (Size != 128 || isOpSuitableForLDPSTP(SI))
20144     return AtomicExpansionKind::None;
20145   return AtomicExpansionKind::Expand;
20146 }
20147 
// Loads and stores less than 128 bits are already atomic; ones above that
20149 // are doomed anyway, so defer to the default libcall and blame the OS when
20150 // things go wrong.
20151 TargetLowering::AtomicExpansionKind
20152 AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
20153   unsigned Size = LI->getType()->getPrimitiveSizeInBits();
20154 
20155   if (Size != 128 || isOpSuitableForLDPSTP(LI))
20156     return AtomicExpansionKind::None;
20157 
20158   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
20159   // implement atomicrmw without spilling. If the target address is also on the
20160   // stack and close enough to the spill slot, this can lead to a situation
20161   // where the monitor always gets cleared and the atomic operation can never
20162   // succeed. So at -O0 lower this operation to a CAS loop.
20163   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
20164     return AtomicExpansionKind::CmpXChg;
20165 
20166   return AtomicExpansionKind::LLSC;
20167 }
20168 
// For the real atomic operations, we have ldxr/stxr up to 128 bits.
20170 TargetLowering::AtomicExpansionKind
20171 AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
20172   if (AI->isFloatingPointOperation())
20173     return AtomicExpansionKind::CmpXChg;
20174 
20175   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
20176   if (Size > 128) return AtomicExpansionKind::None;
20177 
20178   // Nand is not supported in LSE.
20179   // Leave 128 bits to LLSC or CmpXChg.
20180   if (AI->getOperation() != AtomicRMWInst::Nand && Size < 128) {
20181     if (Subtarget->hasLSE())
20182       return AtomicExpansionKind::None;
20183     if (Subtarget->outlineAtomics()) {
      // [U]Min/[U]Max RMW atomics are used in __sync_fetch_ libcalls so far.
20185       // Don't outline them unless
20186       // (1) high level <atomic> support approved:
20187       //   http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p0493r1.pdf
20188       // (2) low level libgcc and compiler-rt support implemented by:
20189       //   min/max outline atomics helpers
20190       if (AI->getOperation() != AtomicRMWInst::Min &&
20191           AI->getOperation() != AtomicRMWInst::Max &&
20192           AI->getOperation() != AtomicRMWInst::UMin &&
20193           AI->getOperation() != AtomicRMWInst::UMax) {
20194         return AtomicExpansionKind::None;
20195       }
20196     }
20197   }
20198 
20199   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
20200   // implement atomicrmw without spilling. If the target address is also on the
20201   // stack and close enough to the spill slot, this can lead to a situation
20202   // where the monitor always gets cleared and the atomic operation can never
20203   // succeed. So at -O0 lower this operation to a CAS loop.
20204   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
20205     return AtomicExpansionKind::CmpXChg;
20206 
20207   return AtomicExpansionKind::LLSC;
20208 }
20209 
20210 TargetLowering::AtomicExpansionKind
20211 AArch64TargetLowering::shouldExpandAtomicCmpXchgInIR(
20212     AtomicCmpXchgInst *AI) const {
20213   // If subtarget has LSE, leave cmpxchg intact for codegen.
20214   if (Subtarget->hasLSE() || Subtarget->outlineAtomics())
20215     return AtomicExpansionKind::None;
20216   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
20217   // implement cmpxchg without spilling. If the address being exchanged is also
20218   // on the stack and close enough to the spill slot, this can lead to a
20219   // situation where the monitor always gets cleared and the atomic operation
20220   // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
20221   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
20222     return AtomicExpansionKind::None;
20223 
20224   // 128-bit atomic cmpxchg is weird; AtomicExpand doesn't know how to expand
20225   // it.
20226   unsigned Size = AI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
20227   if (Size > 64)
20228     return AtomicExpansionKind::None;
20229 
20230   return AtomicExpansionKind::LLSC;
20231 }
20232 
20233 Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder,
20234                                              Type *ValueTy, Value *Addr,
20235                                              AtomicOrdering Ord) const {
20236   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20237   bool IsAcquire = isAcquireOrStronger(Ord);
20238 
20239   // Since i128 isn't legal and intrinsics don't get type-lowered, the ldrexd
20240   // intrinsic must return {i64, i64} and we have to recombine them into a
20241   // single i128 here.
20242   if (ValueTy->getPrimitiveSizeInBits() == 128) {
20243     Intrinsic::ID Int =
20244         IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp;
20245     Function *Ldxr = Intrinsic::getDeclaration(M, Int);
20246 
20247     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
20248     Value *LoHi = Builder.CreateCall(Ldxr, Addr, "lohi");
20249 
20250     Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
20251     Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
20252     Lo = Builder.CreateZExt(Lo, ValueTy, "lo64");
20253     Hi = Builder.CreateZExt(Hi, ValueTy, "hi64");
20254     return Builder.CreateOr(
20255         Lo, Builder.CreateShl(Hi, ConstantInt::get(ValueTy, 64)), "val64");
20256   }
20257 
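  // For everything else, emit a plain ldxr/ldaxr, truncate the i64 result to
  // the value's integer width and bitcast it back to ValueTy.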
20258   Type *Tys[] = { Addr->getType() };
20259   Intrinsic::ID Int =
20260       IsAcquire ? Intrinsic::aarch64_ldaxr : Intrinsic::aarch64_ldxr;
20261   Function *Ldxr = Intrinsic::getDeclaration(M, Int, Tys);
20262 
20263   const DataLayout &DL = M->getDataLayout();
20264   IntegerType *IntEltTy = Builder.getIntNTy(DL.getTypeSizeInBits(ValueTy));
20265   CallInst *CI = Builder.CreateCall(Ldxr, Addr);
20266   CI->addParamAttr(
20267       0, Attribute::get(Builder.getContext(), Attribute::ElementType, ValueTy));
20268   Value *Trunc = Builder.CreateTrunc(CI, IntEltTy);
20269 
20270   return Builder.CreateBitCast(Trunc, ValueTy);
20271 }
20272 
20273 void AArch64TargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
20274     IRBuilderBase &Builder) const {
20275   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20276   Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::aarch64_clrex));
20277 }
20278 
20279 Value *AArch64TargetLowering::emitStoreConditional(IRBuilderBase &Builder,
20280                                                    Value *Val, Value *Addr,
20281                                                    AtomicOrdering Ord) const {
20282   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20283   bool IsRelease = isReleaseOrStronger(Ord);
20284 
20285   // Since the intrinsics must have legal type, the i128 intrinsics take two
20286   // parameters: "i64, i64". We must marshal Val into the appropriate form
20287   // before the call.
20288   if (Val->getType()->getPrimitiveSizeInBits() == 128) {
20289     Intrinsic::ID Int =
20290         IsRelease ? Intrinsic::aarch64_stlxp : Intrinsic::aarch64_stxp;
20291     Function *Stxr = Intrinsic::getDeclaration(M, Int);
20292     Type *Int64Ty = Type::getInt64Ty(M->getContext());
20293 
20294     Value *Lo = Builder.CreateTrunc(Val, Int64Ty, "lo");
20295     Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 64), Int64Ty, "hi");
20296     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
20297     return Builder.CreateCall(Stxr, {Lo, Hi, Addr});
20298   }
20299 
20300   Intrinsic::ID Int =
20301       IsRelease ? Intrinsic::aarch64_stlxr : Intrinsic::aarch64_stxr;
20302   Type *Tys[] = { Addr->getType() };
20303   Function *Stxr = Intrinsic::getDeclaration(M, Int, Tys);
20304 
20305   const DataLayout &DL = M->getDataLayout();
  IntegerType *IntValTy =
      Builder.getIntNTy(DL.getTypeSizeInBits(Val->getType()));
20307   Val = Builder.CreateBitCast(Val, IntValTy);
20308 
20309   CallInst *CI = Builder.CreateCall(
20310       Stxr, {Builder.CreateZExtOrBitCast(
20311                  Val, Stxr->getFunctionType()->getParamType(0)),
20312              Addr});
20313   CI->addParamAttr(1, Attribute::get(Builder.getContext(),
20314                                      Attribute::ElementType, Val->getType()));
20315   return CI;
20316 }
20317 
20318 bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
20319     Type *Ty, CallingConv::ID CallConv, bool isVarArg,
20320     const DataLayout &DL) const {
20321   if (!Ty->isArrayTy()) {
20322     const TypeSize &TySize = Ty->getPrimitiveSizeInBits();
20323     return TySize.isScalable() && TySize.getKnownMinSize() > 128;
20324   }
20325 
  // All non-aggregate members of the type must have the same type.
20327   SmallVector<EVT> ValueVTs;
20328   ComputeValueVTs(*this, DL, Ty, ValueVTs);
20329   return is_splat(ValueVTs);
20330 }
20331 
20332 bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,
20333                                                             EVT) const {
20334   return false;
20335 }
20336 
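// Build an i8** pointing Offset bytes past the thread pointer returned by
// llvm.thread_pointer; used below for the fixed Android/Fuchsia TLS slots.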
20337 static Value *UseTlsOffset(IRBuilderBase &IRB, unsigned Offset) {
20338   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
20339   Function *ThreadPointerFunc =
20340       Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
20341   return IRB.CreatePointerCast(
20342       IRB.CreateConstGEP1_32(IRB.getInt8Ty(), IRB.CreateCall(ThreadPointerFunc),
20343                              Offset),
20344       IRB.getInt8PtrTy()->getPointerTo(0));
20345 }
20346 
20347 Value *AArch64TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
20348   // Android provides a fixed TLS slot for the stack cookie. See the definition
20349   // of TLS_SLOT_STACK_GUARD in
20350   // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
20351   if (Subtarget->isTargetAndroid())
20352     return UseTlsOffset(IRB, 0x28);
20353 
20354   // Fuchsia is similar.
20355   // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
20356   if (Subtarget->isTargetFuchsia())
20357     return UseTlsOffset(IRB, -0x10);
20358 
20359   return TargetLowering::getIRStackGuard(IRB);
20360 }
20361 
20362 void AArch64TargetLowering::insertSSPDeclarations(Module &M) const {
20363   // MSVC CRT provides functionalities for stack protection.
20364   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) {
20365     // MSVC CRT has a global variable holding security cookie.
20366     M.getOrInsertGlobal("__security_cookie",
20367                         Type::getInt8PtrTy(M.getContext()));
20368 
20369     // MSVC CRT has a function to validate security cookie.
20370     FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
20371         "__security_check_cookie", Type::getVoidTy(M.getContext()),
20372         Type::getInt8PtrTy(M.getContext()));
20373     if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
20374       F->setCallingConv(CallingConv::Win64);
20375       F->addParamAttr(0, Attribute::AttrKind::InReg);
20376     }
20377     return;
20378   }
20379   TargetLowering::insertSSPDeclarations(M);
20380 }
20381 
20382 Value *AArch64TargetLowering::getSDagStackGuard(const Module &M) const {
20383   // MSVC CRT has a global variable holding security cookie.
20384   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
20385     return M.getGlobalVariable("__security_cookie");
20386   return TargetLowering::getSDagStackGuard(M);
20387 }
20388 
20389 Function *AArch64TargetLowering::getSSPStackGuardCheck(const Module &M) const {
20390   // MSVC CRT has a function to validate security cookie.
20391   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
20392     return M.getFunction("__security_check_cookie");
20393   return TargetLowering::getSSPStackGuardCheck(M);
20394 }
20395 
20396 Value *
20397 AArch64TargetLowering::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
20398   // Android provides a fixed TLS slot for the SafeStack pointer. See the
20399   // definition of TLS_SLOT_SAFESTACK in
20400   // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
20401   if (Subtarget->isTargetAndroid())
20402     return UseTlsOffset(IRB, 0x48);
20403 
20404   // Fuchsia is similar.
20405   // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
20406   if (Subtarget->isTargetFuchsia())
20407     return UseTlsOffset(IRB, -0x8);
20408 
20409   return TargetLowering::getSafeStackPointerLocation(IRB);
20410 }
20411 
20412 bool AArch64TargetLowering::isMaskAndCmp0FoldingBeneficial(
20413     const Instruction &AndI) const {
  // Only sink 'and' mask to cmp use block if it is masking a single bit, since
  // this is likely to fold the and/cmp/br into a single tbz instruction.  It
  // may be beneficial to sink in other cases, but we would have to check that
  // the cmp would not get folded into the br to form a cbz for these to be
  // beneficial.
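  // For example, an "and w8, w0, #0x8; cmp w8, #0; b.eq" style sequence can
  // become a single "tbz w0, #3, <label>" when the mask is a power of two.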
20419   ConstantInt* Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
20420   if (!Mask)
20421     return false;
20422   return Mask->getValue().isPowerOf2();
20423 }
20424 
20425 bool AArch64TargetLowering::
20426     shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
20427         SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
20428         unsigned OldShiftOpcode, unsigned NewShiftOpcode,
20429         SelectionDAG &DAG) const {
  // Does the baseline recommend not performing the fold by default?
20431   if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
20432           X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
20433     return false;
20434   // Else, if this is a vector shift, prefer 'shl'.
20435   return X.getValueType().isScalarInteger() || NewShiftOpcode == ISD::SHL;
20436 }
20437 
20438 bool AArch64TargetLowering::shouldExpandShift(SelectionDAG &DAG,
20439                                               SDNode *N) const {
20440   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
20441       !Subtarget->isTargetWindows() && !Subtarget->isTargetDarwin())
20442     return false;
20443   return true;
20444 }
20445 
20446 void AArch64TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  // Update IsSplitCSR in AArch64FunctionInfo.
20448   AArch64FunctionInfo *AFI = Entry->getParent()->getInfo<AArch64FunctionInfo>();
20449   AFI->setIsSplitCSR(true);
20450 }
20451 
20452 void AArch64TargetLowering::insertCopiesSplitCSR(
20453     MachineBasicBlock *Entry,
20454     const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
20455   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
20456   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
20457   if (!IStart)
20458     return;
20459 
20460   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20461   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
20462   MachineBasicBlock::iterator MBBI = Entry->begin();
20463   for (const MCPhysReg *I = IStart; *I; ++I) {
20464     const TargetRegisterClass *RC = nullptr;
20465     if (AArch64::GPR64RegClass.contains(*I))
20466       RC = &AArch64::GPR64RegClass;
20467     else if (AArch64::FPR64RegClass.contains(*I))
20468       RC = &AArch64::FPR64RegClass;
20469     else
20470       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
20471 
20472     Register NewVR = MRI->createVirtualRegister(RC);
20473     // Create copy from CSR to a virtual register.
20474     // FIXME: this currently does not emit CFI pseudo-instructions, it works
20475     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
20476     // nounwind. If we want to generalize this later, we may need to emit
20477     // CFI pseudo-instructions.
20478     assert(Entry->getParent()->getFunction().hasFnAttribute(
20479                Attribute::NoUnwind) &&
20480            "Function should be nounwind in insertCopiesSplitCSR!");
20481     Entry->addLiveIn(*I);
20482     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
20483         .addReg(*I);
20484 
20485     // Insert the copy-back instructions right before the terminator.
20486     for (auto *Exit : Exits)
20487       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
20488               TII->get(TargetOpcode::COPY), *I)
20489           .addReg(NewVR);
20490   }
20491 }
20492 
20493 bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
20494   // Integer division on AArch64 is expensive. However, when aggressively
20495   // optimizing for code size, we prefer to use a div instruction, as it is
20496   // usually smaller than the alternative sequence.
20497   // The exception to this is vector division. Since AArch64 doesn't have vector
20498   // integer division, leaving the division as-is is a loss even in terms of
20499   // size, because it will have to be scalarized, while the alternative code
20500   // sequence can be performed in vector form.
20501   bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
20502   return OptSize && !VT.isVector();
20503 }
20504 
20505 bool AArch64TargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
20506   // We want inc-of-add for scalars and sub-of-not for vectors.
20507   return VT.isScalarInteger();
20508 }
20509 
20510 bool AArch64TargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
20511                                                  EVT VT) const {
  // v8f16 without fp16 needs to be extended to v8f32, which is more difficult
  // to legalize.
20514   if (FPVT == MVT::v8f16 && !Subtarget->hasFullFP16())
20515     return false;
20516   return TargetLowering::shouldConvertFpToSat(Op, FPVT, VT);
20517 }
20518 
20519 bool AArch64TargetLowering::enableAggressiveFMAFusion(EVT VT) const {
20520   return Subtarget->hasAggressiveFMA() && VT.isFloatingPoint();
20521 }
20522 
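// The AAPCS64 va_list is a struct of three pointers (__stack, __gr_top,
// __vr_top) and two 32-bit offsets (__gr_offs, __vr_offs); Darwin and Windows
// use a plain char* instead, hence the two sizes below.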
20523 unsigned
20524 AArch64TargetLowering::getVaListSizeInBits(const DataLayout &DL) const {
20525   if (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())
20526     return getPointerTy(DL).getSizeInBits();
20527 
20528   return 3 * getPointerTy(DL).getSizeInBits() + 2 * 32;
20529 }
20530 
20531 void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
20532   MachineFrameInfo &MFI = MF.getFrameInfo();
20533   // If we have any vulnerable SVE stack objects then the stack protector
20534   // needs to be placed at the top of the SVE stack area, as the SVE locals
20535   // are placed above the other locals, so we allocate it as if it were a
20536   // scalable vector.
20537   // FIXME: It may be worthwhile having a specific interface for this rather
20538   // than doing it here in finalizeLowering.
20539   if (MFI.hasStackProtectorIndex()) {
20540     for (unsigned int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
20541       if (MFI.getStackID(i) == TargetStackID::ScalableVector &&
20542           MFI.getObjectSSPLayout(i) != MachineFrameInfo::SSPLK_None) {
20543         MFI.setStackID(MFI.getStackProtectorIndex(),
20544                        TargetStackID::ScalableVector);
20545         MFI.setObjectAlignment(MFI.getStackProtectorIndex(), Align(16));
20546         break;
20547       }
20548     }
20549   }
20550   MFI.computeMaxCallFrameSize(MF);
20551   TargetLoweringBase::finalizeLowering(MF);
20552 }
20553 
20554 // Unlike X86, we let frame lowering assign offsets to all catch objects.
20555 bool AArch64TargetLowering::needsFixedCatchObjects() const {
20556   return false;
20557 }
20558 
20559 bool AArch64TargetLowering::shouldLocalize(
20560     const MachineInstr &MI, const TargetTransformInfo *TTI) const {
20561   switch (MI.getOpcode()) {
20562   case TargetOpcode::G_GLOBAL_VALUE: {
    // On Darwin, TLS global vars get selected into function calls, which
    // we don't want localized, as they can get moved into the middle of
    // another call sequence.
20566     const GlobalValue &GV = *MI.getOperand(1).getGlobal();
20567     if (GV.isThreadLocal() && Subtarget->isTargetMachO())
20568       return false;
20569     break;
20570   }
20571   // If we legalized G_GLOBAL_VALUE into ADRP + G_ADD_LOW, mark both as being
20572   // localizable.
20573   case AArch64::ADRP:
20574   case AArch64::G_ADD_LOW:
20575     return true;
20576   default:
20577     break;
20578   }
20579   return TargetLoweringBase::shouldLocalize(MI, TTI);
20580 }
20581 
20582 bool AArch64TargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
20583   if (isa<ScalableVectorType>(Inst.getType()))
20584     return true;
20585 
20586   for (unsigned i = 0; i < Inst.getNumOperands(); ++i)
20587     if (isa<ScalableVectorType>(Inst.getOperand(i)->getType()))
20588       return true;
20589 
20590   if (const AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
20591     if (isa<ScalableVectorType>(AI->getAllocatedType()))
20592       return true;
20593   }
20594 
20595   return false;
20596 }
20597 
20598 // Return the largest legal scalable vector type that matches VT's element type.
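// For example, a legal v4i32 maps to nxv4i32 and v16i8 maps to nxv16i8.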
20599 static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT) {
20600   assert(VT.isFixedLengthVector() &&
20601          DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
20602          "Expected legal fixed length vector!");
20603   switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
20604   default:
20605     llvm_unreachable("unexpected element type for SVE container");
20606   case MVT::i8:
20607     return EVT(MVT::nxv16i8);
20608   case MVT::i16:
20609     return EVT(MVT::nxv8i16);
20610   case MVT::i32:
20611     return EVT(MVT::nxv4i32);
20612   case MVT::i64:
20613     return EVT(MVT::nxv2i64);
20614   case MVT::f16:
20615     return EVT(MVT::nxv8f16);
20616   case MVT::f32:
20617     return EVT(MVT::nxv4f32);
20618   case MVT::f64:
20619     return EVT(MVT::nxv2f64);
20620   }
20621 }
20622 
20623 // Return a PTRUE with active lanes corresponding to the extent of VT.
20624 static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
20625                                                 EVT VT) {
20626   assert(VT.isFixedLengthVector() &&
20627          DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
20628          "Expected legal fixed length vector!");
20629 
20630   Optional<unsigned> PgPattern =
20631       getSVEPredPatternFromNumElements(VT.getVectorNumElements());
20632   assert(PgPattern && "Unexpected element count for SVE predicate");
20633 
20634   // For vectors that are exactly getMaxSVEVectorSizeInBits big, we can use
20635   // AArch64SVEPredPattern::all, which can enable the use of unpredicated
20636   // variants of instructions when available.
20637   const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
20638   unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
20639   unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
20640   if (MaxSVESize && MinSVESize == MaxSVESize &&
20641       MaxSVESize == VT.getSizeInBits())
20642     PgPattern = AArch64SVEPredPattern::all;
20643 
20644   MVT MaskVT;
20645   switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
20646   default:
20647     llvm_unreachable("unexpected element type for SVE predicate");
20648   case MVT::i8:
20649     MaskVT = MVT::nxv16i1;
20650     break;
20651   case MVT::i16:
20652   case MVT::f16:
20653     MaskVT = MVT::nxv8i1;
20654     break;
20655   case MVT::i32:
20656   case MVT::f32:
20657     MaskVT = MVT::nxv4i1;
20658     break;
20659   case MVT::i64:
20660   case MVT::f64:
20661     MaskVT = MVT::nxv2i1;
20662     break;
20663   }
20664 
20665   return getPTrue(DAG, DL, MaskVT, *PgPattern);
20666 }
20667 
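// Return an all-active predicate with the same element count as VT.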
20668 static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
20669                                              EVT VT) {
20670   assert(VT.isScalableVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
20671          "Expected legal scalable vector!");
20672   auto PredTy = VT.changeVectorElementType(MVT::i1);
20673   return getPTrue(DAG, DL, PredTy, AArch64SVEPredPattern::all);
20674 }
20675 
20676 static SDValue getPredicateForVector(SelectionDAG &DAG, SDLoc &DL, EVT VT) {
20677   if (VT.isFixedLengthVector())
20678     return getPredicateForFixedLengthVector(DAG, DL, VT);
20679 
20680   return getPredicateForScalableVector(DAG, DL, VT);
20681 }
20682 
20683 // Grow V to consume an entire SVE register.
20684 static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {
20685   assert(VT.isScalableVector() &&
20686          "Expected to convert into a scalable vector!");
20687   assert(V.getValueType().isFixedLengthVector() &&
20688          "Expected a fixed length vector operand!");
20689   SDLoc DL(V);
20690   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
20691   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
20692 }
20693 
20694 // Shrink V so it's just big enough to maintain a VT's worth of data.
20695 static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {
20696   assert(VT.isFixedLengthVector() &&
20697          "Expected to convert into a fixed length vector!");
20698   assert(V.getValueType().isScalableVector() &&
20699          "Expected a scalable vector operand!");
20700   SDLoc DL(V);
20701   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
20702   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
20703 }
20704 
20705 // Convert all fixed length vector loads larger than NEON to masked_loads.
20706 SDValue AArch64TargetLowering::LowerFixedLengthVectorLoadToSVE(
20707     SDValue Op, SelectionDAG &DAG) const {
20708   auto Load = cast<LoadSDNode>(Op);
20709 
20710   SDLoc DL(Op);
20711   EVT VT = Op.getValueType();
20712   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
20713   EVT LoadVT = ContainerVT;
20714   EVT MemVT = Load->getMemoryVT();
20715 
20716   auto Pg = getPredicateForFixedLengthVector(DAG, DL, VT);
20717 
20718   if (VT.isFloatingPoint() && Load->getExtensionType() == ISD::EXTLOAD) {
20719     LoadVT = ContainerVT.changeTypeToInteger();
20720     MemVT = MemVT.changeTypeToInteger();
20721   }
20722 
20723   SDValue NewLoad = DAG.getMaskedLoad(
20724       LoadVT, DL, Load->getChain(), Load->getBasePtr(), Load->getOffset(), Pg,
20725       DAG.getUNDEF(LoadVT), MemVT, Load->getMemOperand(),
20726       Load->getAddressingMode(), Load->getExtensionType());
20727 
20728   SDValue Result = NewLoad;
20729   if (VT.isFloatingPoint() && Load->getExtensionType() == ISD::EXTLOAD) {
20730     EVT ExtendVT = ContainerVT.changeVectorElementType(
20731         Load->getMemoryVT().getVectorElementType());
20732 
20733     Result = getSVESafeBitCast(ExtendVT, Result, DAG);
20734     Result = DAG.getNode(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU, DL, ContainerVT,
20735                          Pg, Result, DAG.getUNDEF(ContainerVT));
20736   }
20737 
20738   Result = convertFromScalableVector(DAG, VT, Result);
20739   SDValue MergedValues[2] = {Result, NewLoad.getValue(1)};
20740   return DAG.getMergeValues(MergedValues, DL);
20741 }
20742 
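// Convert a fixed length vector mask (an integer vector where non-zero means
// active) into an SVE predicate by comparing it against zero under the
// predicate covering the mask type's extent; an all-ones mask maps directly
// to that predicate.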
20743 static SDValue convertFixedMaskToScalableVector(SDValue Mask,
20744                                                 SelectionDAG &DAG) {
20745   SDLoc DL(Mask);
20746   EVT InVT = Mask.getValueType();
20747   EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
20748 
20749   auto Pg = getPredicateForFixedLengthVector(DAG, DL, InVT);
20750 
20751   if (ISD::isBuildVectorAllOnes(Mask.getNode()))
20752     return Pg;
20753 
20754   auto Op1 = convertToScalableVector(DAG, ContainerVT, Mask);
20755   auto Op2 = DAG.getConstant(0, DL, ContainerVT);
20756 
20757   return DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, Pg.getValueType(),
20758                      {Pg, Op1, Op2, DAG.getCondCode(ISD::SETNE)});
20759 }
20760 
// Convert fixed length vector masked loads larger than NEON to SVE masked
// loads.
20762 SDValue AArch64TargetLowering::LowerFixedLengthVectorMLoadToSVE(
20763     SDValue Op, SelectionDAG &DAG) const {
20764   auto Load = cast<MaskedLoadSDNode>(Op);
20765 
20766   SDLoc DL(Op);
20767   EVT VT = Op.getValueType();
20768   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
20769 
20770   SDValue Mask = convertFixedMaskToScalableVector(Load->getMask(), DAG);
20771 
20772   SDValue PassThru;
20773   bool IsPassThruZeroOrUndef = false;
20774 
20775   if (Load->getPassThru()->isUndef()) {
20776     PassThru = DAG.getUNDEF(ContainerVT);
20777     IsPassThruZeroOrUndef = true;
20778   } else {
20779     if (ContainerVT.isInteger())
20780       PassThru = DAG.getConstant(0, DL, ContainerVT);
20781     else
20782       PassThru = DAG.getConstantFP(0, DL, ContainerVT);
20783     if (isZerosVector(Load->getPassThru().getNode()))
20784       IsPassThruZeroOrUndef = true;
20785   }
20786 
20787   SDValue NewLoad = DAG.getMaskedLoad(
20788       ContainerVT, DL, Load->getChain(), Load->getBasePtr(), Load->getOffset(),
20789       Mask, PassThru, Load->getMemoryVT(), Load->getMemOperand(),
20790       Load->getAddressingMode(), Load->getExtensionType());
20791 
20792   SDValue Result = NewLoad;
20793   if (!IsPassThruZeroOrUndef) {
20794     SDValue OldPassThru =
20795         convertToScalableVector(DAG, ContainerVT, Load->getPassThru());
20796     Result = DAG.getSelect(DL, ContainerVT, Mask, Result, OldPassThru);
20797   }
20798 
20799   Result = convertFromScalableVector(DAG, VT, Result);
20800   SDValue MergedValues[2] = {Result, NewLoad.getValue(1)};
20801   return DAG.getMergeValues(MergedValues, DL);
20802 }
20803 
20804 // Convert all fixed length vector stores larger than NEON to masked_stores.
20805 SDValue AArch64TargetLowering::LowerFixedLengthVectorStoreToSVE(
20806     SDValue Op, SelectionDAG &DAG) const {
20807   auto Store = cast<StoreSDNode>(Op);
20808 
20809   SDLoc DL(Op);
20810   EVT VT = Store->getValue().getValueType();
20811   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
20812   EVT MemVT = Store->getMemoryVT();
20813 
20814   auto Pg = getPredicateForFixedLengthVector(DAG, DL, VT);
20815   auto NewValue = convertToScalableVector(DAG, ContainerVT, Store->getValue());
20816 
20817   if (VT.isFloatingPoint() && Store->isTruncatingStore()) {
20818     EVT TruncVT = ContainerVT.changeVectorElementType(
20819         Store->getMemoryVT().getVectorElementType());
20820     MemVT = MemVT.changeTypeToInteger();
20821     NewValue = DAG.getNode(AArch64ISD::FP_ROUND_MERGE_PASSTHRU, DL, TruncVT, Pg,
20822                            NewValue, DAG.getTargetConstant(0, DL, MVT::i64),
20823                            DAG.getUNDEF(TruncVT));
20824     NewValue =
20825         getSVESafeBitCast(ContainerVT.changeTypeToInteger(), NewValue, DAG);
20826   }
20827 
20828   return DAG.getMaskedStore(Store->getChain(), DL, NewValue,
20829                             Store->getBasePtr(), Store->getOffset(), Pg, MemVT,
20830                             Store->getMemOperand(), Store->getAddressingMode(),
20831                             Store->isTruncatingStore());
20832 }
20833 
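// Lower a fixed length masked store by widening the value to a scalable
// container and the mask to a predicate, then emitting an SVE masked store.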
20834 SDValue AArch64TargetLowering::LowerFixedLengthVectorMStoreToSVE(
20835     SDValue Op, SelectionDAG &DAG) const {
20836   auto *Store = cast<MaskedStoreSDNode>(Op);
20837 
20838   SDLoc DL(Op);
20839   EVT VT = Store->getValue().getValueType();
20840   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
20841 
20842   auto NewValue = convertToScalableVector(DAG, ContainerVT, Store->getValue());
20843   SDValue Mask = convertFixedMaskToScalableVector(Store->getMask(), DAG);
20844 
20845   return DAG.getMaskedStore(
20846       Store->getChain(), DL, NewValue, Store->getBasePtr(), Store->getOffset(),
20847       Mask, Store->getMemoryVT(), Store->getMemOperand(),
20848       Store->getAddressingMode(), Store->isTruncatingStore());
20849 }
20850 
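// Lower fixed length integer division: signed division by a power of two
// becomes a predicated arithmetic shift right (SRAD), i32/i64 element types
// use the predicated SDIV/UDIV nodes directly, and i8/i16 element types are
// widened one step (either as fixed vectors or via SUNPK/UUNPK), divided, and
// narrowed back with a truncate or UZP1.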
20851 SDValue AArch64TargetLowering::LowerFixedLengthVectorIntDivideToSVE(
20852     SDValue Op, SelectionDAG &DAG) const {
20853   SDLoc dl(Op);
20854   EVT VT = Op.getValueType();
20855   EVT EltVT = VT.getVectorElementType();
20856 
20857   bool Signed = Op.getOpcode() == ISD::SDIV;
20858   unsigned PredOpcode = Signed ? AArch64ISD::SDIV_PRED : AArch64ISD::UDIV_PRED;
20859 
20860   bool Negated;
20861   uint64_t SplatVal;
20862   if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated)) {
20863     EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
20864     SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
20865     SDValue Op2 = DAG.getTargetConstant(Log2_64(SplatVal), dl, MVT::i32);
20866 
20867     SDValue Pg = getPredicateForFixedLengthVector(DAG, dl, VT);
    SDValue Res = DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, dl, ContainerVT, Pg,
                              Op1, Op2);
20869     if (Negated)
20870       Res = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), Res);
20871 
20872     return convertFromScalableVector(DAG, VT, Res);
20873   }
20874 
20875   // Scalable vector i32/i64 DIV is supported.
20876   if (EltVT == MVT::i32 || EltVT == MVT::i64)
20877     return LowerToPredicatedOp(Op, DAG, PredOpcode);
20878 
20879   // Scalable vector i8/i16 DIV is not supported. Promote it to i32.
20880   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
20881   EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
20882   EVT FixedWidenedVT = HalfVT.widenIntegerVectorElementType(*DAG.getContext());
20883   EVT ScalableWidenedVT = getContainerForFixedLengthVector(DAG, FixedWidenedVT);
20884 
20885   // If this is not a full vector, extend, div, and truncate it.
20886   EVT WidenedVT = VT.widenIntegerVectorElementType(*DAG.getContext());
20887   if (DAG.getTargetLoweringInfo().isTypeLegal(WidenedVT)) {
20888     unsigned ExtendOpcode = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
20889     SDValue Op0 = DAG.getNode(ExtendOpcode, dl, WidenedVT, Op.getOperand(0));
20890     SDValue Op1 = DAG.getNode(ExtendOpcode, dl, WidenedVT, Op.getOperand(1));
20891     SDValue Div = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0, Op1);
20892     return DAG.getNode(ISD::TRUNCATE, dl, VT, Div);
20893   }
20894 
20895   // Convert the operands to scalable vectors.
20896   SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
20897   SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(1));
20898 
20899   // Extend the scalable operands.
20900   unsigned UnpkLo = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
20901   unsigned UnpkHi = Signed ? AArch64ISD::SUNPKHI : AArch64ISD::UUNPKHI;
20902   SDValue Op0Lo = DAG.getNode(UnpkLo, dl, ScalableWidenedVT, Op0);
20903   SDValue Op1Lo = DAG.getNode(UnpkLo, dl, ScalableWidenedVT, Op1);
20904   SDValue Op0Hi = DAG.getNode(UnpkHi, dl, ScalableWidenedVT, Op0);
20905   SDValue Op1Hi = DAG.getNode(UnpkHi, dl, ScalableWidenedVT, Op1);
20906 
20907   // Convert back to fixed vectors so the DIV can be further lowered.
20908   Op0Lo = convertFromScalableVector(DAG, FixedWidenedVT, Op0Lo);
20909   Op1Lo = convertFromScalableVector(DAG, FixedWidenedVT, Op1Lo);
20910   Op0Hi = convertFromScalableVector(DAG, FixedWidenedVT, Op0Hi);
20911   Op1Hi = convertFromScalableVector(DAG, FixedWidenedVT, Op1Hi);
20912   SDValue ResultLo = DAG.getNode(Op.getOpcode(), dl, FixedWidenedVT,
20913                                  Op0Lo, Op1Lo);
20914   SDValue ResultHi = DAG.getNode(Op.getOpcode(), dl, FixedWidenedVT,
20915                                  Op0Hi, Op1Hi);
20916 
20917   // Convert again to scalable vectors to truncate.
20918   ResultLo = convertToScalableVector(DAG, ScalableWidenedVT, ResultLo);
20919   ResultHi = convertToScalableVector(DAG, ScalableWidenedVT, ResultHi);
20920   SDValue ScalableResult = DAG.getNode(AArch64ISD::UZP1, dl, ContainerVT,
20921                                        ResultLo, ResultHi);
20922 
20923   return convertFromScalableVector(DAG, VT, ScalableResult);
20924 }
20925 
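// Lower a fixed length sign/zero extend by repeatedly unpacking the low half
// of the vector with SUNPKLO/UUNPKLO until the desired element width is
// reached.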
20926 SDValue AArch64TargetLowering::LowerFixedLengthVectorIntExtendToSVE(
20927     SDValue Op, SelectionDAG &DAG) const {
20928   EVT VT = Op.getValueType();
20929   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
20930 
20931   SDLoc DL(Op);
20932   SDValue Val = Op.getOperand(0);
20933   EVT ContainerVT = getContainerForFixedLengthVector(DAG, Val.getValueType());
20934   Val = convertToScalableVector(DAG, ContainerVT, Val);
20935 
20936   bool Signed = Op.getOpcode() == ISD::SIGN_EXTEND;
20937   unsigned ExtendOpc = Signed ? AArch64ISD::SUNPKLO : AArch64ISD::UUNPKLO;
20938 
20939   // Repeatedly unpack Val until the result is of the desired element type.
20940   switch (ContainerVT.getSimpleVT().SimpleTy) {
20941   default:
20942     llvm_unreachable("unimplemented container type");
20943   case MVT::nxv16i8:
20944     Val = DAG.getNode(ExtendOpc, DL, MVT::nxv8i16, Val);
20945     if (VT.getVectorElementType() == MVT::i16)
20946       break;
20947     LLVM_FALLTHROUGH;
20948   case MVT::nxv8i16:
20949     Val = DAG.getNode(ExtendOpc, DL, MVT::nxv4i32, Val);
20950     if (VT.getVectorElementType() == MVT::i32)
20951       break;
20952     LLVM_FALLTHROUGH;
20953   case MVT::nxv4i32:
20954     Val = DAG.getNode(ExtendOpc, DL, MVT::nxv2i64, Val);
20955     assert(VT.getVectorElementType() == MVT::i64 && "Unexpected element type!");
20956     break;
20957   }
20958 
20959   return convertFromScalableVector(DAG, VT, Val);
20960 }
20961 
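// Lower a fixed length truncate by repeatedly bitcasting to the next narrower
// packed type and using UZP1 to keep the low half of each original element.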
20962 SDValue AArch64TargetLowering::LowerFixedLengthVectorTruncateToSVE(
20963     SDValue Op, SelectionDAG &DAG) const {
20964   EVT VT = Op.getValueType();
20965   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
20966 
20967   SDLoc DL(Op);
20968   SDValue Val = Op.getOperand(0);
20969   EVT ContainerVT = getContainerForFixedLengthVector(DAG, Val.getValueType());
20970   Val = convertToScalableVector(DAG, ContainerVT, Val);
20971 
20972   // Repeatedly truncate Val until the result is of the desired element type.
20973   switch (ContainerVT.getSimpleVT().SimpleTy) {
20974   default:
20975     llvm_unreachable("unimplemented container type");
20976   case MVT::nxv2i64:
20977     Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv4i32, Val);
20978     Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv4i32, Val, Val);
20979     if (VT.getVectorElementType() == MVT::i32)
20980       break;
20981     LLVM_FALLTHROUGH;
20982   case MVT::nxv4i32:
20983     Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv8i16, Val);
20984     Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv8i16, Val, Val);
20985     if (VT.getVectorElementType() == MVT::i16)
20986       break;
20987     LLVM_FALLTHROUGH;
20988   case MVT::nxv8i16:
20989     Val = DAG.getNode(ISD::BITCAST, DL, MVT::nxv16i8, Val);
20990     Val = DAG.getNode(AArch64ISD::UZP1, DL, MVT::nxv16i8, Val, Val);
20991     assert(VT.getVectorElementType() == MVT::i8 && "Unexpected element type!");
20992     break;
20993   }
20994 
20995   return convertFromScalableVector(DAG, VT, Val);
20996 }
20997 
20998 SDValue AArch64TargetLowering::LowerFixedLengthExtractVectorElt(
20999     SDValue Op, SelectionDAG &DAG) const {
21000   EVT VT = Op.getValueType();
21001   EVT InVT = Op.getOperand(0).getValueType();
21002   assert(InVT.isFixedLengthVector() && "Expected fixed length vector type!");
21003 
21004   SDLoc DL(Op);
21005   EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21006   SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(0));
21007 
21008   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op0, Op.getOperand(1));
21009 }
21010 
21011 SDValue AArch64TargetLowering::LowerFixedLengthInsertVectorElt(
21012     SDValue Op, SelectionDAG &DAG) const {
21013   EVT VT = Op.getValueType();
21014   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21015 
21016   SDLoc DL(Op);
21017   EVT InVT = Op.getOperand(0).getValueType();
21018   EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21019   SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(0));
21020 
21021   auto ScalableRes = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT, Op0,
21022                                  Op.getOperand(1), Op.getOperand(2));
21023 
21024   return convertFromScalableVector(DAG, VT, ScalableRes);
21025 }
21026 
21027 // Convert vector operation 'Op' to an equivalent predicated operation whereby
21028 // the original operation's type is used to construct a suitable predicate.
21029 // NOTE: The results for inactive lanes are undefined.
21030 SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
21031                                                    SelectionDAG &DAG,
21032                                                    unsigned NewOp) const {
21033   EVT VT = Op.getValueType();
21034   SDLoc DL(Op);
21035   auto Pg = getPredicateForVector(DAG, DL, VT);
21036 
21037   if (VT.isFixedLengthVector()) {
21038     assert(isTypeLegal(VT) && "Expected only legal fixed-width types");
21039     EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21040 
21041     // Create list of operands by converting existing ones to scalable types.
21042     SmallVector<SDValue, 4> Operands = {Pg};
21043     for (const SDValue &V : Op->op_values()) {
21044       if (isa<CondCodeSDNode>(V)) {
21045         Operands.push_back(V);
21046         continue;
21047       }
21048 
21049       if (const VTSDNode *VTNode = dyn_cast<VTSDNode>(V)) {
21050         EVT VTArg = VTNode->getVT().getVectorElementType();
21051         EVT NewVTArg = ContainerVT.changeVectorElementType(VTArg);
21052         Operands.push_back(DAG.getValueType(NewVTArg));
21053         continue;
21054       }
21055 
21056       assert(isTypeLegal(V.getValueType()) &&
21057              "Expected only legal fixed-width types");
21058       Operands.push_back(convertToScalableVector(DAG, ContainerVT, V));
21059     }
21060 
21061     if (isMergePassthruOpcode(NewOp))
21062       Operands.push_back(DAG.getUNDEF(ContainerVT));
21063 
21064     auto ScalableRes = DAG.getNode(NewOp, DL, ContainerVT, Operands);
21065     return convertFromScalableVector(DAG, VT, ScalableRes);
21066   }
21067 
21068   assert(VT.isScalableVector() && "Only expect to lower scalable vector op!");
21069 
21070   SmallVector<SDValue, 4> Operands = {Pg};
21071   for (const SDValue &V : Op->op_values()) {
21072     assert((!V.getValueType().isVector() ||
21073             V.getValueType().isScalableVector()) &&
21074            "Only scalable vectors are supported!");
21075     Operands.push_back(V);
21076   }
21077 
21078   if (isMergePassthruOpcode(NewOp))
21079     Operands.push_back(DAG.getUNDEF(VT));
21080 
21081   return DAG.getNode(NewOp, DL, VT, Operands, Op->getFlags());
21082 }
21083 
21084 // If a fixed length vector operation has no side effects when applied to
21085 // undefined elements, we can safely use scalable vectors to perform the same
21086 // operation without needing to worry about predication.
21087 SDValue AArch64TargetLowering::LowerToScalableOp(SDValue Op,
21088                                                  SelectionDAG &DAG) const {
21089   EVT VT = Op.getValueType();
21090   assert(useSVEForFixedLengthVectorVT(VT) &&
21091          "Only expected to lower fixed length vector operation!");
21092   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21093 
21094   // Create list of operands by converting existing ones to scalable types.
21095   SmallVector<SDValue, 4> Ops;
21096   for (const SDValue &V : Op->op_values()) {
21097     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
21098 
21099     // Pass through non-vector operands.
21100     if (!V.getValueType().isVector()) {
21101       Ops.push_back(V);
21102       continue;
21103     }
21104 
21105     // "cast" fixed length vector to a scalable vector.
21106     assert(useSVEForFixedLengthVectorVT(V.getValueType()) &&
21107            "Only fixed length vectors are supported!");
21108     Ops.push_back(convertToScalableVector(DAG, ContainerVT, V));
21109   }
21110 
21111   auto ScalableRes = DAG.getNode(Op.getOpcode(), SDLoc(Op), ContainerVT, Ops);
21112   return convertFromScalableVector(DAG, VT, ScalableRes);
21113 }
21114 
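// Lower a strictly-ordered floating-point reduction (vecreduce_seq_fadd) to
// FADDA, which folds lanes in order starting from the initial accumulator
// value placed in lane 0.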
21115 SDValue AArch64TargetLowering::LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp,
21116     SelectionDAG &DAG) const {
21117   SDLoc DL(ScalarOp);
21118   SDValue AccOp = ScalarOp.getOperand(0);
21119   SDValue VecOp = ScalarOp.getOperand(1);
21120   EVT SrcVT = VecOp.getValueType();
21121   EVT ResVT = SrcVT.getVectorElementType();
21122 
21123   EVT ContainerVT = SrcVT;
21124   if (SrcVT.isFixedLengthVector()) {
21125     ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
21126     VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
21127   }
21128 
21129   SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
21130   SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
21131 
21132   // Convert operands to Scalable.
21133   AccOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT,
21134                       DAG.getUNDEF(ContainerVT), AccOp, Zero);
21135 
21136   // Perform reduction.
21137   SDValue Rdx = DAG.getNode(AArch64ISD::FADDA_PRED, DL, ContainerVT,
21138                             Pg, AccOp, VecOp);
21139 
21140   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Rdx, Zero);
21141 }
21142 
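// Lower reductions of scalable i1 vectors: OR/AND reduce to a PTEST (AND by
// first inverting the operand under the governing predicate), while XOR
// becomes a CNTP whose low bit gives the parity of the active lanes.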
21143 SDValue AArch64TargetLowering::LowerPredReductionToSVE(SDValue ReduceOp,
21144                                                        SelectionDAG &DAG) const {
21145   SDLoc DL(ReduceOp);
21146   SDValue Op = ReduceOp.getOperand(0);
21147   EVT OpVT = Op.getValueType();
21148   EVT VT = ReduceOp.getValueType();
21149 
21150   if (!OpVT.isScalableVector() || OpVT.getVectorElementType() != MVT::i1)
21151     return SDValue();
21152 
21153   SDValue Pg = getPredicateForVector(DAG, DL, OpVT);
21154 
21155   switch (ReduceOp.getOpcode()) {
21156   default:
21157     return SDValue();
21158   case ISD::VECREDUCE_OR:
21159     if (isAllActivePredicate(DAG, Pg) && OpVT == MVT::nxv16i1)
21160       // The predicate can be 'Op' because
21161       // vecreduce_or(Op & <all true>) <=> vecreduce_or(Op).
21162       return getPTest(DAG, VT, Op, Op, AArch64CC::ANY_ACTIVE);
21163     else
21164       return getPTest(DAG, VT, Pg, Op, AArch64CC::ANY_ACTIVE);
21165   case ISD::VECREDUCE_AND: {
21166     Op = DAG.getNode(ISD::XOR, DL, OpVT, Op, Pg);
21167     return getPTest(DAG, VT, Pg, Op, AArch64CC::NONE_ACTIVE);
21168   }
21169   case ISD::VECREDUCE_XOR: {
21170     SDValue ID =
21171         DAG.getTargetConstant(Intrinsic::aarch64_sve_cntp, DL, MVT::i64);
21172     if (OpVT == MVT::nxv1i1) {
21173       // Emulate a CNTP on .Q using .D and a different governing predicate.
21174       Pg = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv2i1, Pg);
21175       Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv2i1, Op);
21176     }
21177     SDValue Cntp =
21178         DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i64, ID, Pg, Op);
21179     return DAG.getAnyExtOrTrunc(Cntp, DL, VT);
21180   }
21181   }
21182 
21183   return SDValue();
21184 }
21185 
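// Lower the remaining integer and floating-point reductions to their
// predicated SVE forms, converting fixed length sources to scalable
// containers first; UADDV always produces an i64 result, which is truncated
// back to the expected scalar type if necessary.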
21186 SDValue AArch64TargetLowering::LowerReductionToSVE(unsigned Opcode,
21187                                                    SDValue ScalarOp,
21188                                                    SelectionDAG &DAG) const {
21189   SDLoc DL(ScalarOp);
21190   SDValue VecOp = ScalarOp.getOperand(0);
21191   EVT SrcVT = VecOp.getValueType();
21192 
21193   if (useSVEForFixedLengthVectorVT(
21194           SrcVT,
21195           /*OverrideNEON=*/Subtarget->useSVEForFixedLengthVectors())) {
21196     EVT ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
21197     VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
21198   }
21199 
21200   // UADDV always returns an i64 result.
21201   EVT ResVT = (Opcode == AArch64ISD::UADDV_PRED) ? MVT::i64 :
21202                                                    SrcVT.getVectorElementType();
21203   EVT RdxVT = SrcVT;
21204   if (SrcVT.isFixedLengthVector() || Opcode == AArch64ISD::UADDV_PRED)
21205     RdxVT = getPackedSVEVectorVT(ResVT);
21206 
21207   SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
21208   SDValue Rdx = DAG.getNode(Opcode, DL, RdxVT, Pg, VecOp);
21209   SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT,
21210                             Rdx, DAG.getConstant(0, DL, MVT::i64));
21211 
21212   // The VEC_REDUCE nodes expect an element size result.
21213   if (ResVT != ScalarOp.getValueType())
21214     Res = DAG.getAnyExtOrTrunc(Res, DL, ScalarOp.getValueType());
21215 
21216   return Res;
21217 }
21218 
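// Lower a fixed length VSELECT by converting both value operands to scalable
// containers and truncating the mask to an i1 predicate of matching layout.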
21219 SDValue
21220 AArch64TargetLowering::LowerFixedLengthVectorSelectToSVE(SDValue Op,
21221     SelectionDAG &DAG) const {
21222   EVT VT = Op.getValueType();
21223   SDLoc DL(Op);
21224 
21225   EVT InVT = Op.getOperand(1).getValueType();
21226   EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21227   SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(1));
21228   SDValue Op2 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(2));
21229 
  // Convert the mask to a predicate (NOTE: We don't need to worry about
21231   // inactive lanes since VSELECT is safe when given undefined elements).
21232   EVT MaskVT = Op.getOperand(0).getValueType();
21233   EVT MaskContainerVT = getContainerForFixedLengthVector(DAG, MaskVT);
21234   auto Mask = convertToScalableVector(DAG, MaskContainerVT, Op.getOperand(0));
21235   Mask = DAG.getNode(ISD::TRUNCATE, DL,
21236                      MaskContainerVT.changeVectorElementType(MVT::i1), Mask);
21237 
21238   auto ScalableRes = DAG.getNode(ISD::VSELECT, DL, ContainerVT,
21239                                 Mask, Op1, Op2);
21240 
21241   return convertFromScalableVector(DAG, VT, ScalableRes);
21242 }
21243 
21244 SDValue AArch64TargetLowering::LowerFixedLengthVectorSetccToSVE(
21245     SDValue Op, SelectionDAG &DAG) const {
21246   SDLoc DL(Op);
21247   EVT InVT = Op.getOperand(0).getValueType();
21248   EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
21249 
21250   assert(useSVEForFixedLengthVectorVT(InVT) &&
21251          "Only expected to lower fixed length vector operation!");
21252   assert(Op.getValueType() == InVT.changeTypeToInteger() &&
21253          "Expected integer result of the same bit length as the inputs!");
21254 
21255   auto Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
21256   auto Op2 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(1));
21257   auto Pg = getPredicateForFixedLengthVector(DAG, DL, InVT);
21258 
21259   EVT CmpVT = Pg.getValueType();
21260   auto Cmp = DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, CmpVT,
21261                          {Pg, Op1, Op2, Op.getOperand(2)});
21262 
21263   EVT PromoteVT = ContainerVT.changeTypeToInteger();
21264   auto Promote = DAG.getBoolExtOrTrunc(Cmp, DL, PromoteVT, InVT);
21265   return convertFromScalableVector(DAG, Op.getValueType(), Promote);
21266 }
21267 
21268 SDValue
21269 AArch64TargetLowering::LowerFixedLengthBitcastToSVE(SDValue Op,
21270                                                     SelectionDAG &DAG) const {
21271   SDLoc DL(Op);
21272   auto SrcOp = Op.getOperand(0);
21273   EVT VT = Op.getValueType();
21274   EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
21275   EVT ContainerSrcVT =
21276       getContainerForFixedLengthVector(DAG, SrcOp.getValueType());
21277 
21278   SrcOp = convertToScalableVector(DAG, ContainerSrcVT, SrcOp);
21279   Op = DAG.getNode(ISD::BITCAST, DL, ContainerDstVT, SrcOp);
21280   return convertFromScalableVector(DAG, VT, Op);
21281 }
21282 
21283 SDValue AArch64TargetLowering::LowerFixedLengthConcatVectorsToSVE(
21284     SDValue Op, SelectionDAG &DAG) const {
21285   SDLoc DL(Op);
21286   unsigned NumOperands = Op->getNumOperands();
21287 
21288   assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
21289          "Unexpected number of operands in CONCAT_VECTORS");
21290 
21291   auto SrcOp1 = Op.getOperand(0);
21292   auto SrcOp2 = Op.getOperand(1);
21293   EVT VT = Op.getValueType();
21294   EVT SrcVT = SrcOp1.getValueType();
21295 
21296   if (NumOperands > 2) {
21297     SmallVector<SDValue, 4> Ops;
21298     EVT PairVT = SrcVT.getDoubleNumVectorElementsVT(*DAG.getContext());
21299     for (unsigned I = 0; I < NumOperands; I += 2)
21300       Ops.push_back(DAG.getNode(ISD::CONCAT_VECTORS, DL, PairVT,
21301                                 Op->getOperand(I), Op->getOperand(I + 1)));
21302 
21303     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Ops);
21304   }
21305 
21306   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21307 
21308   SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, SrcVT);
21309   SrcOp1 = convertToScalableVector(DAG, ContainerVT, SrcOp1);
21310   SrcOp2 = convertToScalableVector(DAG, ContainerVT, SrcOp2);
21311 
21312   Op = DAG.getNode(AArch64ISD::SPLICE, DL, ContainerVT, Pg, SrcOp1, SrcOp2);
21313 
21314   return convertFromScalableVector(DAG, VT, Op);
21315 }
21316 
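// Lower a fixed length fp_extend by any-extending the source integer-wise so
// each narrow value sits in the low part of a wider lane, then widening in
// place with FP_EXTEND_MERGE_PASSTHRU.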
21317 SDValue
21318 AArch64TargetLowering::LowerFixedLengthFPExtendToSVE(SDValue Op,
21319                                                      SelectionDAG &DAG) const {
21320   EVT VT = Op.getValueType();
21321   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21322 
21323   SDLoc DL(Op);
21324   SDValue Val = Op.getOperand(0);
21325   SDValue Pg = getPredicateForVector(DAG, DL, VT);
21326   EVT SrcVT = Val.getValueType();
21327   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21328   EVT ExtendVT = ContainerVT.changeVectorElementType(
21329       SrcVT.getVectorElementType());
21330 
21331   Val = DAG.getNode(ISD::BITCAST, DL, SrcVT.changeTypeToInteger(), Val);
21332   Val = DAG.getNode(ISD::ANY_EXTEND, DL, VT.changeTypeToInteger(), Val);
21333 
21334   Val = convertToScalableVector(DAG, ContainerVT.changeTypeToInteger(), Val);
21335   Val = getSVESafeBitCast(ExtendVT, Val, DAG);
21336   Val = DAG.getNode(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU, DL, ContainerVT,
21337                     Pg, Val, DAG.getUNDEF(ContainerVT));
21338 
21339   return convertFromScalableVector(DAG, VT, Val);
21340 }
21341 
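// Lower a fixed length fp_round by rounding in place with
// FP_ROUND_MERGE_PASSTHRU and then dropping the unused high part of each lane
// via an integer truncate.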
21342 SDValue
21343 AArch64TargetLowering::LowerFixedLengthFPRoundToSVE(SDValue Op,
21344                                                     SelectionDAG &DAG) const {
21345   EVT VT = Op.getValueType();
21346   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21347 
21348   SDLoc DL(Op);
21349   SDValue Val = Op.getOperand(0);
21350   EVT SrcVT = Val.getValueType();
21351   EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
21352   EVT RoundVT = ContainerSrcVT.changeVectorElementType(
21353       VT.getVectorElementType());
21354   SDValue Pg = getPredicateForVector(DAG, DL, RoundVT);
21355 
21356   Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21357   Val = DAG.getNode(AArch64ISD::FP_ROUND_MERGE_PASSTHRU, DL, RoundVT, Pg, Val,
21358                     Op.getOperand(1), DAG.getUNDEF(RoundVT));
21359   Val = getSVESafeBitCast(ContainerSrcVT.changeTypeToInteger(), Val, DAG);
21360   Val = convertFromScalableVector(DAG, SrcVT.changeTypeToInteger(), Val);
21361 
21362   Val = DAG.getNode(ISD::TRUNCATE, DL, VT.changeTypeToInteger(), Val);
21363   return DAG.getNode(ISD::BITCAST, DL, VT, Val);
21364 }
21365 
21366 SDValue
21367 AArch64TargetLowering::LowerFixedLengthIntToFPToSVE(SDValue Op,
21368                                                     SelectionDAG &DAG) const {
21369   EVT VT = Op.getValueType();
21370   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21371 
21372   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP;
21373   unsigned Opcode = IsSigned ? AArch64ISD::SINT_TO_FP_MERGE_PASSTHRU
21374                              : AArch64ISD::UINT_TO_FP_MERGE_PASSTHRU;
21375 
21376   SDLoc DL(Op);
21377   SDValue Val = Op.getOperand(0);
21378   EVT SrcVT = Val.getValueType();
21379   EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
21380   EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
21381 
21382   if (ContainerSrcVT.getVectorElementType().getSizeInBits() <=
21383       ContainerDstVT.getVectorElementType().getSizeInBits()) {
21384     SDValue Pg = getPredicateForVector(DAG, DL, VT);
21385 
21386     Val = DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
21387                       VT.changeTypeToInteger(), Val);
21388 
21389     Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21390     Val = getSVESafeBitCast(ContainerDstVT.changeTypeToInteger(), Val, DAG);
21391     // Safe to use a larger than specified operand since we just unpacked the
21392     // data, hence the upper bits are zero.
21393     Val = DAG.getNode(Opcode, DL, ContainerDstVT, Pg, Val,
21394                       DAG.getUNDEF(ContainerDstVT));
21395     return convertFromScalableVector(DAG, VT, Val);
21396   } else {
21397     EVT CvtVT = ContainerSrcVT.changeVectorElementType(
21398         ContainerDstVT.getVectorElementType());
21399     SDValue Pg = getPredicateForVector(DAG, DL, CvtVT);
21400 
21401     Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21402     Val = DAG.getNode(Opcode, DL, CvtVT, Pg, Val, DAG.getUNDEF(CvtVT));
21403     Val = getSVESafeBitCast(ContainerSrcVT, Val, DAG);
21404     Val = convertFromScalableVector(DAG, SrcVT, Val);
21405 
21406     Val = DAG.getNode(ISD::TRUNCATE, DL, VT.changeTypeToInteger(), Val);
21407     return DAG.getNode(ISD::BITCAST, DL, VT, Val);
21408   }
21409 }
21410 
21411 SDValue
21412 AArch64TargetLowering::LowerFixedLengthFPToIntToSVE(SDValue Op,
21413                                                     SelectionDAG &DAG) const {
21414   EVT VT = Op.getValueType();
21415   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21416 
21417   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
21418   unsigned Opcode = IsSigned ? AArch64ISD::FCVTZS_MERGE_PASSTHRU
21419                              : AArch64ISD::FCVTZU_MERGE_PASSTHRU;
21420 
21421   SDLoc DL(Op);
21422   SDValue Val = Op.getOperand(0);
21423   EVT SrcVT = Val.getValueType();
21424   EVT ContainerDstVT = getContainerForFixedLengthVector(DAG, VT);
21425   EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
21426 
21427   if (ContainerSrcVT.getVectorElementType().getSizeInBits() <=
21428       ContainerDstVT.getVectorElementType().getSizeInBits()) {
21429     EVT CvtVT = ContainerDstVT.changeVectorElementType(
21430       ContainerSrcVT.getVectorElementType());
21431     SDValue Pg = getPredicateForVector(DAG, DL, VT);
21432 
21433     Val = DAG.getNode(ISD::BITCAST, DL, SrcVT.changeTypeToInteger(), Val);
21434     Val = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Val);
21435 
21436     Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21437     Val = getSVESafeBitCast(CvtVT, Val, DAG);
21438     Val = DAG.getNode(Opcode, DL, ContainerDstVT, Pg, Val,
21439                       DAG.getUNDEF(ContainerDstVT));
21440     return convertFromScalableVector(DAG, VT, Val);
21441   } else {
21442     EVT CvtVT = ContainerSrcVT.changeTypeToInteger();
21443     SDValue Pg = getPredicateForVector(DAG, DL, CvtVT);
21444 
21445     // Safe to use a larger than specified result since an fp_to_int where the
21446     // result doesn't fit into the destination is undefined.
21447     Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
21448     Val = DAG.getNode(Opcode, DL, CvtVT, Pg, Val, DAG.getUNDEF(CvtVT));
21449     Val = convertFromScalableVector(DAG, SrcVT.changeTypeToInteger(), Val);
21450 
21451     return DAG.getNode(ISD::TRUNCATE, DL, VT, Val);
21452   }
21453 }
21454 
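// Lower fixed length VECTOR_SHUFFLEs whose masks correspond to SVE permutes:
// INSR for EXT-style masks that take only the last element of the first
// operand, REV[HW]/BSWAP for in-lane reversals, ZIP1/TRN, and, when the exact
// vector length is known, VECTOR_REVERSE/ZIP2/UZP as well. Anything else
// returns SDValue() so the generic expansion takes over.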
21455 SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE(
21456     SDValue Op, SelectionDAG &DAG) const {
21457   EVT VT = Op.getValueType();
21458   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
21459 
21460   auto *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
21461   auto ShuffleMask = SVN->getMask();
21462 
21463   SDLoc DL(Op);
21464   SDValue Op1 = Op.getOperand(0);
21465   SDValue Op2 = Op.getOperand(1);
21466 
21467   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
21468   Op1 = convertToScalableVector(DAG, ContainerVT, Op1);
21469   Op2 = convertToScalableVector(DAG, ContainerVT, Op2);
21470 
21471   bool ReverseEXT = false;
21472   unsigned Imm;
21473   if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm) &&
21474       Imm == VT.getVectorNumElements() - 1) {
21475     if (ReverseEXT)
21476       std::swap(Op1, Op2);
21477 
21478     EVT ScalarTy = VT.getVectorElementType();
21479     if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
21480       ScalarTy = MVT::i32;
21481     SDValue Scalar = DAG.getNode(
21482         ISD::EXTRACT_VECTOR_ELT, DL, ScalarTy, Op1,
21483         DAG.getConstant(VT.getVectorNumElements() - 1, DL, MVT::i64));
21484     Op = DAG.getNode(AArch64ISD::INSR, DL, ContainerVT, Op2, Scalar);
21485     return convertFromScalableVector(DAG, VT, Op);
21486   }
21487 
21488   for (unsigned LaneSize : {64U, 32U, 16U}) {
21489     if (isREVMask(ShuffleMask, VT, LaneSize)) {
21490       EVT NewVT =
21491           getPackedSVEVectorVT(EVT::getIntegerVT(*DAG.getContext(), LaneSize));
21492       unsigned RevOp;
21493       unsigned EltSz = VT.getScalarSizeInBits();
21494       if (EltSz == 8)
21495         RevOp = AArch64ISD::BSWAP_MERGE_PASSTHRU;
21496       else if (EltSz == 16)
21497         RevOp = AArch64ISD::REVH_MERGE_PASSTHRU;
21498       else
21499         RevOp = AArch64ISD::REVW_MERGE_PASSTHRU;
21500 
21501       Op = DAG.getNode(ISD::BITCAST, DL, NewVT, Op1);
21502       Op = LowerToPredicatedOp(Op, DAG, RevOp);
21503       Op = DAG.getNode(ISD::BITCAST, DL, ContainerVT, Op);
21504       return convertFromScalableVector(DAG, VT, Op);
21505     }
21506   }
21507 
21508   unsigned WhichResult;
21509   if (isZIPMask(ShuffleMask, VT, WhichResult) && WhichResult == 0)
21510     return convertFromScalableVector(
21511         DAG, VT, DAG.getNode(AArch64ISD::ZIP1, DL, ContainerVT, Op1, Op2));
21512 
21513   if (isTRNMask(ShuffleMask, VT, WhichResult)) {
21514     unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
21515     return convertFromScalableVector(
21516         DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op2));
21517   }
21518 
21519   if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult) && WhichResult == 0)
21520     return convertFromScalableVector(
21521         DAG, VT, DAG.getNode(AArch64ISD::ZIP1, DL, ContainerVT, Op1, Op1));
21522 
21523   if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
21524     unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
21525     return convertFromScalableVector(
21526         DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op1));
21527   }
21528 
  // Functions like isZIPMask return true when an ISD::VECTOR_SHUFFLE's mask
21530   // represents the same logical operation as performed by a ZIP instruction. In
21531   // isolation these functions do not mean the ISD::VECTOR_SHUFFLE is exactly
21532   // equivalent to an AArch64 instruction. There's the extra component of
21533   // ISD::VECTOR_SHUFFLE's value type to consider. Prior to SVE these functions
  // only operated on 64/128-bit vector types that have a direct mapping to a
21535   // target register and so an exact mapping is implied.
21536   // However, when using SVE for fixed length vectors, most legal vector types
21537   // are actually sub-vectors of a larger SVE register. When mapping
21538   // ISD::VECTOR_SHUFFLE to an SVE instruction care must be taken to consider
21539   // how the mask's indices translate. Specifically, when the mapping requires
21540   // an exact meaning for a specific vector index (e.g. Index X is the last
21541   // vector element in the register) then such mappings are often only safe when
  // the exact SVE register size is known. The main exception to this is when
21543   // indices are logically relative to the first element of either
21544   // ISD::VECTOR_SHUFFLE operand because these relative indices don't change
21545   // when converting from fixed-length to scalable vector types (i.e. the start
21546   // of a fixed length vector is always the start of a scalable vector).
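  // For example, reversing a fixed length vector with VECTOR_REVERSE (below)
  // is only correct when the fixed length type exactly fills the SVE register,
  // since VECTOR_REVERSE reverses every lane of the scalable container.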
21547   unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits();
21548   unsigned MaxSVESize = Subtarget->getMaxSVEVectorSizeInBits();
21549   if (MinSVESize == MaxSVESize && MaxSVESize == VT.getSizeInBits()) {
    if (ShuffleVectorInst::isReverseMask(ShuffleMask) && Op2.isUndef()) {
      Op = DAG.getNode(ISD::VECTOR_REVERSE, DL, ContainerVT, Op1);
      return convertFromScalableVector(DAG, VT, Op);
    }

    if (isZIPMask(ShuffleMask, VT, WhichResult) && WhichResult != 0)
      return convertFromScalableVector(
          DAG, VT, DAG.getNode(AArch64ISD::ZIP2, DL, ContainerVT, Op1, Op2));

    if (isUZPMask(ShuffleMask, VT, WhichResult)) {
      unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
      return convertFromScalableVector(
          DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op2));
    }

    if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult) && WhichResult != 0)
      return convertFromScalableVector(
          DAG, VT, DAG.getNode(AArch64ISD::ZIP2, DL, ContainerVT, Op1, Op1));

    if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
      unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
      return convertFromScalableVector(
          DAG, VT, DAG.getNode(Opc, DL, ContainerVT, Op1, Op1));
    }
  }

  return SDValue();
}

SDValue AArch64TargetLowering::getSVESafeBitCast(EVT VT, SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT InVT = Op.getValueType();

  assert(VT.isScalableVector() && isTypeLegal(VT) &&
         InVT.isScalableVector() && isTypeLegal(InVT) &&
         "Only expect to cast between legal scalable vector types!");
  assert(VT.getVectorElementType() != MVT::i1 &&
         InVT.getVectorElementType() != MVT::i1 &&
         "For predicate bitcasts, use getSVEPredicateBitCast");

  if (InVT == VT)
    return Op;

  EVT PackedVT = getPackedSVEVectorVT(VT.getVectorElementType());
  EVT PackedInVT = getPackedSVEVectorVT(InVT.getVectorElementType());

  // Safe bitcasting between unpacked vector types of different element counts
  // is currently unsupported because the code below does not yet do the work
  // needed to ensure the result's elements live where they're supposed to
  // within an SVE register.
  //                01234567
  // e.g. nxv2i32 = XX??XX??
  //      nxv4f16 = X?X?X?X?
  assert((VT.getVectorElementCount() == InVT.getVectorElementCount() ||
          VT == PackedVT || InVT == PackedInVT) &&
         "Unexpected bitcast!");
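  // For example, an nxv2f32 -> nxv2i64 cast reinterprets the input as its
  // packed container type nxv4f32, bitcasts that to nxv2i64 and, because
  // nxv2i64 is already packed, needs no final reinterpret.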

  // Pack input if required.
  if (InVT != PackedInVT)
    Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, PackedInVT, Op);

  Op = DAG.getNode(ISD::BITCAST, DL, PackedVT, Op);

  // Unpack result if required.
  if (VT != PackedVT)
    Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, VT, Op);

  return Op;
}

bool AArch64TargetLowering::isAllActivePredicate(SelectionDAG &DAG,
                                                 SDValue N) const {
  return ::isAllActivePredicate(DAG, N);
}

EVT AArch64TargetLowering::getPromotedVTForPredicate(EVT VT) const {
  return ::getPromotedVTForPredicate(VT);
}

bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth) const {

  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  case AArch64ISD::VSHL: {
    // Match (VSHL (VLSHR Val X) X)
    SDValue ShiftL = Op;
    SDValue ShiftR = Op->getOperand(0);
    if (ShiftR->getOpcode() != AArch64ISD::VLSHR)
      return false;

    if (!ShiftL.hasOneUse() || !ShiftR.hasOneUse())
      return false;

    unsigned ShiftLBits = ShiftL->getConstantOperandVal(1);
    unsigned ShiftRBits = ShiftR->getConstantOperandVal(1);

    // Other cases could be handled as well, but they are not currently
    // implemented.
    if (ShiftRBits != ShiftLBits)
      return false;

    unsigned ScalarSize = Op.getScalarValueSizeInBits();
    assert(ScalarSize > ShiftLBits && "Invalid shift imm");

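    // The right-shift/left-shift pair only clears the low ShiftLBits bits of
    // each element (e.g. bits [7:0] for a shift of 8), so the pair is
    // redundant if none of those bits are demanded.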
    APInt ZeroBits = APInt::getLowBitsSet(ScalarSize, ShiftLBits);
    APInt UnusedBits = ~OriginalDemandedBits;

    if ((ZeroBits & UnusedBits) != ZeroBits)
      return false;

    // All bits that are zeroed by (VSHL (VLSHR Val X) X) are not
    // used - simplify to just Val.
    return TLO.CombineTo(Op, ShiftR->getOperand(0));
  }
  }

  return TargetLowering::SimplifyDemandedBitsForTargetNode(
      Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
}

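// Treat splats created with AArch64ISD::DUP (possibly viewed through an
// EXTRACT_SUBVECTOR) as target-canonical constants, in addition to whatever
// the generic hook already accepts.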
bool AArch64TargetLowering::isTargetCanonicalConstantNode(SDValue Op) const {
  return Op.getOpcode() == AArch64ISD::DUP ||
         (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
          Op.getOperand(0).getOpcode() == AArch64ISD::DUP) ||
         TargetLowering::isTargetCanonicalConstantNode(Op);
}

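// Only treat constant unsigned bitfield extracts as legal when both types are
// the same 32- or 64-bit scalar, matching the register widths UBFX can
// operate on.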
bool AArch64TargetLowering::isConstantUnsignedBitfieldExtractLegal(
    unsigned Opc, LLT Ty1, LLT Ty2) const {
  return Ty1 == Ty2 && (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64));
}