1 //===-- RISCVISelLowering.cpp - RISC-V DAG Lowering Implementation  -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISC-V uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/Analysis/VectorUtils.h"
25 #include "llvm/CodeGen/MachineFrameInfo.h"
26 #include "llvm/CodeGen/MachineFunction.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineJumpTableInfo.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
31 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
32 #include "llvm/CodeGen/ValueTypes.h"
33 #include "llvm/IR/DiagnosticInfo.h"
34 #include "llvm/IR/DiagnosticPrinter.h"
35 #include "llvm/IR/IRBuilder.h"
36 #include "llvm/IR/Instructions.h"
37 #include "llvm/IR/IntrinsicsRISCV.h"
38 #include "llvm/IR/PatternMatch.h"
39 #include "llvm/Support/CommandLine.h"
40 #include "llvm/Support/Debug.h"
41 #include "llvm/Support/ErrorHandling.h"
42 #include "llvm/Support/InstructionCost.h"
43 #include "llvm/Support/KnownBits.h"
44 #include "llvm/Support/MathExtras.h"
45 #include "llvm/Support/raw_ostream.h"
46 #include <optional>
47 
48 using namespace llvm;
49 
50 #define DEBUG_TYPE "riscv-lower"
51 
52 STATISTIC(NumTailCalls, "Number of tail calls");
53 
54 static cl::opt<unsigned> ExtensionMaxWebSize(
55     DEBUG_TYPE "-ext-max-web-size", cl::Hidden,
56     cl::desc("Give the maximum size (in number of nodes) of the web of "
57              "instructions that we will consider for VW expansion"),
58     cl::init(18));
59 
60 static cl::opt<bool>
61     AllowSplatInVW_W(DEBUG_TYPE "-form-vw-w-with-splat", cl::Hidden,
62                      cl::desc("Allow the formation of VW_W operations (e.g., "
63                               "VWADD_W) with splat constants"),
64                      cl::init(false));
65 
66 static cl::opt<unsigned> NumRepeatedDivisors(
67     DEBUG_TYPE "-fp-repeated-divisors", cl::Hidden,
68     cl::desc("Set the minimum number of repetitions of a divisor to allow "
69              "transformation to multiplications by the reciprocal"),
70     cl::init(2));
71 
72 static cl::opt<int>
73     FPImmCost(DEBUG_TYPE "-fpimm-cost", cl::Hidden,
74               cl::desc("Give the maximum number of instructions that we will "
75                        "use for creating a floating-point immediate value"),
76               cl::init(2));
77 
78 static cl::opt<bool>
79     RV64LegalI32("riscv-experimental-rv64-legal-i32", cl::ReallyHidden,
80                  cl::desc("Make i32 a legal type for SelectionDAG on RV64."));
81 
82 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
83                                          const RISCVSubtarget &STI)
84     : TargetLowering(TM), Subtarget(STI) {
85 
86   if (Subtarget.isRVE())
87     report_fatal_error("Codegen not yet implemented for RVE");
88 
89   RISCVABI::ABI ABI = Subtarget.getTargetABI();
90   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
91 
92   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
93       !Subtarget.hasStdExtF()) {
94     errs() << "Hard-float 'f' ABI can't be used for a target that "
95                 "doesn't support the F instruction set extension (ignoring "
96                           "target-abi)\n";
97     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
98   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
99              !Subtarget.hasStdExtD()) {
100     errs() << "Hard-float 'd' ABI can't be used for a target that "
101               "doesn't support the D instruction set extension (ignoring "
102               "target-abi)\n";
103     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
104   }
105 
106   switch (ABI) {
107   default:
108     report_fatal_error("Don't know how to lower this ABI");
109   case RISCVABI::ABI_ILP32:
110   case RISCVABI::ABI_ILP32F:
111   case RISCVABI::ABI_ILP32D:
112   case RISCVABI::ABI_LP64:
113   case RISCVABI::ABI_LP64F:
114   case RISCVABI::ABI_LP64D:
115     break;
116   }
117 
118   MVT XLenVT = Subtarget.getXLenVT();
119 
120   // Set up the register classes.
121   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
122   if (Subtarget.is64Bit() && RV64LegalI32)
123     addRegisterClass(MVT::i32, &RISCV::GPRRegClass);
124 
125   if (Subtarget.hasStdExtZfhOrZfhmin())
126     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
127   if (Subtarget.hasStdExtZfbfmin())
128     addRegisterClass(MVT::bf16, &RISCV::FPR16RegClass);
129   if (Subtarget.hasStdExtF())
130     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
131   if (Subtarget.hasStdExtD())
132     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
133   if (Subtarget.hasStdExtZhinxOrZhinxmin())
134     addRegisterClass(MVT::f16, &RISCV::GPRF16RegClass);
135   if (Subtarget.hasStdExtZfinx())
136     addRegisterClass(MVT::f32, &RISCV::GPRF32RegClass);
137   if (Subtarget.hasStdExtZdinx()) {
138     if (Subtarget.is64Bit())
139       addRegisterClass(MVT::f64, &RISCV::GPRRegClass);
140     else
141       addRegisterClass(MVT::f64, &RISCV::GPRPF64RegClass);
142   }
143 
144   static const MVT::SimpleValueType BoolVecVTs[] = {
145       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
146       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
147   static const MVT::SimpleValueType IntVecVTs[] = {
148       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
149       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
150       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
151       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
152       MVT::nxv4i64, MVT::nxv8i64};
153   static const MVT::SimpleValueType F16VecVTs[] = {
154       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
155       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
156   static const MVT::SimpleValueType BF16VecVTs[] = {
157       MVT::nxv1bf16, MVT::nxv2bf16,  MVT::nxv4bf16,
158       MVT::nxv8bf16, MVT::nxv16bf16, MVT::nxv32bf16};
159   static const MVT::SimpleValueType F32VecVTs[] = {
160       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
161   static const MVT::SimpleValueType F64VecVTs[] = {
162       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
163 
164   if (Subtarget.hasVInstructions()) {
165     auto addRegClassForRVV = [this](MVT VT) {
166       // Disable the smallest fractional LMUL types if ELEN is less than
167       // RVVBitsPerBlock.
168       unsigned MinElts = RISCV::RVVBitsPerBlock / Subtarget.getELen();
169       if (VT.getVectorMinNumElements() < MinElts)
170         return;
171 
172       unsigned Size = VT.getSizeInBits().getKnownMinValue();
173       const TargetRegisterClass *RC;
174       if (Size <= RISCV::RVVBitsPerBlock)
175         RC = &RISCV::VRRegClass;
176       else if (Size == 2 * RISCV::RVVBitsPerBlock)
177         RC = &RISCV::VRM2RegClass;
178       else if (Size == 4 * RISCV::RVVBitsPerBlock)
179         RC = &RISCV::VRM4RegClass;
180       else if (Size == 8 * RISCV::RVVBitsPerBlock)
181         RC = &RISCV::VRM8RegClass;
182       else
183         llvm_unreachable("Unexpected size");
184 
185       addRegisterClass(VT, RC);
186     };
187 
188     for (MVT VT : BoolVecVTs)
189       addRegClassForRVV(VT);
190     for (MVT VT : IntVecVTs) {
191       if (VT.getVectorElementType() == MVT::i64 &&
192           !Subtarget.hasVInstructionsI64())
193         continue;
194       addRegClassForRVV(VT);
195     }
196 
197     if (Subtarget.hasVInstructionsF16Minimal())
198       for (MVT VT : F16VecVTs)
199         addRegClassForRVV(VT);
200 
201     if (Subtarget.hasVInstructionsBF16())
202       for (MVT VT : BF16VecVTs)
203         addRegClassForRVV(VT);
204 
205     if (Subtarget.hasVInstructionsF32())
206       for (MVT VT : F32VecVTs)
207         addRegClassForRVV(VT);
208 
209     if (Subtarget.hasVInstructionsF64())
210       for (MVT VT : F64VecVTs)
211         addRegClassForRVV(VT);
212 
213     if (Subtarget.useRVVForFixedLengthVectors()) {
214       auto addRegClassForFixedVectors = [this](MVT VT) {
215         MVT ContainerVT = getContainerForFixedLengthVector(VT);
216         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
217         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
218         addRegisterClass(VT, TRI.getRegClass(RCID));
219       };
220       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
221         if (useRVVForFixedLengthVectorVT(VT))
222           addRegClassForFixedVectors(VT);
223 
224       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
225         if (useRVVForFixedLengthVectorVT(VT))
226           addRegClassForFixedVectors(VT);
227     }
228   }
229 
230   // Compute derived properties from the register classes.
231   computeRegisterProperties(STI.getRegisterInfo());
232 
233   setStackPointerRegisterToSaveRestore(RISCV::X2);
234 
235   setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, XLenVT,
236                    MVT::i1, Promote);
237   // DAGCombiner can call isLoadExtLegal for types that aren't legal.
238   setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::i32,
239                    MVT::i1, Promote);
240 
241   // TODO: add all necessary setOperationAction calls.
242   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
243 
244   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
245   setOperationAction(ISD::BR_CC, XLenVT, Expand);
246   if (RV64LegalI32 && Subtarget.is64Bit())
247     setOperationAction(ISD::BR_CC, MVT::i32, Expand);
248   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
249   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
250   if (RV64LegalI32 && Subtarget.is64Bit())
251     setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
252 
253   setCondCodeAction(ISD::SETLE, XLenVT, Expand);
254   setCondCodeAction(ISD::SETGT, XLenVT, Custom);
255   setCondCodeAction(ISD::SETGE, XLenVT, Expand);
256   setCondCodeAction(ISD::SETULE, XLenVT, Expand);
257   setCondCodeAction(ISD::SETUGT, XLenVT, Custom);
258   setCondCodeAction(ISD::SETUGE, XLenVT, Expand);
259 
260   if (RV64LegalI32 && Subtarget.is64Bit())
261     setOperationAction(ISD::SETCC, MVT::i32, Promote);
262 
263   setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);
264 
265   setOperationAction(ISD::VASTART, MVT::Other, Custom);
266   setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand);
267 
268   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
269 
270   setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
271 
272   if (!Subtarget.hasStdExtZbb() && !Subtarget.hasVendorXTHeadBb())
273     setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i8, MVT::i16}, Expand);
274 
275   if (Subtarget.is64Bit()) {
276     setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
277 
278     if (!RV64LegalI32) {
279       setOperationAction(ISD::LOAD, MVT::i32, Custom);
280       setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL},
281                          MVT::i32, Custom);
282       setOperationAction(ISD::SADDO, MVT::i32, Custom);
283       setOperationAction({ISD::UADDO, ISD::USUBO, ISD::UADDSAT, ISD::USUBSAT},
284                          MVT::i32, Custom);
285     }
286   } else {
287     setLibcallName(
288         {RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128, RTLIB::MUL_I128},
289         nullptr);
290     setLibcallName(RTLIB::MULO_I64, nullptr);
291   }
292 
293   if (!Subtarget.hasStdExtM() && !Subtarget.hasStdExtZmmul()) {
294     setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU}, XLenVT, Expand);
295     if (RV64LegalI32 && Subtarget.is64Bit())
296       setOperationAction(ISD::MUL, MVT::i32, Promote);
297   } else if (Subtarget.is64Bit()) {
298     setOperationAction(ISD::MUL, MVT::i128, Custom);
299     if (!RV64LegalI32)
300       setOperationAction(ISD::MUL, MVT::i32, Custom);
301   } else {
302     setOperationAction(ISD::MUL, MVT::i64, Custom);
303   }
304 
305   if (!Subtarget.hasStdExtM()) {
306     setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM},
307                        XLenVT, Expand);
308     if (RV64LegalI32 && Subtarget.is64Bit())
309       setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, MVT::i32,
310                          Promote);
311   } else if (Subtarget.is64Bit()) {
312     if (!RV64LegalI32)
313       setOperationAction({ISD::SDIV, ISD::UDIV, ISD::UREM},
314                          {MVT::i8, MVT::i16, MVT::i32}, Custom);
315   }
316 
317   if (RV64LegalI32 && Subtarget.is64Bit()) {
318     setOperationAction({ISD::MULHS, ISD::MULHU}, MVT::i32, Expand);
319     setOperationAction(
320         {ISD::SDIVREM, ISD::UDIVREM, ISD::SMUL_LOHI, ISD::UMUL_LOHI}, MVT::i32,
321         Expand);
322   }
323 
324   setOperationAction(
325       {ISD::SDIVREM, ISD::UDIVREM, ISD::SMUL_LOHI, ISD::UMUL_LOHI}, XLenVT,
326       Expand);
327 
328   setOperationAction({ISD::SHL_PARTS, ISD::SRL_PARTS, ISD::SRA_PARTS}, XLenVT,
329                      Custom);
330 
331   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) {
332     if (!RV64LegalI32 && Subtarget.is64Bit())
333       setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
334   } else if (Subtarget.hasVendorXTHeadBb()) {
335     if (Subtarget.is64Bit())
336       setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
337     setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Custom);
338   } else if (Subtarget.hasVendorXCVbitmanip()) {
339     setOperationAction(ISD::ROTL, XLenVT, Expand);
340   } else {
341     setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Expand);
342     if (RV64LegalI32 && Subtarget.is64Bit())
343       setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Expand);
344   }
345 
346   // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
347   // pattern match it directly in isel.
348   setOperationAction(ISD::BSWAP, XLenVT,
349                      (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb() ||
350                       Subtarget.hasVendorXTHeadBb())
351                          ? Legal
352                          : Expand);
353   if (RV64LegalI32 && Subtarget.is64Bit())
354     setOperationAction(ISD::BSWAP, MVT::i32,
355                        (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb() ||
356                         Subtarget.hasVendorXTHeadBb())
357                            ? Promote
358                            : Expand);
359 
360 
361   if (Subtarget.hasVendorXCVbitmanip()) {
362     setOperationAction(ISD::BITREVERSE, XLenVT, Legal);
363   } else {
364     // Zbkb can use rev8+brev8 to implement bitreverse.
365     setOperationAction(ISD::BITREVERSE, XLenVT,
366                        Subtarget.hasStdExtZbkb() ? Custom : Expand);
367   }
368 
369   if (Subtarget.hasStdExtZbb()) {
370     setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, XLenVT,
371                        Legal);
372     if (RV64LegalI32 && Subtarget.is64Bit())
373       setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, MVT::i32,
374                          Promote);
375 
376     if (Subtarget.is64Bit()) {
377       if (RV64LegalI32)
378         setOperationAction(ISD::CTTZ, MVT::i32, Legal);
379       else
380         setOperationAction({ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF}, MVT::i32, Custom);
381     }
382   } else if (!Subtarget.hasVendorXCVbitmanip()) {
383     setOperationAction({ISD::CTTZ, ISD::CTPOP}, XLenVT, Expand);
384     if (RV64LegalI32 && Subtarget.is64Bit())
385       setOperationAction({ISD::CTTZ, ISD::CTPOP}, MVT::i32, Expand);
386   }
387 
388   if (Subtarget.hasStdExtZbb() || Subtarget.hasVendorXTHeadBb() ||
389       Subtarget.hasVendorXCVbitmanip()) {
390     // We need the custom lowering to make sure that the resulting sequence
391     // for the 32bit case is efficient on 64bit targets.
392     if (Subtarget.is64Bit()) {
393       if (RV64LegalI32) {
394         setOperationAction(ISD::CTLZ, MVT::i32,
395                            Subtarget.hasStdExtZbb() ? Legal : Promote);
396         if (!Subtarget.hasStdExtZbb())
397           setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Promote);
398       } else
399         setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, MVT::i32, Custom);
400     }
401   } else {
402     setOperationAction(ISD::CTLZ, XLenVT, Expand);
403     if (RV64LegalI32 && Subtarget.is64Bit())
404       setOperationAction(ISD::CTLZ, MVT::i32, Expand);
405   }
406 
407   if (!RV64LegalI32 && Subtarget.is64Bit() &&
408       !Subtarget.hasShortForwardBranchOpt())
409     setOperationAction(ISD::ABS, MVT::i32, Custom);
410 
411   // We can use PseudoCCSUB to implement ABS.
412   if (Subtarget.hasShortForwardBranchOpt())
413     setOperationAction(ISD::ABS, XLenVT, Legal);
414 
415   if (!Subtarget.hasVendorXTHeadCondMov())
416     setOperationAction(ISD::SELECT, XLenVT, Custom);
417 
418   if (RV64LegalI32 && Subtarget.is64Bit())
419     setOperationAction(ISD::SELECT, MVT::i32, Promote);
420 
421   static const unsigned FPLegalNodeTypes[] = {
422       ISD::FMINNUM,        ISD::FMAXNUM,       ISD::LRINT,
423       ISD::LLRINT,         ISD::LROUND,        ISD::LLROUND,
424       ISD::STRICT_LRINT,   ISD::STRICT_LLRINT, ISD::STRICT_LROUND,
425       ISD::STRICT_LLROUND, ISD::STRICT_FMA,    ISD::STRICT_FADD,
426       ISD::STRICT_FSUB,    ISD::STRICT_FMUL,   ISD::STRICT_FDIV,
427       ISD::STRICT_FSQRT,   ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS};
428 
429   static const ISD::CondCode FPCCToExpand[] = {
430       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
431       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
432       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
433 
434   static const unsigned FPOpToExpand[] = {
435       ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
436       ISD::FREM};
437 
438   static const unsigned FPRndMode[] = {
439       ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FRINT, ISD::FROUND,
440       ISD::FROUNDEVEN};
441 
442   if (Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin())
443     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
444 
445   static const unsigned ZfhminZfbfminPromoteOps[] = {
446       ISD::FMINNUM,      ISD::FMAXNUM,       ISD::FADD,
447       ISD::FSUB,         ISD::FMUL,          ISD::FMA,
448       ISD::FDIV,         ISD::FSQRT,         ISD::FABS,
449       ISD::FNEG,         ISD::STRICT_FMA,    ISD::STRICT_FADD,
450       ISD::STRICT_FSUB,  ISD::STRICT_FMUL,   ISD::STRICT_FDIV,
451       ISD::STRICT_FSQRT, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS,
452       ISD::SETCC,        ISD::FCEIL,         ISD::FFLOOR,
453       ISD::FTRUNC,       ISD::FRINT,         ISD::FROUND,
454       ISD::FROUNDEVEN,   ISD::SELECT};
455 
456   if (Subtarget.hasStdExtZfbfmin()) {
457     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
458     setOperationAction(ISD::BITCAST, MVT::bf16, Custom);
459     setOperationAction(ISD::FP_ROUND, MVT::bf16, Custom);
460     setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom);
461     setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
462     setOperationAction(ISD::ConstantFP, MVT::bf16, Expand);
463     setOperationAction(ISD::SELECT_CC, MVT::bf16, Expand);
464     setOperationAction(ISD::BR_CC, MVT::bf16, Expand);
465     setOperationAction(ZfhminZfbfminPromoteOps, MVT::bf16, Promote);
466     setOperationAction(ISD::FREM, MVT::bf16, Promote);
467     // FIXME: Need to promote bf16 FCOPYSIGN to f32, but the
468     // DAGCombiner::visitFP_ROUND probably needs improvements first.
469     setOperationAction(ISD::FCOPYSIGN, MVT::bf16, Expand);
470   }
471 
472   if (Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin()) {
473     if (Subtarget.hasStdExtZfhOrZhinx()) {
474       setOperationAction(FPLegalNodeTypes, MVT::f16, Legal);
475       setOperationAction(FPRndMode, MVT::f16,
476                          Subtarget.hasStdExtZfa() ? Legal : Custom);
477       setOperationAction(ISD::SELECT, MVT::f16, Custom);
478       setOperationAction(ISD::IS_FPCLASS, MVT::f16, Custom);
479     } else {
480       setOperationAction(ZfhminZfbfminPromoteOps, MVT::f16, Promote);
481       setOperationAction({ISD::STRICT_LRINT, ISD::STRICT_LLRINT,
482                           ISD::STRICT_LROUND, ISD::STRICT_LLROUND},
483                          MVT::f16, Legal);
484       // FIXME: Need to promote f16 FCOPYSIGN to f32, but the
485       // DAGCombiner::visitFP_ROUND probably needs improvements first.
486       setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
487     }
488 
489     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
490     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
491     setCondCodeAction(FPCCToExpand, MVT::f16, Expand);
492     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
493     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
494 
495     setOperationAction(ISD::FNEARBYINT, MVT::f16,
496                        Subtarget.hasStdExtZfa() ? Legal : Promote);
497     setOperationAction({ISD::FREM, ISD::FPOW, ISD::FPOWI,
498                         ISD::FCOS, ISD::FSIN, ISD::FSINCOS, ISD::FEXP,
499                         ISD::FEXP2, ISD::FEXP10, ISD::FLOG, ISD::FLOG2,
500                         ISD::FLOG10},
501                        MVT::f16, Promote);
502 
503     // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
504     // complete support for all operations in LegalizeDAG.
505     setOperationAction({ISD::STRICT_FCEIL, ISD::STRICT_FFLOOR,
506                         ISD::STRICT_FNEARBYINT, ISD::STRICT_FRINT,
507                         ISD::STRICT_FROUND, ISD::STRICT_FROUNDEVEN,
508                         ISD::STRICT_FTRUNC},
509                        MVT::f16, Promote);
510 
511     // We need to custom promote this.
512     if (Subtarget.is64Bit())
513       setOperationAction(ISD::FPOWI, MVT::i32, Custom);
514 
515     if (!Subtarget.hasStdExtZfa())
516       setOperationAction({ISD::FMAXIMUM, ISD::FMINIMUM}, MVT::f16, Custom);
517   }
518 
519   if (Subtarget.hasStdExtFOrZfinx()) {
520     setOperationAction(FPLegalNodeTypes, MVT::f32, Legal);
521     setOperationAction(FPRndMode, MVT::f32,
522                        Subtarget.hasStdExtZfa() ? Legal : Custom);
523     setCondCodeAction(FPCCToExpand, MVT::f32, Expand);
524     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
525     setOperationAction(ISD::SELECT, MVT::f32, Custom);
526     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
527     setOperationAction(FPOpToExpand, MVT::f32, Expand);
528     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
529     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
530     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::bf16, Expand);
531     setTruncStoreAction(MVT::f32, MVT::bf16, Expand);
532     setOperationAction(ISD::IS_FPCLASS, MVT::f32, Custom);
533     setOperationAction(ISD::BF16_TO_FP, MVT::f32, Custom);
534     setOperationAction(ISD::FP_TO_BF16, MVT::f32,
535                        Subtarget.isSoftFPABI() ? LibCall : Custom);
536     setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);
537     setOperationAction(ISD::FP16_TO_FP, MVT::f32, Custom);
538 
539     if (Subtarget.hasStdExtZfa())
540       setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
541     else
542       setOperationAction({ISD::FMAXIMUM, ISD::FMINIMUM}, MVT::f32, Custom);
543   }
544 
545   if (Subtarget.hasStdExtFOrZfinx() && Subtarget.is64Bit())
546     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
547 
548   if (Subtarget.hasStdExtDOrZdinx()) {
549     setOperationAction(FPLegalNodeTypes, MVT::f64, Legal);
550 
551     if (Subtarget.hasStdExtZfa()) {
552       setOperationAction(FPRndMode, MVT::f64, Legal);
553       setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
554       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
555       setOperationAction(ISD::BITCAST, MVT::f64, Custom);
556     } else {
557       if (Subtarget.is64Bit())
558         setOperationAction(FPRndMode, MVT::f64, Custom);
559 
560       setOperationAction({ISD::FMAXIMUM, ISD::FMINIMUM}, MVT::f64, Custom);
561     }
562 
563     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
564     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
565     setCondCodeAction(FPCCToExpand, MVT::f64, Expand);
566     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
567     setOperationAction(ISD::SELECT, MVT::f64, Custom);
568     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
569     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
570     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
571     setOperationAction(FPOpToExpand, MVT::f64, Expand);
572     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
573     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
574     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::bf16, Expand);
575     setTruncStoreAction(MVT::f64, MVT::bf16, Expand);
576     setOperationAction(ISD::IS_FPCLASS, MVT::f64, Custom);
577     setOperationAction(ISD::BF16_TO_FP, MVT::f64, Custom);
578     setOperationAction(ISD::FP_TO_BF16, MVT::f64,
579                        Subtarget.isSoftFPABI() ? LibCall : Custom);
580     setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom);
581     setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
582   }
583 
584   if (Subtarget.is64Bit()) {
585     setOperationAction({ISD::FP_TO_UINT, ISD::FP_TO_SINT,
586                         ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT},
587                        MVT::i32, Custom);
588     setOperationAction(ISD::LROUND, MVT::i32, Custom);
589   }
590 
591   if (Subtarget.hasStdExtFOrZfinx()) {
592     setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, XLenVT,
593                        Custom);
594 
595     setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT,
596                         ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP},
597                        XLenVT, Legal);
598 
599     if (RV64LegalI32 && Subtarget.is64Bit())
600       setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT,
601                           ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP},
602                          MVT::i32, Legal);
603 
604     setOperationAction(ISD::GET_ROUNDING, XLenVT, Custom);
605     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
606   }
607 
608   setOperationAction({ISD::GlobalAddress, ISD::BlockAddress, ISD::ConstantPool,
609                       ISD::JumpTable},
610                      XLenVT, Custom);
611 
612   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
613 
614   if (Subtarget.is64Bit())
615     setOperationAction(ISD::Constant, MVT::i64, Custom);
616 
617   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
618   // Unfortunately this can't be determined just from the ISA naming string.
619   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
620                      Subtarget.is64Bit() ? Legal : Custom);
621 
622   setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal);
623   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
624   if (Subtarget.is64Bit())
625     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
626 
627   if (Subtarget.hasStdExtZicbop()) {
628     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
629   }
630 
631   if (Subtarget.hasStdExtA()) {
632     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
633     setMinCmpXchgSizeInBits(32);
634   } else if (Subtarget.hasForcedAtomics()) {
635     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
636   } else {
637     setMaxAtomicSizeInBitsSupported(0);
638   }
639 
640   setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
641 
642   setBooleanContents(ZeroOrOneBooleanContent);
643 
644   if (Subtarget.hasVInstructions()) {
645     setBooleanVectorContents(ZeroOrOneBooleanContent);
646 
647     setOperationAction(ISD::VSCALE, XLenVT, Custom);
648     if (RV64LegalI32 && Subtarget.is64Bit())
649       setOperationAction(ISD::VSCALE, MVT::i32, Custom);
650 
651     // RVV intrinsics may have illegal operands.
652     // We also need to custom legalize vmv.x.s.
653     setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN,
654                         ISD::INTRINSIC_VOID},
655                        {MVT::i8, MVT::i16}, Custom);
656     if (Subtarget.is64Bit())
657       setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID},
658                          MVT::i32, Custom);
659     else
660       setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
661                          MVT::i64, Custom);
662 
663     setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID},
664                        MVT::Other, Custom);
665 
666     static const unsigned IntegerVPOps[] = {
667         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
668         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
669         ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
670         ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
671         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
672         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
673         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
674         ISD::VP_MERGE,       ISD::VP_SELECT,      ISD::VP_FP_TO_SINT,
675         ISD::VP_FP_TO_UINT,  ISD::VP_SETCC,       ISD::VP_SIGN_EXTEND,
676         ISD::VP_ZERO_EXTEND, ISD::VP_TRUNCATE,    ISD::VP_SMIN,
677         ISD::VP_SMAX,        ISD::VP_UMIN,        ISD::VP_UMAX,
678         ISD::VP_ABS, ISD::EXPERIMENTAL_VP_REVERSE};
679 
680     static const unsigned FloatingPointVPOps[] = {
681         ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
682         ISD::VP_FDIV,        ISD::VP_FNEG,        ISD::VP_FABS,
683         ISD::VP_FMA,         ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
684         ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX, ISD::VP_MERGE,
685         ISD::VP_SELECT,      ISD::VP_SINT_TO_FP,  ISD::VP_UINT_TO_FP,
686         ISD::VP_SETCC,       ISD::VP_FP_ROUND,    ISD::VP_FP_EXTEND,
687         ISD::VP_SQRT,        ISD::VP_FMINNUM,     ISD::VP_FMAXNUM,
688         ISD::VP_FCEIL,       ISD::VP_FFLOOR,      ISD::VP_FROUND,
689         ISD::VP_FROUNDEVEN,  ISD::VP_FCOPYSIGN,   ISD::VP_FROUNDTOZERO,
690         ISD::VP_FRINT,       ISD::VP_FNEARBYINT,  ISD::VP_IS_FPCLASS,
691         ISD::EXPERIMENTAL_VP_REVERSE};
692 
693     static const unsigned IntegerVecReduceOps[] = {
694         ISD::VECREDUCE_ADD,  ISD::VECREDUCE_AND,  ISD::VECREDUCE_OR,
695         ISD::VECREDUCE_XOR,  ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
696         ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN};
697 
698     static const unsigned FloatingPointVecReduceOps[] = {
699         ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD, ISD::VECREDUCE_FMIN,
700         ISD::VECREDUCE_FMAX};
701 
702     if (!Subtarget.is64Bit()) {
703       // We must custom-lower certain vXi64 operations on RV32 due to the vector
704       // element type being illegal.
705       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
706                          MVT::i64, Custom);
707 
708       setOperationAction(IntegerVecReduceOps, MVT::i64, Custom);
709 
710       setOperationAction({ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND,
711                           ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR,
712                           ISD::VP_REDUCE_SMAX, ISD::VP_REDUCE_SMIN,
713                           ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN},
714                          MVT::i64, Custom);
715     }
716 
717     for (MVT VT : BoolVecVTs) {
718       if (!isTypeLegal(VT))
719         continue;
720 
721       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
722 
723       // Mask VTs are custom-expanded into a series of standard nodes
724       setOperationAction({ISD::TRUNCATE, ISD::CONCAT_VECTORS,
725                           ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR,
726                           ISD::SCALAR_TO_VECTOR},
727                          VT, Custom);
728 
729       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
730                          Custom);
731 
732       setOperationAction(ISD::SELECT, VT, Custom);
733       setOperationAction(
734           {ISD::SELECT_CC, ISD::VSELECT, ISD::VP_MERGE, ISD::VP_SELECT}, VT,
735           Expand);
736 
737       setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR}, VT, Custom);
738 
739       setOperationAction(
740           {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
741           Custom);
742 
743       setOperationAction(
744           {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
745           Custom);
746 
747       // RVV has native int->float & float->int conversions where the
748       // element type sizes are within one power-of-two of each other. Any
749       // wider distances between type sizes have to be lowered as sequences
750       // which progressively narrow the gap in stages.
751       setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT,
752                           ISD::FP_TO_UINT, ISD::STRICT_SINT_TO_FP,
753                           ISD::STRICT_UINT_TO_FP, ISD::STRICT_FP_TO_SINT,
754                           ISD::STRICT_FP_TO_UINT},
755                          VT, Custom);
756       setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT,
757                          Custom);
758 
759       // Expand all extending loads to types larger than this, and truncating
760       // stores from types larger than this.
761       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
762         setTruncStoreAction(VT, OtherVT, Expand);
763         setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT,
764                          OtherVT, Expand);
765       }
766 
767       setOperationAction({ISD::VP_FP_TO_SINT, ISD::VP_FP_TO_UINT,
768                           ISD::VP_TRUNCATE, ISD::VP_SETCC},
769                          VT, Custom);
770 
771       setOperationAction(ISD::VECTOR_DEINTERLEAVE, VT, Custom);
772       setOperationAction(ISD::VECTOR_INTERLEAVE, VT, Custom);
773 
774       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
775 
776       setOperationAction(ISD::EXPERIMENTAL_VP_REVERSE, VT, Custom);
777 
778       setOperationPromotedToType(
779           ISD::VECTOR_SPLICE, VT,
780           MVT::getVectorVT(MVT::i8, VT.getVectorElementCount()));
781     }
782 
783     for (MVT VT : IntVecVTs) {
784       if (!isTypeLegal(VT))
785         continue;
786 
787       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
788       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
789 
790       // Vectors implement MULHS/MULHU.
791       setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand);
792 
793       // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
794       if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV())
795         setOperationAction({ISD::MULHU, ISD::MULHS}, VT, Expand);
796 
797       setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
798                          Legal);
799 
800       // Custom-lower extensions and truncations from/to mask types.
801       setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND},
802                          VT, Custom);
803 
804       // RVV has native int->float & float->int conversions where the
805       // element type sizes are within one power-of-two of each other. Any
806       // wider distances between type sizes have to be lowered as sequences
807       // which progressively narrow the gap in stages.
808       setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT,
809                           ISD::FP_TO_UINT, ISD::STRICT_SINT_TO_FP,
810                           ISD::STRICT_UINT_TO_FP, ISD::STRICT_FP_TO_SINT,
811                           ISD::STRICT_FP_TO_UINT},
812                          VT, Custom);
813       setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT,
814                          Custom);
815       setOperationAction({ISD::LRINT, ISD::LLRINT}, VT, Custom);
816       setOperationAction(
817           {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT, Legal);
818 
819       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
820       // nodes which truncate by one power of two at a time.
821       setOperationAction(ISD::TRUNCATE, VT, Custom);
822 
823       // Custom-lower insert/extract operations to simplify patterns.
824       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
825                          Custom);
826 
827       // Custom-lower reduction operations to set up the corresponding custom
828       // nodes' operands.
829       setOperationAction(IntegerVecReduceOps, VT, Custom);
830 
831       setOperationAction(IntegerVPOps, VT, Custom);
832 
833       setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
834 
835       setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
836                          VT, Custom);
837 
838       setOperationAction(
839           {ISD::VP_LOAD, ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
840            ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER, ISD::VP_SCATTER},
841           VT, Custom);
842 
843       setOperationAction({ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR,
844                           ISD::EXTRACT_SUBVECTOR, ISD::SCALAR_TO_VECTOR},
845                          VT, Custom);
846 
847       setOperationAction(ISD::SELECT, VT, Custom);
848       setOperationAction(ISD::SELECT_CC, VT, Expand);
849 
850       setOperationAction({ISD::STEP_VECTOR, ISD::VECTOR_REVERSE}, VT, Custom);
851 
852       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
853         setTruncStoreAction(VT, OtherVT, Expand);
854         setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT,
855                          OtherVT, Expand);
856       }
857 
858       setOperationAction(ISD::VECTOR_DEINTERLEAVE, VT, Custom);
859       setOperationAction(ISD::VECTOR_INTERLEAVE, VT, Custom);
860 
861       // Splice
862       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
863 
864       if (Subtarget.hasStdExtZvkb()) {
865         setOperationAction(ISD::BSWAP, VT, Legal);
866         setOperationAction(ISD::VP_BSWAP, VT, Custom);
867       } else {
868         setOperationAction({ISD::BSWAP, ISD::VP_BSWAP}, VT, Expand);
869         setOperationAction({ISD::ROTL, ISD::ROTR}, VT, Expand);
870       }
871 
872       if (Subtarget.hasStdExtZvbb()) {
873         setOperationAction(ISD::BITREVERSE, VT, Legal);
874         setOperationAction(ISD::VP_BITREVERSE, VT, Custom);
875         setOperationAction({ISD::VP_CTLZ, ISD::VP_CTLZ_ZERO_UNDEF, ISD::VP_CTTZ,
876                             ISD::VP_CTTZ_ZERO_UNDEF, ISD::VP_CTPOP},
877                            VT, Custom);
878       } else {
879         setOperationAction({ISD::BITREVERSE, ISD::VP_BITREVERSE}, VT, Expand);
880         setOperationAction({ISD::CTLZ, ISD::CTTZ, ISD::CTPOP}, VT, Expand);
881         setOperationAction({ISD::VP_CTLZ, ISD::VP_CTLZ_ZERO_UNDEF, ISD::VP_CTTZ,
882                             ISD::VP_CTTZ_ZERO_UNDEF, ISD::VP_CTPOP},
883                            VT, Expand);
884 
885         // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if element of VT in the
886         // range of f32.
887         EVT FloatVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
888         if (isTypeLegal(FloatVT)) {
889           setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF,
890                               ISD::CTTZ_ZERO_UNDEF, ISD::VP_CTLZ,
891                               ISD::VP_CTLZ_ZERO_UNDEF, ISD::VP_CTTZ_ZERO_UNDEF},
892                              VT, Custom);
893         }
894       }
895     }
896 
897     // Expand various CCs to best match the RVV ISA, which natively supports UNE
898     // but no other unordered comparisons, and supports all ordered comparisons
899     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
900     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
901     // and we pattern-match those back to the "original", swapping operands once
902     // more. This way we catch both operations and both "vf" and "fv" forms with
903     // fewer patterns.
904     static const ISD::CondCode VFPCCToExpand[] = {
905         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
906         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
907         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
908     };
909 
910     // TODO: support more ops.
911     static const unsigned ZvfhminPromoteOps[] = {
912         ISD::FMINNUM,     ISD::FMAXNUM,      ISD::FADD,        ISD::FSUB,
913         ISD::FMUL,        ISD::FMA,          ISD::FDIV,        ISD::FSQRT,
914         ISD::FABS,        ISD::FNEG,         ISD::FCOPYSIGN,   ISD::FCEIL,
915         ISD::FFLOOR,      ISD::FROUND,       ISD::FROUNDEVEN,  ISD::FRINT,
916         ISD::FNEARBYINT,  ISD::IS_FPCLASS,   ISD::SETCC,       ISD::FMAXIMUM,
917         ISD::FMINIMUM,    ISD::STRICT_FADD,  ISD::STRICT_FSUB, ISD::STRICT_FMUL,
918         ISD::STRICT_FDIV, ISD::STRICT_FSQRT, ISD::STRICT_FMA};
919 
920     // TODO: support more vp ops.
921     static const unsigned ZvfhminPromoteVPOps[] = {
922         ISD::VP_FADD,        ISD::VP_FSUB,         ISD::VP_FMUL,
923         ISD::VP_FDIV,        ISD::VP_FNEG,         ISD::VP_FABS,
924         ISD::VP_FMA,         ISD::VP_REDUCE_FADD,  ISD::VP_REDUCE_SEQ_FADD,
925         ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX,  ISD::VP_SQRT,
926         ISD::VP_FMINNUM,     ISD::VP_FMAXNUM,      ISD::VP_FCEIL,
927         ISD::VP_FFLOOR,      ISD::VP_FROUND,       ISD::VP_FROUNDEVEN,
928         ISD::VP_FCOPYSIGN,   ISD::VP_FROUNDTOZERO, ISD::VP_FRINT,
929         ISD::VP_FNEARBYINT,  ISD::VP_SETCC};
930 
931     // Sets common operation actions on RVV floating-point vector types.
932     const auto SetCommonVFPActions = [&](MVT VT) {
933       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
934       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
935       // sizes are within one power-of-two of each other. Therefore conversions
936       // between vXf16 and vXf64 must be lowered as sequences which convert via
937       // vXf32.
938       setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
939       // Custom-lower insert/extract operations to simplify patterns.
940       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
941                          Custom);
942       // Expand various condition codes (explained above).
943       setCondCodeAction(VFPCCToExpand, VT, Expand);
944 
945       setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, VT, Legal);
946       setOperationAction({ISD::FMAXIMUM, ISD::FMINIMUM}, VT, Custom);
947 
948       setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND,
949                           ISD::FROUNDEVEN, ISD::FRINT, ISD::FNEARBYINT,
950                           ISD::IS_FPCLASS},
951                          VT, Custom);
952 
953       setOperationAction(FloatingPointVecReduceOps, VT, Custom);
954 
955       // Expand FP operations that need libcalls.
956       setOperationAction(ISD::FREM, VT, Expand);
957       setOperationAction(ISD::FPOW, VT, Expand);
958       setOperationAction(ISD::FCOS, VT, Expand);
959       setOperationAction(ISD::FSIN, VT, Expand);
960       setOperationAction(ISD::FSINCOS, VT, Expand);
961       setOperationAction(ISD::FEXP, VT, Expand);
962       setOperationAction(ISD::FEXP2, VT, Expand);
963       setOperationAction(ISD::FEXP10, VT, Expand);
964       setOperationAction(ISD::FLOG, VT, Expand);
965       setOperationAction(ISD::FLOG2, VT, Expand);
966       setOperationAction(ISD::FLOG10, VT, Expand);
967 
968       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
969 
970       setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
971 
972       setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
973                          VT, Custom);
974 
975       setOperationAction(
976           {ISD::VP_LOAD, ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
977            ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER, ISD::VP_SCATTER},
978           VT, Custom);
979 
980       setOperationAction(ISD::SELECT, VT, Custom);
981       setOperationAction(ISD::SELECT_CC, VT, Expand);
982 
983       setOperationAction({ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR,
984                           ISD::EXTRACT_SUBVECTOR, ISD::SCALAR_TO_VECTOR},
985                          VT, Custom);
986 
987       setOperationAction(ISD::VECTOR_DEINTERLEAVE, VT, Custom);
988       setOperationAction(ISD::VECTOR_INTERLEAVE, VT, Custom);
989 
990       setOperationAction({ISD::VECTOR_REVERSE, ISD::VECTOR_SPLICE}, VT, Custom);
991 
992       setOperationAction(FloatingPointVPOps, VT, Custom);
993 
994       setOperationAction({ISD::STRICT_FP_EXTEND, ISD::STRICT_FP_ROUND}, VT,
995                          Custom);
996       setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
997                           ISD::STRICT_FDIV, ISD::STRICT_FSQRT, ISD::STRICT_FMA},
998                          VT, Legal);
999       setOperationAction({ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS,
1000                           ISD::STRICT_FTRUNC, ISD::STRICT_FCEIL,
1001                           ISD::STRICT_FFLOOR, ISD::STRICT_FROUND,
1002                           ISD::STRICT_FROUNDEVEN, ISD::STRICT_FNEARBYINT},
1003                          VT, Custom);
1004     };
1005 
1006     // Sets common extload/truncstore actions on RVV floating-point vector
1007     // types.
1008     const auto SetCommonVFPExtLoadTruncStoreActions =
1009         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
1010           for (auto SmallVT : SmallerVTs) {
1011             setTruncStoreAction(VT, SmallVT, Expand);
1012             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
1013           }
1014         };
1015 
1016     if (Subtarget.hasVInstructionsF16()) {
1017       for (MVT VT : F16VecVTs) {
1018         if (!isTypeLegal(VT))
1019           continue;
1020         SetCommonVFPActions(VT);
1021       }
1022     } else if (Subtarget.hasVInstructionsF16Minimal()) {
1023       for (MVT VT : F16VecVTs) {
1024         if (!isTypeLegal(VT))
1025           continue;
1026         setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
1027         setOperationAction({ISD::STRICT_FP_ROUND, ISD::STRICT_FP_EXTEND}, VT,
1028                            Custom);
1029         setOperationAction({ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND}, VT, Custom);
1030         setOperationAction({ISD::VP_MERGE, ISD::VP_SELECT, ISD::SELECT}, VT,
1031                            Custom);
1032         setOperationAction(ISD::SELECT_CC, VT, Expand);
1033         setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP,
1034                             ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP},
1035                            VT, Custom);
1036         setOperationAction({ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR,
1037                             ISD::EXTRACT_SUBVECTOR, ISD::SCALAR_TO_VECTOR},
1038                            VT, Custom);
1039         setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
1040         // load/store
1041         setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
1042 
1043         // Custom split nxv32f16 since nxv32f32 if not legal.
1044         if (VT == MVT::nxv32f16) {
1045           setOperationAction(ZvfhminPromoteOps, VT, Custom);
1046           setOperationAction(ZvfhminPromoteVPOps, VT, Custom);
1047           continue;
1048         }
1049         // Add more promote ops.
1050         MVT F32VecVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
1051         setOperationPromotedToType(ZvfhminPromoteOps, VT, F32VecVT);
1052         setOperationPromotedToType(ZvfhminPromoteVPOps, VT, F32VecVT);
1053       }
1054     }
1055 
1056     if (Subtarget.hasVInstructionsF32()) {
1057       for (MVT VT : F32VecVTs) {
1058         if (!isTypeLegal(VT))
1059           continue;
1060         SetCommonVFPActions(VT);
1061         SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
1062       }
1063     }
1064 
1065     if (Subtarget.hasVInstructionsF64()) {
1066       for (MVT VT : F64VecVTs) {
1067         if (!isTypeLegal(VT))
1068           continue;
1069         SetCommonVFPActions(VT);
1070         SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
1071         SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
1072       }
1073     }
1074 
1075     if (Subtarget.useRVVForFixedLengthVectors()) {
1076       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
1077         if (!useRVVForFixedLengthVectorVT(VT))
1078           continue;
1079 
1080         // By default everything must be expanded.
1081         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
1082           setOperationAction(Op, VT, Expand);
1083         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
1084           setTruncStoreAction(VT, OtherVT, Expand);
1085           setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT,
1086                            OtherVT, Expand);
1087         }
1088 
1089         // Custom lower fixed vector undefs to scalable vector undefs to avoid
1090         // expansion to a build_vector of 0s.
1091         setOperationAction(ISD::UNDEF, VT, Custom);
1092 
1093         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
1094         setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
1095                            Custom);
1096 
1097         setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS}, VT,
1098                            Custom);
1099 
1100         setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
1101                            VT, Custom);
1102 
1103         setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1104 
1105         setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
1106 
1107         setOperationAction(ISD::SETCC, VT, Custom);
1108 
1109         setOperationAction(ISD::SELECT, VT, Custom);
1110 
1111         setOperationAction(ISD::TRUNCATE, VT, Custom);
1112 
1113         setOperationAction(ISD::BITCAST, VT, Custom);
1114 
1115         setOperationAction(
1116             {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
1117             Custom);
1118 
1119         setOperationAction(
1120             {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
1121             Custom);
1122 
1123         setOperationAction(
1124             {
1125                 ISD::SINT_TO_FP,
1126                 ISD::UINT_TO_FP,
1127                 ISD::FP_TO_SINT,
1128                 ISD::FP_TO_UINT,
1129                 ISD::STRICT_SINT_TO_FP,
1130                 ISD::STRICT_UINT_TO_FP,
1131                 ISD::STRICT_FP_TO_SINT,
1132                 ISD::STRICT_FP_TO_UINT,
1133             },
1134             VT, Custom);
1135         setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT,
1136                            Custom);
1137 
1138         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1139 
1140         // Operations below are different for between masks and other vectors.
1141         if (VT.getVectorElementType() == MVT::i1) {
1142           setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR, ISD::AND,
1143                               ISD::OR, ISD::XOR},
1144                              VT, Custom);
1145 
1146           setOperationAction({ISD::VP_FP_TO_SINT, ISD::VP_FP_TO_UINT,
1147                               ISD::VP_SETCC, ISD::VP_TRUNCATE},
1148                              VT, Custom);
1149 
1150           setOperationAction(ISD::EXPERIMENTAL_VP_REVERSE, VT, Custom);
1151           continue;
1152         }
1153 
1154         // Make SPLAT_VECTOR Legal so DAGCombine will convert splat vectors to
1155         // it before type legalization for i64 vectors on RV32. It will then be
1156         // type legalized to SPLAT_VECTOR_PARTS which we need to Custom handle.
1157         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
1158         // improvements first.
1159         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
1160           setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
1161           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
1162         }
1163 
1164         setOperationAction(
1165             {ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER}, VT, Custom);
1166 
1167         setOperationAction({ISD::VP_LOAD, ISD::VP_STORE,
1168                             ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
1169                             ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER,
1170                             ISD::VP_SCATTER},
1171                            VT, Custom);
1172 
1173         setOperationAction({ISD::ADD, ISD::MUL, ISD::SUB, ISD::AND, ISD::OR,
1174                             ISD::XOR, ISD::SDIV, ISD::SREM, ISD::UDIV,
1175                             ISD::UREM, ISD::SHL, ISD::SRA, ISD::SRL},
1176                            VT, Custom);
1177 
1178         setOperationAction(
1179             {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, ISD::ABS}, VT, Custom);
1180 
1181         // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
1182         if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV())
1183           setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Custom);
1184 
1185         setOperationAction(
1186             {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT,
1187             Custom);
1188 
1189         setOperationAction(ISD::VSELECT, VT, Custom);
1190         setOperationAction(ISD::SELECT_CC, VT, Expand);
1191 
1192         setOperationAction(
1193             {ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, VT, Custom);
1194 
1195         // Custom-lower reduction operations to set up the corresponding custom
1196         // nodes' operands.
1197         setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_SMAX,
1198                             ISD::VECREDUCE_SMIN, ISD::VECREDUCE_UMAX,
1199                             ISD::VECREDUCE_UMIN},
1200                            VT, Custom);
1201 
1202         setOperationAction(IntegerVPOps, VT, Custom);
1203 
1204         if (Subtarget.hasStdExtZvkb())
1205           setOperationAction({ISD::BSWAP, ISD::ROTL, ISD::ROTR}, VT, Custom);
1206 
1207         if (Subtarget.hasStdExtZvbb()) {
1208           setOperationAction({ISD::BITREVERSE, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF,
1209                               ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTPOP},
1210                              VT, Custom);
1211         } else {
1212           // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if element of VT in the
1213           // range of f32.
1214           EVT FloatVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
1215           if (isTypeLegal(FloatVT))
1216             setOperationAction(
1217                 {ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
1218                 Custom);
1219         }
1220       }
1221 
1222       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
1223         // There are no extending loads or truncating stores.
1224         for (MVT InnerVT : MVT::fp_fixedlen_vector_valuetypes()) {
1225           setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
1226           setTruncStoreAction(VT, InnerVT, Expand);
1227         }
1228 
1229         if (!useRVVForFixedLengthVectorVT(VT))
1230           continue;
1231 
1232         // By default everything must be expanded.
1233         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
1234           setOperationAction(Op, VT, Expand);
1235 
1236         // Custom lower fixed vector undefs to scalable vector undefs to avoid
1237         // expansion to a build_vector of 0s.
1238         setOperationAction(ISD::UNDEF, VT, Custom);
1239 
1240         if (VT.getVectorElementType() == MVT::f16 &&
1241             !Subtarget.hasVInstructionsF16()) {
1242           setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
1243           setOperationAction({ISD::STRICT_FP_ROUND, ISD::STRICT_FP_EXTEND}, VT,
1244                              Custom);
1245           setOperationAction({ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND}, VT, Custom);
1246           setOperationAction(
1247               {ISD::VP_MERGE, ISD::VP_SELECT, ISD::VSELECT, ISD::SELECT}, VT,
1248               Custom);
1249           setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP,
1250                               ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP},
1251                              VT, Custom);
1252           setOperationAction({ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR,
1253                               ISD::EXTRACT_SUBVECTOR, ISD::SCALAR_TO_VECTOR},
1254                              VT, Custom);
1255           setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
1256           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
1257           MVT F32VecVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
1258           // Don't promote f16 vector operations to f32 if f32 vector type is
1259           // not legal.
1260           // TODO: could split the f16 vector into two vectors and do promotion.
1261           if (!isTypeLegal(F32VecVT))
1262             continue;
1263           setOperationPromotedToType(ZvfhminPromoteOps, VT, F32VecVT);
1264           setOperationPromotedToType(ZvfhminPromoteVPOps, VT, F32VecVT);
1265           continue;
1266         }
1267 
1268         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
1269         setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
1270                            Custom);
1271 
1272         setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
1273                             ISD::VECTOR_SHUFFLE, ISD::INSERT_VECTOR_ELT,
1274                             ISD::EXTRACT_VECTOR_ELT},
1275                            VT, Custom);
1276 
1277         setOperationAction({ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE,
1278                             ISD::MGATHER, ISD::MSCATTER},
1279                            VT, Custom);
1280 
1281         setOperationAction({ISD::VP_LOAD, ISD::VP_STORE,
1282                             ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
1283                             ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER,
1284                             ISD::VP_SCATTER},
1285                            VT, Custom);
1286 
1287         setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV,
1288                             ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FSQRT,
1289                             ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM,
1290                             ISD::IS_FPCLASS, ISD::FMAXIMUM, ISD::FMINIMUM},
1291                            VT, Custom);
1292 
1293         setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
1294 
1295         setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND,
1296                             ISD::FROUNDEVEN, ISD::FRINT, ISD::FNEARBYINT},
1297                            VT, Custom);
1298 
1299         setCondCodeAction(VFPCCToExpand, VT, Expand);
1300 
1301         setOperationAction(ISD::SETCC, VT, Custom);
1302         setOperationAction({ISD::VSELECT, ISD::SELECT}, VT, Custom);
1303         setOperationAction(ISD::SELECT_CC, VT, Expand);
1304 
1305         setOperationAction(ISD::BITCAST, VT, Custom);
1306 
1307         setOperationAction(FloatingPointVecReduceOps, VT, Custom);
1308 
1309         setOperationAction(FloatingPointVPOps, VT, Custom);
1310 
1311         setOperationAction({ISD::STRICT_FP_EXTEND, ISD::STRICT_FP_ROUND}, VT,
1312                            Custom);
1313         setOperationAction(
1314             {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
1315              ISD::STRICT_FDIV, ISD::STRICT_FSQRT, ISD::STRICT_FMA,
1316              ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS, ISD::STRICT_FTRUNC,
1317              ISD::STRICT_FCEIL, ISD::STRICT_FFLOOR, ISD::STRICT_FROUND,
1318              ISD::STRICT_FROUNDEVEN, ISD::STRICT_FNEARBYINT},
1319             VT, Custom);
1320       }
1321 
1322       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
1323       setOperationAction(ISD::BITCAST, {MVT::i8, MVT::i16, MVT::i32, MVT::i64},
1324                          Custom);
1325       if (Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin())
1326         setOperationAction(ISD::BITCAST, MVT::f16, Custom);
1327       if (Subtarget.hasStdExtFOrZfinx())
1328         setOperationAction(ISD::BITCAST, MVT::f32, Custom);
1329       if (Subtarget.hasStdExtDOrZdinx())
1330         setOperationAction(ISD::BITCAST, MVT::f64, Custom);
1331     }
1332   }
1333 
1334   if (Subtarget.hasStdExtA()) {
1335     setOperationAction(ISD::ATOMIC_LOAD_SUB, XLenVT, Expand);
1336     if (RV64LegalI32 && Subtarget.is64Bit())
1337       setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
1338   }
1339 
1340   if (Subtarget.hasForcedAtomics()) {
1341     // Force __sync libcalls to be emitted for atomic rmw/cas operations.
1342     setOperationAction(
1343         {ISD::ATOMIC_CMP_SWAP, ISD::ATOMIC_SWAP, ISD::ATOMIC_LOAD_ADD,
1344          ISD::ATOMIC_LOAD_SUB, ISD::ATOMIC_LOAD_AND, ISD::ATOMIC_LOAD_OR,
1345          ISD::ATOMIC_LOAD_XOR, ISD::ATOMIC_LOAD_NAND, ISD::ATOMIC_LOAD_MIN,
1346          ISD::ATOMIC_LOAD_MAX, ISD::ATOMIC_LOAD_UMIN, ISD::ATOMIC_LOAD_UMAX},
1347         XLenVT, LibCall);
1348   }
1349 
1350   if (Subtarget.hasVendorXTHeadMemIdx()) {
1351     for (unsigned im = (unsigned)ISD::PRE_INC; im != (unsigned)ISD::POST_DEC;
1352          ++im) {
1353       setIndexedLoadAction(im, MVT::i8, Legal);
1354       setIndexedStoreAction(im, MVT::i8, Legal);
1355       setIndexedLoadAction(im, MVT::i16, Legal);
1356       setIndexedStoreAction(im, MVT::i16, Legal);
1357       setIndexedLoadAction(im, MVT::i32, Legal);
1358       setIndexedStoreAction(im, MVT::i32, Legal);
1359 
1360       if (Subtarget.is64Bit()) {
1361         setIndexedLoadAction(im, MVT::i64, Legal);
1362         setIndexedStoreAction(im, MVT::i64, Legal);
1363       }
1364     }
1365   }
1366 
1367   // Function alignments.
1368   const Align FunctionAlignment(Subtarget.hasStdExtCOrZca() ? 2 : 4);
1369   setMinFunctionAlignment(FunctionAlignment);
1370   // Set preferred alignments.
1371   setPrefFunctionAlignment(Subtarget.getPrefFunctionAlignment());
1372   setPrefLoopAlignment(Subtarget.getPrefLoopAlignment());
1373 
1374   setTargetDAGCombine({ISD::INTRINSIC_VOID, ISD::INTRINSIC_W_CHAIN,
1375                        ISD::INTRINSIC_WO_CHAIN, ISD::ADD, ISD::SUB, ISD::AND,
1376                        ISD::OR, ISD::XOR, ISD::SETCC, ISD::SELECT});
1377   if (Subtarget.is64Bit())
1378     setTargetDAGCombine(ISD::SRA);
1379 
1380   if (Subtarget.hasStdExtFOrZfinx())
1381     setTargetDAGCombine({ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM});
1382 
1383   if (Subtarget.hasStdExtZbb())
1384     setTargetDAGCombine({ISD::UMAX, ISD::UMIN, ISD::SMAX, ISD::SMIN});
1385 
1386   if (Subtarget.hasStdExtZbs() && Subtarget.is64Bit())
1387     setTargetDAGCombine(ISD::TRUNCATE);
1388 
1389   if (Subtarget.hasStdExtZbkb())
1390     setTargetDAGCombine(ISD::BITREVERSE);
1391   if (Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin())
1392     setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1393   if (Subtarget.hasStdExtFOrZfinx())
1394     setTargetDAGCombine({ISD::ZERO_EXTEND, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
1395                          ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT});
1396   if (Subtarget.hasVInstructions())
1397     setTargetDAGCombine({ISD::FCOPYSIGN, ISD::MGATHER, ISD::MSCATTER,
1398                          ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA, ISD::SRL,
1399                          ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR,
1400                          ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
1401                          ISD::EXPERIMENTAL_VP_REVERSE, ISD::MUL,
1402                          ISD::INSERT_VECTOR_ELT});
1403   if (Subtarget.hasVendorXTHeadMemPair())
1404     setTargetDAGCombine({ISD::LOAD, ISD::STORE});
1405   if (Subtarget.useRVVForFixedLengthVectors())
1406     setTargetDAGCombine(ISD::BITCAST);
1407 
1408   setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
1409   setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
1410 
1411   // Disable strict node mutation.
1412   IsStrictFPEnabled = true;
1413 }
1414 
1415 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
1416                                             LLVMContext &Context,
1417                                             EVT VT) const {
1418   if (!VT.isVector())
1419     return getPointerTy(DL);
1420   if (Subtarget.hasVInstructions() &&
1421       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
1422     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
1423   return VT.changeVectorElementTypeToInteger();
1424 }
1425 
1426 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
1427   return Subtarget.getXLenVT();
1428 }
1429 
1430 // Return false if we can lower get_vector_length to a vsetvli intrinsic.
1431 bool RISCVTargetLowering::shouldExpandGetVectorLength(EVT TripCountVT,
1432                                                       unsigned VF,
1433                                                       bool IsScalable) const {
1434   if (!Subtarget.hasVInstructions())
1435     return true;
1436 
1437   if (!IsScalable)
1438     return true;
1439 
1440   if (TripCountVT != MVT::i32 && TripCountVT != Subtarget.getXLenVT())
1441     return true;
1442 
1443   // Don't allow VF=1 if those types are't legal.
1444   if (VF < RISCV::RVVBitsPerBlock / Subtarget.getELen())
1445     return true;
1446 
1447   // VLEN=32 support is incomplete.
1448   if (Subtarget.getRealMinVLen() < RISCV::RVVBitsPerBlock)
1449     return true;
1450 
1451   // The maximum VF is for the smallest element width with LMUL=8.
1452   // VF must be a power of 2.
1453   unsigned MaxVF = (RISCV::RVVBitsPerBlock / 8) * 8;
1454   return VF > MaxVF || !isPowerOf2_32(VF);
1455 }
1456 
1457 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1458                                              const CallInst &I,
1459                                              MachineFunction &MF,
1460                                              unsigned Intrinsic) const {
1461   auto &DL = I.getModule()->getDataLayout();
1462 
1463   auto SetRVVLoadStoreInfo = [&](unsigned PtrOp, bool IsStore,
1464                                  bool IsUnitStrided) {
1465     Info.opc = IsStore ? ISD::INTRINSIC_VOID : ISD::INTRINSIC_W_CHAIN;
1466     Info.ptrVal = I.getArgOperand(PtrOp);
1467     Type *MemTy;
1468     if (IsStore) {
1469       // Store value is the first operand.
1470       MemTy = I.getArgOperand(0)->getType();
1471     } else {
1472       // Use return type. If it's segment load, return type is a struct.
1473       MemTy = I.getType();
1474       if (MemTy->isStructTy())
1475         MemTy = MemTy->getStructElementType(0);
1476     }
1477     if (!IsUnitStrided)
1478       MemTy = MemTy->getScalarType();
1479 
1480     Info.memVT = getValueType(DL, MemTy);
1481     Info.align = Align(DL.getTypeSizeInBits(MemTy->getScalarType()) / 8);
1482     Info.size = MemoryLocation::UnknownSize;
1483     Info.flags |=
1484         IsStore ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
1485     return true;
1486   };
1487 
1488   if (I.getMetadata(LLVMContext::MD_nontemporal) != nullptr)
1489     Info.flags |= MachineMemOperand::MONonTemporal;
1490 
1491   Info.flags |= RISCVTargetLowering::getTargetMMOFlags(I);
1492   switch (Intrinsic) {
1493   default:
1494     return false;
1495   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
1496   case Intrinsic::riscv_masked_atomicrmw_add_i32:
1497   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
1498   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
1499   case Intrinsic::riscv_masked_atomicrmw_max_i32:
1500   case Intrinsic::riscv_masked_atomicrmw_min_i32:
1501   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
1502   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
1503   case Intrinsic::riscv_masked_cmpxchg_i32:
1504     Info.opc = ISD::INTRINSIC_W_CHAIN;
1505     Info.memVT = MVT::i32;
1506     Info.ptrVal = I.getArgOperand(0);
1507     Info.offset = 0;
1508     Info.align = Align(4);
1509     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
1510                  MachineMemOperand::MOVolatile;
1511     return true;
1512   case Intrinsic::riscv_masked_strided_load:
1513     return SetRVVLoadStoreInfo(/*PtrOp*/ 1, /*IsStore*/ false,
1514                                /*IsUnitStrided*/ false);
1515   case Intrinsic::riscv_masked_strided_store:
1516     return SetRVVLoadStoreInfo(/*PtrOp*/ 1, /*IsStore*/ true,
1517                                /*IsUnitStrided*/ false);
1518   case Intrinsic::riscv_seg2_load:
1519   case Intrinsic::riscv_seg3_load:
1520   case Intrinsic::riscv_seg4_load:
1521   case Intrinsic::riscv_seg5_load:
1522   case Intrinsic::riscv_seg6_load:
1523   case Intrinsic::riscv_seg7_load:
1524   case Intrinsic::riscv_seg8_load:
1525     return SetRVVLoadStoreInfo(/*PtrOp*/ 0, /*IsStore*/ false,
1526                                /*IsUnitStrided*/ false);
1527   case Intrinsic::riscv_seg2_store:
1528   case Intrinsic::riscv_seg3_store:
1529   case Intrinsic::riscv_seg4_store:
1530   case Intrinsic::riscv_seg5_store:
1531   case Intrinsic::riscv_seg6_store:
1532   case Intrinsic::riscv_seg7_store:
1533   case Intrinsic::riscv_seg8_store:
1534     // Operands are (vec, ..., vec, ptr, vl)
1535     return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2,
1536                                /*IsStore*/ true,
1537                                /*IsUnitStrided*/ false);
1538   case Intrinsic::riscv_vle:
1539   case Intrinsic::riscv_vle_mask:
1540   case Intrinsic::riscv_vleff:
1541   case Intrinsic::riscv_vleff_mask:
1542     return SetRVVLoadStoreInfo(/*PtrOp*/ 1,
1543                                /*IsStore*/ false,
1544                                /*IsUnitStrided*/ true);
1545   case Intrinsic::riscv_vse:
1546   case Intrinsic::riscv_vse_mask:
1547     return SetRVVLoadStoreInfo(/*PtrOp*/ 1,
1548                                /*IsStore*/ true,
1549                                /*IsUnitStrided*/ true);
1550   case Intrinsic::riscv_vlse:
1551   case Intrinsic::riscv_vlse_mask:
1552   case Intrinsic::riscv_vloxei:
1553   case Intrinsic::riscv_vloxei_mask:
1554   case Intrinsic::riscv_vluxei:
1555   case Intrinsic::riscv_vluxei_mask:
1556     return SetRVVLoadStoreInfo(/*PtrOp*/ 1,
1557                                /*IsStore*/ false,
1558                                /*IsUnitStrided*/ false);
1559   case Intrinsic::riscv_vsse:
1560   case Intrinsic::riscv_vsse_mask:
1561   case Intrinsic::riscv_vsoxei:
1562   case Intrinsic::riscv_vsoxei_mask:
1563   case Intrinsic::riscv_vsuxei:
1564   case Intrinsic::riscv_vsuxei_mask:
1565     return SetRVVLoadStoreInfo(/*PtrOp*/ 1,
1566                                /*IsStore*/ true,
1567                                /*IsUnitStrided*/ false);
1568   case Intrinsic::riscv_vlseg2:
1569   case Intrinsic::riscv_vlseg3:
1570   case Intrinsic::riscv_vlseg4:
1571   case Intrinsic::riscv_vlseg5:
1572   case Intrinsic::riscv_vlseg6:
1573   case Intrinsic::riscv_vlseg7:
1574   case Intrinsic::riscv_vlseg8:
1575   case Intrinsic::riscv_vlseg2ff:
1576   case Intrinsic::riscv_vlseg3ff:
1577   case Intrinsic::riscv_vlseg4ff:
1578   case Intrinsic::riscv_vlseg5ff:
1579   case Intrinsic::riscv_vlseg6ff:
1580   case Intrinsic::riscv_vlseg7ff:
1581   case Intrinsic::riscv_vlseg8ff:
1582     return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2,
1583                                /*IsStore*/ false,
1584                                /*IsUnitStrided*/ false);
1585   case Intrinsic::riscv_vlseg2_mask:
1586   case Intrinsic::riscv_vlseg3_mask:
1587   case Intrinsic::riscv_vlseg4_mask:
1588   case Intrinsic::riscv_vlseg5_mask:
1589   case Intrinsic::riscv_vlseg6_mask:
1590   case Intrinsic::riscv_vlseg7_mask:
1591   case Intrinsic::riscv_vlseg8_mask:
1592   case Intrinsic::riscv_vlseg2ff_mask:
1593   case Intrinsic::riscv_vlseg3ff_mask:
1594   case Intrinsic::riscv_vlseg4ff_mask:
1595   case Intrinsic::riscv_vlseg5ff_mask:
1596   case Intrinsic::riscv_vlseg6ff_mask:
1597   case Intrinsic::riscv_vlseg7ff_mask:
1598   case Intrinsic::riscv_vlseg8ff_mask:
1599     return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 4,
1600                                /*IsStore*/ false,
1601                                /*IsUnitStrided*/ false);
1602   case Intrinsic::riscv_vlsseg2:
1603   case Intrinsic::riscv_vlsseg3:
1604   case Intrinsic::riscv_vlsseg4:
1605   case Intrinsic::riscv_vlsseg5:
1606   case Intrinsic::riscv_vlsseg6:
1607   case Intrinsic::riscv_vlsseg7:
1608   case Intrinsic::riscv_vlsseg8:
1609   case Intrinsic::riscv_vloxseg2:
1610   case Intrinsic::riscv_vloxseg3:
1611   case Intrinsic::riscv_vloxseg4:
1612   case Intrinsic::riscv_vloxseg5:
1613   case Intrinsic::riscv_vloxseg6:
1614   case Intrinsic::riscv_vloxseg7:
1615   case Intrinsic::riscv_vloxseg8:
1616   case Intrinsic::riscv_vluxseg2:
1617   case Intrinsic::riscv_vluxseg3:
1618   case Intrinsic::riscv_vluxseg4:
1619   case Intrinsic::riscv_vluxseg5:
1620   case Intrinsic::riscv_vluxseg6:
1621   case Intrinsic::riscv_vluxseg7:
1622   case Intrinsic::riscv_vluxseg8:
1623     return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3,
1624                                /*IsStore*/ false,
1625                                /*IsUnitStrided*/ false);
1626   case Intrinsic::riscv_vlsseg2_mask:
1627   case Intrinsic::riscv_vlsseg3_mask:
1628   case Intrinsic::riscv_vlsseg4_mask:
1629   case Intrinsic::riscv_vlsseg5_mask:
1630   case Intrinsic::riscv_vlsseg6_mask:
1631   case Intrinsic::riscv_vlsseg7_mask:
1632   case Intrinsic::riscv_vlsseg8_mask:
1633   case Intrinsic::riscv_vloxseg2_mask:
1634   case Intrinsic::riscv_vloxseg3_mask:
1635   case Intrinsic::riscv_vloxseg4_mask:
1636   case Intrinsic::riscv_vloxseg5_mask:
1637   case Intrinsic::riscv_vloxseg6_mask:
1638   case Intrinsic::riscv_vloxseg7_mask:
1639   case Intrinsic::riscv_vloxseg8_mask:
1640   case Intrinsic::riscv_vluxseg2_mask:
1641   case Intrinsic::riscv_vluxseg3_mask:
1642   case Intrinsic::riscv_vluxseg4_mask:
1643   case Intrinsic::riscv_vluxseg5_mask:
1644   case Intrinsic::riscv_vluxseg6_mask:
1645   case Intrinsic::riscv_vluxseg7_mask:
1646   case Intrinsic::riscv_vluxseg8_mask:
1647     return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 5,
1648                                /*IsStore*/ false,
1649                                /*IsUnitStrided*/ false);
1650   case Intrinsic::riscv_vsseg2:
1651   case Intrinsic::riscv_vsseg3:
1652   case Intrinsic::riscv_vsseg4:
1653   case Intrinsic::riscv_vsseg5:
1654   case Intrinsic::riscv_vsseg6:
1655   case Intrinsic::riscv_vsseg7:
1656   case Intrinsic::riscv_vsseg8:
1657     return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2,
1658                                /*IsStore*/ true,
1659                                /*IsUnitStrided*/ false);
1660   case Intrinsic::riscv_vsseg2_mask:
1661   case Intrinsic::riscv_vsseg3_mask:
1662   case Intrinsic::riscv_vsseg4_mask:
1663   case Intrinsic::riscv_vsseg5_mask:
1664   case Intrinsic::riscv_vsseg6_mask:
1665   case Intrinsic::riscv_vsseg7_mask:
1666   case Intrinsic::riscv_vsseg8_mask:
1667     return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3,
1668                                /*IsStore*/ true,
1669                                /*IsUnitStrided*/ false);
1670   case Intrinsic::riscv_vssseg2:
1671   case Intrinsic::riscv_vssseg3:
1672   case Intrinsic::riscv_vssseg4:
1673   case Intrinsic::riscv_vssseg5:
1674   case Intrinsic::riscv_vssseg6:
1675   case Intrinsic::riscv_vssseg7:
1676   case Intrinsic::riscv_vssseg8:
1677   case Intrinsic::riscv_vsoxseg2:
1678   case Intrinsic::riscv_vsoxseg3:
1679   case Intrinsic::riscv_vsoxseg4:
1680   case Intrinsic::riscv_vsoxseg5:
1681   case Intrinsic::riscv_vsoxseg6:
1682   case Intrinsic::riscv_vsoxseg7:
1683   case Intrinsic::riscv_vsoxseg8:
1684   case Intrinsic::riscv_vsuxseg2:
1685   case Intrinsic::riscv_vsuxseg3:
1686   case Intrinsic::riscv_vsuxseg4:
1687   case Intrinsic::riscv_vsuxseg5:
1688   case Intrinsic::riscv_vsuxseg6:
1689   case Intrinsic::riscv_vsuxseg7:
1690   case Intrinsic::riscv_vsuxseg8:
1691     return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3,
1692                                /*IsStore*/ true,
1693                                /*IsUnitStrided*/ false);
1694   case Intrinsic::riscv_vssseg2_mask:
1695   case Intrinsic::riscv_vssseg3_mask:
1696   case Intrinsic::riscv_vssseg4_mask:
1697   case Intrinsic::riscv_vssseg5_mask:
1698   case Intrinsic::riscv_vssseg6_mask:
1699   case Intrinsic::riscv_vssseg7_mask:
1700   case Intrinsic::riscv_vssseg8_mask:
1701   case Intrinsic::riscv_vsoxseg2_mask:
1702   case Intrinsic::riscv_vsoxseg3_mask:
1703   case Intrinsic::riscv_vsoxseg4_mask:
1704   case Intrinsic::riscv_vsoxseg5_mask:
1705   case Intrinsic::riscv_vsoxseg6_mask:
1706   case Intrinsic::riscv_vsoxseg7_mask:
1707   case Intrinsic::riscv_vsoxseg8_mask:
1708   case Intrinsic::riscv_vsuxseg2_mask:
1709   case Intrinsic::riscv_vsuxseg3_mask:
1710   case Intrinsic::riscv_vsuxseg4_mask:
1711   case Intrinsic::riscv_vsuxseg5_mask:
1712   case Intrinsic::riscv_vsuxseg6_mask:
1713   case Intrinsic::riscv_vsuxseg7_mask:
1714   case Intrinsic::riscv_vsuxseg8_mask:
1715     return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 4,
1716                                /*IsStore*/ true,
1717                                /*IsUnitStrided*/ false);
1718   }
1719 }
1720 
1721 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1722                                                 const AddrMode &AM, Type *Ty,
1723                                                 unsigned AS,
1724                                                 Instruction *I) const {
1725   // No global is ever allowed as a base.
1726   if (AM.BaseGV)
1727     return false;
1728 
1729   // RVV instructions only support register addressing.
1730   if (Subtarget.hasVInstructions() && isa<VectorType>(Ty))
1731     return AM.HasBaseReg && AM.Scale == 0 && !AM.BaseOffs;
1732 
1733   // Require a 12-bit signed offset.
1734   if (!isInt<12>(AM.BaseOffs))
1735     return false;
1736 
1737   switch (AM.Scale) {
1738   case 0: // "r+i" or just "i", depending on HasBaseReg.
1739     break;
1740   case 1:
1741     if (!AM.HasBaseReg) // allow "r+i".
1742       break;
1743     return false; // disallow "r+r" or "r+r+i".
1744   default:
1745     return false;
1746   }
1747 
1748   return true;
1749 }
1750 
1751 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1752   return isInt<12>(Imm);
1753 }
1754 
1755 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
1756   return isInt<12>(Imm);
1757 }
1758 
1759 // On RV32, 64-bit integers are split into their high and low parts and held
1760 // in two different registers, so the trunc is free since the low register can
1761 // just be used.
1762 // FIXME: Should we consider i64->i32 free on RV64 to match the EVT version of
1763 // isTruncateFree?
1764 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
1765   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
1766     return false;
1767   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
1768   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
1769   return (SrcBits == 64 && DestBits == 32);
1770 }
1771 
1772 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
1773   // We consider i64->i32 free on RV64 since we have good selection of W
1774   // instructions that make promoting operations back to i64 free in many cases.
1775   if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() ||
1776       !DstVT.isInteger())
1777     return false;
1778   unsigned SrcBits = SrcVT.getSizeInBits();
1779   unsigned DestBits = DstVT.getSizeInBits();
1780   return (SrcBits == 64 && DestBits == 32);
1781 }
1782 
1783 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1784   // Zexts are free if they can be combined with a load.
1785   // Don't advertise i32->i64 zextload as being free for RV64. It interacts
1786   // poorly with type legalization of compares preferring sext.
1787   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1788     EVT MemVT = LD->getMemoryVT();
1789     if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
1790         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1791          LD->getExtensionType() == ISD::ZEXTLOAD))
1792       return true;
1793   }
1794 
1795   return TargetLowering::isZExtFree(Val, VT2);
1796 }
1797 
1798 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1799   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1800 }
1801 
1802 bool RISCVTargetLowering::signExtendConstant(const ConstantInt *CI) const {
1803   return Subtarget.is64Bit() && CI->getType()->isIntegerTy(32);
1804 }
1805 
1806 bool RISCVTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
1807   return Subtarget.hasStdExtZbb() || Subtarget.hasVendorXCVbitmanip();
1808 }
1809 
1810 bool RISCVTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
1811   return Subtarget.hasStdExtZbb() || Subtarget.hasVendorXTHeadBb() ||
1812          Subtarget.hasVendorXCVbitmanip();
1813 }
1814 
1815 bool RISCVTargetLowering::isMaskAndCmp0FoldingBeneficial(
1816     const Instruction &AndI) const {
1817   // We expect to be able to match a bit extraction instruction if the Zbs
1818   // extension is supported and the mask is a power of two. However, we
1819   // conservatively return false if the mask would fit in an ANDI instruction,
1820   // on the basis that it's possible the sinking+duplication of the AND in
1821   // CodeGenPrepare triggered by this hook wouldn't decrease the instruction
1822   // count and would increase code size (e.g. ANDI+BNEZ => BEXTI+BNEZ).
1823   if (!Subtarget.hasStdExtZbs() && !Subtarget.hasVendorXTHeadBs())
1824     return false;
1825   ConstantInt *Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
1826   if (!Mask)
1827     return false;
1828   return !Mask->getValue().isSignedIntN(12) && Mask->getValue().isPowerOf2();
1829 }
1830 
1831 bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
1832   EVT VT = Y.getValueType();
1833 
1834   // FIXME: Support vectors once we have tests.
1835   if (VT.isVector())
1836     return false;
1837 
1838   return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) &&
1839          !isa<ConstantSDNode>(Y);
1840 }
1841 
1842 bool RISCVTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
1843   // Zbs provides BEXT[_I], which can be used with SEQZ/SNEZ as a bit test.
1844   if (Subtarget.hasStdExtZbs())
1845     return X.getValueType().isScalarInteger();
1846   auto *C = dyn_cast<ConstantSDNode>(Y);
1847   // XTheadBs provides th.tst (similar to bexti), if Y is a constant
1848   if (Subtarget.hasVendorXTHeadBs())
1849     return C != nullptr;
1850   // We can use ANDI+SEQZ/SNEZ as a bit test. Y contains the bit position.
1851   return C && C->getAPIntValue().ule(10);
1852 }
1853 
1854 bool RISCVTargetLowering::shouldFoldSelectWithIdentityConstant(unsigned Opcode,
1855                                                                EVT VT) const {
1856   // Only enable for rvv.
1857   if (!VT.isVector() || !Subtarget.hasVInstructions())
1858     return false;
1859 
1860   if (VT.isFixedLengthVector() && !isTypeLegal(VT))
1861     return false;
1862 
1863   return true;
1864 }
1865 
1866 bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
1867                                                             Type *Ty) const {
1868   assert(Ty->isIntegerTy());
1869 
1870   unsigned BitSize = Ty->getIntegerBitWidth();
1871   if (BitSize > Subtarget.getXLen())
1872     return false;
1873 
1874   // Fast path, assume 32-bit immediates are cheap.
1875   int64_t Val = Imm.getSExtValue();
1876   if (isInt<32>(Val))
1877     return true;
1878 
1879   // A constant pool entry may be more aligned thant he load we're trying to
1880   // replace. If we don't support unaligned scalar mem, prefer the constant
1881   // pool.
1882   // TODO: Can the caller pass down the alignment?
1883   if (!Subtarget.hasFastUnalignedAccess())
1884     return true;
1885 
1886   // Prefer to keep the load if it would require many instructions.
1887   // This uses the same threshold we use for constant pools but doesn't
1888   // check useConstantPoolForLargeInts.
1889   // TODO: Should we keep the load only when we're definitely going to emit a
1890   // constant pool?
1891 
1892   RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, Subtarget);
1893   return Seq.size() <= Subtarget.getMaxBuildIntsCost();
1894 }
1895 
1896 bool RISCVTargetLowering::
1897     shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
1898         SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
1899         unsigned OldShiftOpcode, unsigned NewShiftOpcode,
1900         SelectionDAG &DAG) const {
1901   // One interesting pattern that we'd want to form is 'bit extract':
1902   //   ((1 >> Y) & 1) ==/!= 0
1903   // But we also need to be careful not to try to reverse that fold.
1904 
1905   // Is this '((1 >> Y) & 1)'?
1906   if (XC && OldShiftOpcode == ISD::SRL && XC->isOne())
1907     return false; // Keep the 'bit extract' pattern.
1908 
1909   // Will this be '((1 >> Y) & 1)' after the transform?
1910   if (NewShiftOpcode == ISD::SRL && CC->isOne())
1911     return true; // Do form the 'bit extract' pattern.
1912 
1913   // If 'X' is a constant, and we transform, then we will immediately
1914   // try to undo the fold, thus causing endless combine loop.
1915   // So only do the transform if X is not a constant. This matches the default
1916   // implementation of this function.
1917   return !XC;
1918 }
1919 
1920 bool RISCVTargetLowering::canSplatOperand(unsigned Opcode, int Operand) const {
1921   switch (Opcode) {
1922   case Instruction::Add:
1923   case Instruction::Sub:
1924   case Instruction::Mul:
1925   case Instruction::And:
1926   case Instruction::Or:
1927   case Instruction::Xor:
1928   case Instruction::FAdd:
1929   case Instruction::FSub:
1930   case Instruction::FMul:
1931   case Instruction::FDiv:
1932   case Instruction::ICmp:
1933   case Instruction::FCmp:
1934     return true;
1935   case Instruction::Shl:
1936   case Instruction::LShr:
1937   case Instruction::AShr:
1938   case Instruction::UDiv:
1939   case Instruction::SDiv:
1940   case Instruction::URem:
1941   case Instruction::SRem:
1942     return Operand == 1;
1943   default:
1944     return false;
1945   }
1946 }
1947 
1948 
1949 bool RISCVTargetLowering::canSplatOperand(Instruction *I, int Operand) const {
1950   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
1951     return false;
1952 
1953   if (canSplatOperand(I->getOpcode(), Operand))
1954     return true;
1955 
1956   auto *II = dyn_cast<IntrinsicInst>(I);
1957   if (!II)
1958     return false;
1959 
1960   switch (II->getIntrinsicID()) {
1961   case Intrinsic::fma:
1962   case Intrinsic::vp_fma:
1963     return Operand == 0 || Operand == 1;
1964   case Intrinsic::vp_shl:
1965   case Intrinsic::vp_lshr:
1966   case Intrinsic::vp_ashr:
1967   case Intrinsic::vp_udiv:
1968   case Intrinsic::vp_sdiv:
1969   case Intrinsic::vp_urem:
1970   case Intrinsic::vp_srem:
1971     return Operand == 1;
1972     // These intrinsics are commutative.
1973   case Intrinsic::vp_add:
1974   case Intrinsic::vp_mul:
1975   case Intrinsic::vp_and:
1976   case Intrinsic::vp_or:
1977   case Intrinsic::vp_xor:
1978   case Intrinsic::vp_fadd:
1979   case Intrinsic::vp_fmul:
1980   case Intrinsic::vp_icmp:
1981   case Intrinsic::vp_fcmp:
1982     // These intrinsics have 'vr' versions.
1983   case Intrinsic::vp_sub:
1984   case Intrinsic::vp_fsub:
1985   case Intrinsic::vp_fdiv:
1986     return Operand == 0 || Operand == 1;
1987   default:
1988     return false;
1989   }
1990 }
1991 
1992 /// Check if sinking \p I's operands to I's basic block is profitable, because
1993 /// the operands can be folded into a target instruction, e.g.
1994 /// splats of scalars can fold into vector instructions.
1995 bool RISCVTargetLowering::shouldSinkOperands(
1996     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
1997   using namespace llvm::PatternMatch;
1998 
1999   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
2000     return false;
2001 
2002   for (auto OpIdx : enumerate(I->operands())) {
2003     if (!canSplatOperand(I, OpIdx.index()))
2004       continue;
2005 
2006     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
2007     // Make sure we are not already sinking this operand
2008     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
2009       continue;
2010 
2011     // We are looking for a splat that can be sunk.
2012     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
2013                              m_Undef(), m_ZeroMask())))
2014       continue;
2015 
2016     // Don't sink i1 splats.
2017     if (cast<VectorType>(Op->getType())->getElementType()->isIntegerTy(1))
2018       continue;
2019 
2020     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
2021     // and vector registers
2022     for (Use &U : Op->uses()) {
2023       Instruction *Insn = cast<Instruction>(U.getUser());
2024       if (!canSplatOperand(Insn, U.getOperandNo()))
2025         return false;
2026     }
2027 
2028     Ops.push_back(&Op->getOperandUse(0));
2029     Ops.push_back(&OpIdx.value());
2030   }
2031   return true;
2032 }
2033 
2034 bool RISCVTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
2035   unsigned Opc = VecOp.getOpcode();
2036 
2037   // Assume target opcodes can't be scalarized.
2038   // TODO - do we have any exceptions?
2039   if (Opc >= ISD::BUILTIN_OP_END)
2040     return false;
2041 
2042   // If the vector op is not supported, try to convert to scalar.
2043   EVT VecVT = VecOp.getValueType();
2044   if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
2045     return true;
2046 
2047   // If the vector op is supported, but the scalar op is not, the transform may
2048   // not be worthwhile.
2049   // Permit a vector binary operation can be converted to scalar binary
2050   // operation which is custom lowered with illegal type.
2051   EVT ScalarVT = VecVT.getScalarType();
2052   return isOperationLegalOrCustomOrPromote(Opc, ScalarVT) ||
2053          isOperationCustom(Opc, ScalarVT);
2054 }
2055 
2056 bool RISCVTargetLowering::isOffsetFoldingLegal(
2057     const GlobalAddressSDNode *GA) const {
2058   // In order to maximise the opportunity for common subexpression elimination,
2059   // keep a separate ADD node for the global address offset instead of folding
2060   // it in the global address node. Later peephole optimisations may choose to
2061   // fold it back in when profitable.
2062   return false;
2063 }
2064 
2065 // Return one of the followings:
2066 // (1) `{0-31 value, false}` if FLI is available for Imm's type and FP value.
2067 // (2) `{0-31 value, true}` if Imm is negative and FLI is available for its
2068 // positive counterpart, which will be materialized from the first returned
2069 // element. The second returned element indicated that there should be a FNEG
2070 // followed.
2071 // (3) `{-1, _}` if there is no way FLI can be used to materialize Imm.
2072 std::pair<int, bool> RISCVTargetLowering::getLegalZfaFPImm(const APFloat &Imm,
2073                                                            EVT VT) const {
2074   if (!Subtarget.hasStdExtZfa())
2075     return std::make_pair(-1, false);
2076 
2077   bool IsSupportedVT = false;
2078   if (VT == MVT::f16) {
2079     IsSupportedVT = Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZvfh();
2080   } else if (VT == MVT::f32) {
2081     IsSupportedVT = true;
2082   } else if (VT == MVT::f64) {
2083     assert(Subtarget.hasStdExtD() && "Expect D extension");
2084     IsSupportedVT = true;
2085   }
2086 
2087   if (!IsSupportedVT)
2088     return std::make_pair(-1, false);
2089 
2090   int Index = RISCVLoadFPImm::getLoadFPImm(Imm);
2091   if (Index < 0 && Imm.isNegative())
2092     // Try the combination of its positive counterpart + FNEG.
2093     return std::make_pair(RISCVLoadFPImm::getLoadFPImm(-Imm), true);
2094   else
2095     return std::make_pair(Index, false);
2096 }
2097 
2098 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
2099                                        bool ForCodeSize) const {
2100   bool IsLegalVT = false;
2101   if (VT == MVT::f16)
2102     IsLegalVT = Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin();
2103   else if (VT == MVT::f32)
2104     IsLegalVT = Subtarget.hasStdExtFOrZfinx();
2105   else if (VT == MVT::f64)
2106     IsLegalVT = Subtarget.hasStdExtDOrZdinx();
2107   else if (VT == MVT::bf16)
2108     IsLegalVT = Subtarget.hasStdExtZfbfmin();
2109 
2110   if (!IsLegalVT)
2111     return false;
2112 
2113   if (getLegalZfaFPImm(Imm, VT).first >= 0)
2114     return true;
2115 
2116   // Cannot create a 64 bit floating-point immediate value for rv32.
2117   if (Subtarget.getXLen() < VT.getScalarSizeInBits()) {
2118     // td can handle +0.0 or -0.0 already.
2119     // -0.0 can be created by fmv + fneg.
2120     return Imm.isZero();
2121   }
2122 
2123   // Special case: fmv + fneg
2124   if (Imm.isNegZero())
2125     return true;
2126 
2127   // Building an integer and then converting requires a fmv at the end of
2128   // the integer sequence.
2129   const int Cost =
2130       1 + RISCVMatInt::getIntMatCost(Imm.bitcastToAPInt(), Subtarget.getXLen(),
2131                                      Subtarget);
2132   return Cost <= FPImmCost;
2133 }
2134 
2135 // TODO: This is very conservative.
2136 bool RISCVTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
2137                                                   unsigned Index) const {
2138   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
2139     return false;
2140 
2141   // Only support extracting a fixed from a fixed vector for now.
2142   if (ResVT.isScalableVector() || SrcVT.isScalableVector())
2143     return false;
2144 
2145   unsigned ResElts = ResVT.getVectorNumElements();
2146   unsigned SrcElts = SrcVT.getVectorNumElements();
2147 
2148   // Convervatively only handle extracting half of a vector.
2149   // TODO: Relax this.
2150   if ((ResElts * 2) != SrcElts)
2151     return false;
2152 
2153   // The smallest type we can slide is i8.
2154   // TODO: We can extract index 0 from a mask vector without a slide.
2155   if (ResVT.getVectorElementType() == MVT::i1)
2156     return false;
2157 
2158   // Slide can support arbitrary index, but we only treat vslidedown.vi as
2159   // cheap.
2160   if (Index >= 32)
2161     return false;
2162 
2163   // TODO: We can do arbitrary slidedowns, but for now only support extracting
2164   // the upper half of a vector until we have more test coverage.
2165   return Index == 0 || Index == ResElts;
2166 }
2167 
2168 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
2169                                                       CallingConv::ID CC,
2170                                                       EVT VT) const {
2171   // Use f32 to pass f16 if it is legal and Zfh/Zfhmin is not enabled.
2172   // We might still end up using a GPR but that will be decided based on ABI.
2173   if (VT == MVT::f16 && Subtarget.hasStdExtFOrZfinx() &&
2174       !Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin())
2175     return MVT::f32;
2176 
2177   MVT PartVT = TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
2178 
2179   if (RV64LegalI32 && Subtarget.is64Bit() && PartVT == MVT::i32)
2180     return MVT::i64;
2181 
2182   return PartVT;
2183 }
2184 
2185 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
2186                                                            CallingConv::ID CC,
2187                                                            EVT VT) const {
2188   // Use f32 to pass f16 if it is legal and Zfh/Zfhmin is not enabled.
2189   // We might still end up using a GPR but that will be decided based on ABI.
2190   if (VT == MVT::f16 && Subtarget.hasStdExtFOrZfinx() &&
2191       !Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin())
2192     return 1;
2193 
2194   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
2195 }
2196 
2197 unsigned RISCVTargetLowering::getVectorTypeBreakdownForCallingConv(
2198     LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
2199     unsigned &NumIntermediates, MVT &RegisterVT) const {
2200   unsigned NumRegs = TargetLowering::getVectorTypeBreakdownForCallingConv(
2201       Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
2202 
2203   if (RV64LegalI32 && Subtarget.is64Bit() && IntermediateVT == MVT::i32)
2204     IntermediateVT = MVT::i64;
2205 
2206   if (RV64LegalI32 && Subtarget.is64Bit() && RegisterVT == MVT::i32)
2207     RegisterVT = MVT::i64;
2208 
2209   return NumRegs;
2210 }
2211 
2212 // Changes the condition code and swaps operands if necessary, so the SetCC
2213 // operation matches one of the comparisons supported directly by branches
2214 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
2215 // with 1/-1.
2216 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
2217                                     ISD::CondCode &CC, SelectionDAG &DAG) {
2218   // If this is a single bit test that can't be handled by ANDI, shift the
2219   // bit to be tested to the MSB and perform a signed compare with 0.
2220   if (isIntEqualitySetCC(CC) && isNullConstant(RHS) &&
2221       LHS.getOpcode() == ISD::AND && LHS.hasOneUse() &&
2222       isa<ConstantSDNode>(LHS.getOperand(1))) {
2223     uint64_t Mask = LHS.getConstantOperandVal(1);
2224     if ((isPowerOf2_64(Mask) || isMask_64(Mask)) && !isInt<12>(Mask)) {
2225       unsigned ShAmt = 0;
2226       if (isPowerOf2_64(Mask)) {
2227         CC = CC == ISD::SETEQ ? ISD::SETGE : ISD::SETLT;
2228         ShAmt = LHS.getValueSizeInBits() - 1 - Log2_64(Mask);
2229       } else {
2230         ShAmt = LHS.getValueSizeInBits() - llvm::bit_width(Mask);
2231       }
2232 
2233       LHS = LHS.getOperand(0);
2234       if (ShAmt != 0)
2235         LHS = DAG.getNode(ISD::SHL, DL, LHS.getValueType(), LHS,
2236                           DAG.getConstant(ShAmt, DL, LHS.getValueType()));
2237       return;
2238     }
2239   }
2240 
2241   if (auto *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
2242     int64_t C = RHSC->getSExtValue();
2243     switch (CC) {
2244     default: break;
2245     case ISD::SETGT:
2246       // Convert X > -1 to X >= 0.
2247       if (C == -1) {
2248         RHS = DAG.getConstant(0, DL, RHS.getValueType());
2249         CC = ISD::SETGE;
2250         return;
2251       }
2252       break;
2253     case ISD::SETLT:
2254       // Convert X < 1 to 0 >= X.
2255       if (C == 1) {
2256         RHS = LHS;
2257         LHS = DAG.getConstant(0, DL, RHS.getValueType());
2258         CC = ISD::SETGE;
2259         return;
2260       }
2261       break;
2262     }
2263   }
2264 
2265   switch (CC) {
2266   default:
2267     break;
2268   case ISD::SETGT:
2269   case ISD::SETLE:
2270   case ISD::SETUGT:
2271   case ISD::SETULE:
2272     CC = ISD::getSetCCSwappedOperands(CC);
2273     std::swap(LHS, RHS);
2274     break;
2275   }
2276 }
2277 
2278 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
2279   assert(VT.isScalableVector() && "Expecting a scalable vector type");
2280   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
2281   if (VT.getVectorElementType() == MVT::i1)
2282     KnownSize *= 8;
2283 
2284   switch (KnownSize) {
2285   default:
2286     llvm_unreachable("Invalid LMUL.");
2287   case 8:
2288     return RISCVII::VLMUL::LMUL_F8;
2289   case 16:
2290     return RISCVII::VLMUL::LMUL_F4;
2291   case 32:
2292     return RISCVII::VLMUL::LMUL_F2;
2293   case 64:
2294     return RISCVII::VLMUL::LMUL_1;
2295   case 128:
2296     return RISCVII::VLMUL::LMUL_2;
2297   case 256:
2298     return RISCVII::VLMUL::LMUL_4;
2299   case 512:
2300     return RISCVII::VLMUL::LMUL_8;
2301   }
2302 }
2303 
2304 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
2305   switch (LMul) {
2306   default:
2307     llvm_unreachable("Invalid LMUL.");
2308   case RISCVII::VLMUL::LMUL_F8:
2309   case RISCVII::VLMUL::LMUL_F4:
2310   case RISCVII::VLMUL::LMUL_F2:
2311   case RISCVII::VLMUL::LMUL_1:
2312     return RISCV::VRRegClassID;
2313   case RISCVII::VLMUL::LMUL_2:
2314     return RISCV::VRM2RegClassID;
2315   case RISCVII::VLMUL::LMUL_4:
2316     return RISCV::VRM4RegClassID;
2317   case RISCVII::VLMUL::LMUL_8:
2318     return RISCV::VRM8RegClassID;
2319   }
2320 }
2321 
2322 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
2323   RISCVII::VLMUL LMUL = getLMUL(VT);
2324   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
2325       LMUL == RISCVII::VLMUL::LMUL_F4 ||
2326       LMUL == RISCVII::VLMUL::LMUL_F2 ||
2327       LMUL == RISCVII::VLMUL::LMUL_1) {
2328     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
2329                   "Unexpected subreg numbering");
2330     return RISCV::sub_vrm1_0 + Index;
2331   }
2332   if (LMUL == RISCVII::VLMUL::LMUL_2) {
2333     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
2334                   "Unexpected subreg numbering");
2335     return RISCV::sub_vrm2_0 + Index;
2336   }
2337   if (LMUL == RISCVII::VLMUL::LMUL_4) {
2338     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
2339                   "Unexpected subreg numbering");
2340     return RISCV::sub_vrm4_0 + Index;
2341   }
2342   llvm_unreachable("Invalid vector type.");
2343 }
2344 
2345 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
2346   if (VT.getVectorElementType() == MVT::i1)
2347     return RISCV::VRRegClassID;
2348   return getRegClassIDForLMUL(getLMUL(VT));
2349 }
2350 
2351 // Attempt to decompose a subvector insert/extract between VecVT and
2352 // SubVecVT via subregister indices. Returns the subregister index that
2353 // can perform the subvector insert/extract with the given element index, as
2354 // well as the index corresponding to any leftover subvectors that must be
2355 // further inserted/extracted within the register class for SubVecVT.
2356 std::pair<unsigned, unsigned>
2357 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
2358     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
2359     const RISCVRegisterInfo *TRI) {
2360   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
2361                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
2362                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
2363                 "Register classes not ordered");
2364   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
2365   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
2366   // Try to compose a subregister index that takes us from the incoming
2367   // LMUL>1 register class down to the outgoing one. At each step we half
2368   // the LMUL:
2369   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
2370   // Note that this is not guaranteed to find a subregister index, such as
2371   // when we are extracting from one VR type to another.
2372   unsigned SubRegIdx = RISCV::NoSubRegister;
2373   for (const unsigned RCID :
2374        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
2375     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
2376       VecVT = VecVT.getHalfNumVectorElementsVT();
2377       bool IsHi =
2378           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
2379       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
2380                                             getSubregIndexByMVT(VecVT, IsHi));
2381       if (IsHi)
2382         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
2383     }
2384   return {SubRegIdx, InsertExtractIdx};
2385 }
2386 
2387 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
2388 // stores for those types.
2389 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
2390   return !Subtarget.useRVVForFixedLengthVectors() ||
2391          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
2392 }
2393 
2394 bool RISCVTargetLowering::isLegalElementTypeForRVV(EVT ScalarTy) const {
2395   if (!ScalarTy.isSimple())
2396     return false;
2397   switch (ScalarTy.getSimpleVT().SimpleTy) {
2398   case MVT::iPTR:
2399     return Subtarget.is64Bit() ? Subtarget.hasVInstructionsI64() : true;
2400   case MVT::i8:
2401   case MVT::i16:
2402   case MVT::i32:
2403     return true;
2404   case MVT::i64:
2405     return Subtarget.hasVInstructionsI64();
2406   case MVT::f16:
2407     return Subtarget.hasVInstructionsF16();
2408   case MVT::f32:
2409     return Subtarget.hasVInstructionsF32();
2410   case MVT::f64:
2411     return Subtarget.hasVInstructionsF64();
2412   default:
2413     return false;
2414   }
2415 }
2416 
2417 
2418 unsigned RISCVTargetLowering::combineRepeatedFPDivisors() const {
2419   return NumRepeatedDivisors;
2420 }
2421 
2422 static SDValue getVLOperand(SDValue Op) {
2423   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2424           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
2425          "Unexpected opcode");
2426   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
2427   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
2428   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
2429       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
2430   if (!II)
2431     return SDValue();
2432   return Op.getOperand(II->VLOperand + 1 + HasChain);
2433 }
2434 
2435 static bool useRVVForFixedLengthVectorVT(MVT VT,
2436                                          const RISCVSubtarget &Subtarget) {
2437   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
2438   if (!Subtarget.useRVVForFixedLengthVectors())
2439     return false;
2440 
2441   // We only support a set of vector types with a consistent maximum fixed size
2442   // across all supported vector element types to avoid legalization issues.
2443   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
2444   // fixed-length vector type we support is 1024 bytes.
2445   if (VT.getFixedSizeInBits() > 1024 * 8)
2446     return false;
2447 
2448   unsigned MinVLen = Subtarget.getRealMinVLen();
2449 
2450   MVT EltVT = VT.getVectorElementType();
2451 
2452   // Don't use RVV for vectors we cannot scalarize if required.
2453   switch (EltVT.SimpleTy) {
2454   // i1 is supported but has different rules.
2455   default:
2456     return false;
2457   case MVT::i1:
2458     // Masks can only use a single register.
2459     if (VT.getVectorNumElements() > MinVLen)
2460       return false;
2461     MinVLen /= 8;
2462     break;
2463   case MVT::i8:
2464   case MVT::i16:
2465   case MVT::i32:
2466     break;
2467   case MVT::i64:
2468     if (!Subtarget.hasVInstructionsI64())
2469       return false;
2470     break;
2471   case MVT::f16:
2472     if (!Subtarget.hasVInstructionsF16Minimal())
2473       return false;
2474     break;
2475   case MVT::f32:
2476     if (!Subtarget.hasVInstructionsF32())
2477       return false;
2478     break;
2479   case MVT::f64:
2480     if (!Subtarget.hasVInstructionsF64())
2481       return false;
2482     break;
2483   }
2484 
2485   // Reject elements larger than ELEN.
2486   if (EltVT.getSizeInBits() > Subtarget.getELen())
2487     return false;
2488 
2489   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
2490   // Don't use RVV for types that don't fit.
2491   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
2492     return false;
2493 
2494   // TODO: Perhaps an artificial restriction, but worth having whilst getting
2495   // the base fixed length RVV support in place.
2496   if (!VT.isPow2VectorType())
2497     return false;
2498 
2499   return true;
2500 }
2501 
2502 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
2503   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
2504 }
2505 
2506 // Return the largest legal scalable vector type that matches VT's element type.
2507 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
2508                                             const RISCVSubtarget &Subtarget) {
2509   // This may be called before legal types are setup.
2510   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
2511           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
2512          "Expected legal fixed length vector!");
2513 
2514   unsigned MinVLen = Subtarget.getRealMinVLen();
2515   unsigned MaxELen = Subtarget.getELen();
2516 
2517   MVT EltVT = VT.getVectorElementType();
2518   switch (EltVT.SimpleTy) {
2519   default:
2520     llvm_unreachable("unexpected element type for RVV container");
2521   case MVT::i1:
2522   case MVT::i8:
2523   case MVT::i16:
2524   case MVT::i32:
2525   case MVT::i64:
2526   case MVT::f16:
2527   case MVT::f32:
2528   case MVT::f64: {
2529     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
2530     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
2531     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
2532     unsigned NumElts =
2533         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
2534     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
2535     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
2536     return MVT::getScalableVectorVT(EltVT, NumElts);
2537   }
2538   }
2539 }
2540 
2541 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
2542                                             const RISCVSubtarget &Subtarget) {
2543   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
2544                                           Subtarget);
2545 }
2546 
2547 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
2548   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
2549 }
2550 
2551 // Grow V to consume an entire RVV register.
2552 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
2553                                        const RISCVSubtarget &Subtarget) {
2554   assert(VT.isScalableVector() &&
2555          "Expected to convert into a scalable vector!");
2556   assert(V.getValueType().isFixedLengthVector() &&
2557          "Expected a fixed length vector operand!");
2558   SDLoc DL(V);
2559   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
2560   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
2561 }
2562 
2563 // Shrink V so it's just big enough to maintain a VT's worth of data.
2564 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
2565                                          const RISCVSubtarget &Subtarget) {
2566   assert(VT.isFixedLengthVector() &&
2567          "Expected to convert into a fixed length vector!");
2568   assert(V.getValueType().isScalableVector() &&
2569          "Expected a scalable vector operand!");
2570   SDLoc DL(V);
2571   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
2572   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
2573 }
2574 
2575 /// Return the type of the mask type suitable for masking the provided
2576 /// vector type.  This is simply an i1 element type vector of the same
2577 /// (possibly scalable) length.
2578 static MVT getMaskTypeFor(MVT VecVT) {
2579   assert(VecVT.isVector());
2580   ElementCount EC = VecVT.getVectorElementCount();
2581   return MVT::getVectorVT(MVT::i1, EC);
2582 }
2583 
2584 /// Creates an all ones mask suitable for masking a vector of type VecTy with
2585 /// vector length VL.  .
2586 static SDValue getAllOnesMask(MVT VecVT, SDValue VL, const SDLoc &DL,
2587                               SelectionDAG &DAG) {
2588   MVT MaskVT = getMaskTypeFor(VecVT);
2589   return DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2590 }
2591 
2592 static SDValue getVLOp(uint64_t NumElts, MVT ContainerVT, const SDLoc &DL,
2593                        SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {
2594   // If we know the exact VLEN, our VL is exactly equal to VLMAX, and
2595   // we can't encode the AVL as an immediate, use the VLMAX encoding.
2596   const auto [MinVLMAX, MaxVLMAX] =
2597       RISCVTargetLowering::computeVLMAXBounds(ContainerVT, Subtarget);
2598   if (MinVLMAX == MaxVLMAX && NumElts == MinVLMAX && NumElts > 31)
2599     return DAG.getRegister(RISCV::X0, Subtarget.getXLenVT());
2600 
2601   return DAG.getConstant(NumElts, DL, Subtarget.getXLenVT());
2602 }
2603 
2604 static std::pair<SDValue, SDValue>
2605 getDefaultScalableVLOps(MVT VecVT, const SDLoc &DL, SelectionDAG &DAG,
2606                         const RISCVSubtarget &Subtarget) {
2607   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
2608   SDValue VL = DAG.getRegister(RISCV::X0, Subtarget.getXLenVT());
2609   SDValue Mask = getAllOnesMask(VecVT, VL, DL, DAG);
2610   return {Mask, VL};
2611 }
2612 
2613 static std::pair<SDValue, SDValue>
2614 getDefaultVLOps(uint64_t NumElts, MVT ContainerVT, const SDLoc &DL,
2615                 SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {
2616   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
2617   SDValue VL = getVLOp(NumElts, ContainerVT, DL, DAG, Subtarget);
2618   SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
2619   return {Mask, VL};
2620 }
2621 
2622 // Gets the two common "VL" operands: an all-ones mask and the vector length.
2623 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
2624 // the vector type that the fixed-length vector is contained in. Otherwise if
2625 // VecVT is scalable, then ContainerVT should be the same as VecVT.
2626 static std::pair<SDValue, SDValue>
2627 getDefaultVLOps(MVT VecVT, MVT ContainerVT, const SDLoc &DL, SelectionDAG &DAG,
2628                 const RISCVSubtarget &Subtarget) {
2629   if (VecVT.isFixedLengthVector())
2630     return getDefaultVLOps(VecVT.getVectorNumElements(), ContainerVT, DL, DAG,
2631                            Subtarget);
2632   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
2633   return getDefaultScalableVLOps(ContainerVT, DL, DAG, Subtarget);
2634 }
2635 
2636 SDValue RISCVTargetLowering::computeVLMax(MVT VecVT, const SDLoc &DL,
2637                                           SelectionDAG &DAG) const {
2638   assert(VecVT.isScalableVector() && "Expected scalable vector");
2639   return DAG.getElementCount(DL, Subtarget.getXLenVT(),
2640                              VecVT.getVectorElementCount());
2641 }
2642 
2643 std::pair<unsigned, unsigned>
2644 RISCVTargetLowering::computeVLMAXBounds(MVT VecVT,
2645                                         const RISCVSubtarget &Subtarget) {
2646   assert(VecVT.isScalableVector() && "Expected scalable vector");
2647 
2648   unsigned EltSize = VecVT.getScalarSizeInBits();
2649   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
2650 
2651   unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
2652   unsigned MaxVLMAX =
2653       RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
2654 
2655   unsigned VectorBitsMin = Subtarget.getRealMinVLen();
2656   unsigned MinVLMAX =
2657       RISCVTargetLowering::computeVLMAX(VectorBitsMin, EltSize, MinSize);
2658 
2659   return std::make_pair(MinVLMAX, MaxVLMAX);
2660 }
2661 
2662 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
2663 // of either is (currently) supported. This can get us into an infinite loop
2664 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
2665 // as a ..., etc.
2666 // Until either (or both) of these can reliably lower any node, reporting that
2667 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
2668 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
2669 // which is not desirable.
2670 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
2671     EVT VT, unsigned DefinedValues) const {
2672   return false;
2673 }
2674 
2675 InstructionCost RISCVTargetLowering::getLMULCost(MVT VT) const {
2676   // TODO: Here assume reciprocal throughput is 1 for LMUL_1, it is
2677   // implementation-defined.
2678   if (!VT.isVector())
2679     return InstructionCost::getInvalid();
2680   unsigned DLenFactor = Subtarget.getDLenFactor();
2681   unsigned Cost;
2682   if (VT.isScalableVector()) {
2683     unsigned LMul;
2684     bool Fractional;
2685     std::tie(LMul, Fractional) =
2686         RISCVVType::decodeVLMUL(RISCVTargetLowering::getLMUL(VT));
2687     if (Fractional)
2688       Cost = LMul <= DLenFactor ? (DLenFactor / LMul) : 1;
2689     else
2690       Cost = (LMul * DLenFactor);
2691   } else {
2692     Cost = divideCeil(VT.getSizeInBits(), Subtarget.getRealMinVLen() / DLenFactor);
2693   }
2694   return Cost;
2695 }
2696 
2697 
2698 /// Return the cost of a vrgather.vv instruction for the type VT.  vrgather.vv
2699 /// is generally quadratic in the number of vreg implied by LMUL.  Note that
2700 /// operand (index and possibly mask) are handled separately.
2701 InstructionCost RISCVTargetLowering::getVRGatherVVCost(MVT VT) const {
2702   return getLMULCost(VT) * getLMULCost(VT);
2703 }
2704 
2705 /// Return the cost of a vrgather.vi (or vx) instruction for the type VT.
2706 /// vrgather.vi/vx may be linear in the number of vregs implied by LMUL,
2707 /// or may track the vrgather.vv cost. It is implementation-dependent.
2708 InstructionCost RISCVTargetLowering::getVRGatherVICost(MVT VT) const {
2709   return getLMULCost(VT);
2710 }
2711 
2712 /// Return the cost of a vslidedown.vi/vx or vslideup.vi/vx instruction
2713 /// for the type VT.  (This does not cover the vslide1up or vslide1down
2714 /// variants.)  Slides may be linear in the number of vregs implied by LMUL,
2715 /// or may track the vrgather.vv cost. It is implementation-dependent.
2716 InstructionCost RISCVTargetLowering::getVSlideCost(MVT VT) const {
2717   return getLMULCost(VT);
2718 }
2719 
2720 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
2721                                   const RISCVSubtarget &Subtarget) {
2722   // RISC-V FP-to-int conversions saturate to the destination register size, but
2723   // don't produce 0 for nan. We can use a conversion instruction and fix the
2724   // nan case with a compare and a select.
2725   SDValue Src = Op.getOperand(0);
2726 
2727   MVT DstVT = Op.getSimpleValueType();
2728   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2729 
2730   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
2731 
2732   if (!DstVT.isVector()) {
2733     // For bf16 or for f16 in absense of Zfh, promote to f32, then saturate
2734     // the result.
2735     if ((Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx()) ||
2736         Src.getValueType() == MVT::bf16) {
2737       Src = DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), MVT::f32, Src);
2738     }
2739 
2740     unsigned Opc;
2741     if (SatVT == DstVT)
2742       Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
2743     else if (DstVT == MVT::i64 && SatVT == MVT::i32)
2744       Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
2745     else
2746       return SDValue();
2747     // FIXME: Support other SatVTs by clamping before or after the conversion.
2748 
2749     SDLoc DL(Op);
2750     SDValue FpToInt = DAG.getNode(
2751         Opc, DL, DstVT, Src,
2752         DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
2753 
2754     if (Opc == RISCVISD::FCVT_WU_RV64)
2755       FpToInt = DAG.getZeroExtendInReg(FpToInt, DL, MVT::i32);
2756 
2757     SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
2758     return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt,
2759                            ISD::CondCode::SETUO);
2760   }
2761 
2762   // Vectors.
2763 
2764   MVT DstEltVT = DstVT.getVectorElementType();
2765   MVT SrcVT = Src.getSimpleValueType();
2766   MVT SrcEltVT = SrcVT.getVectorElementType();
2767   unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2768   unsigned DstEltSize = DstEltVT.getSizeInBits();
2769 
2770   // Only handle saturating to the destination type.
2771   if (SatVT != DstEltVT)
2772     return SDValue();
2773 
2774   // FIXME: Don't support narrowing by more than 1 steps for now.
2775   if (SrcEltSize > (2 * DstEltSize))
2776     return SDValue();
2777 
2778   MVT DstContainerVT = DstVT;
2779   MVT SrcContainerVT = SrcVT;
2780   if (DstVT.isFixedLengthVector()) {
2781     DstContainerVT = getContainerForFixedLengthVector(DAG, DstVT, Subtarget);
2782     SrcContainerVT = getContainerForFixedLengthVector(DAG, SrcVT, Subtarget);
2783     assert(DstContainerVT.getVectorElementCount() ==
2784                SrcContainerVT.getVectorElementCount() &&
2785            "Expected same element count");
2786     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2787   }
2788 
2789   SDLoc DL(Op);
2790 
2791   auto [Mask, VL] = getDefaultVLOps(DstVT, DstContainerVT, DL, DAG, Subtarget);
2792 
2793   SDValue IsNan = DAG.getNode(RISCVISD::SETCC_VL, DL, Mask.getValueType(),
2794                               {Src, Src, DAG.getCondCode(ISD::SETNE),
2795                                DAG.getUNDEF(Mask.getValueType()), Mask, VL});
2796 
2797   // Need to widen by more than 1 step, promote the FP type, then do a widening
2798   // convert.
2799   if (DstEltSize > (2 * SrcEltSize)) {
2800     assert(SrcContainerVT.getVectorElementType() == MVT::f16 && "Unexpected VT!");
2801     MVT InterVT = SrcContainerVT.changeVectorElementType(MVT::f32);
2802     Src = DAG.getNode(RISCVISD::FP_EXTEND_VL, DL, InterVT, Src, Mask, VL);
2803   }
2804 
2805   unsigned RVVOpc =
2806       IsSigned ? RISCVISD::VFCVT_RTZ_X_F_VL : RISCVISD::VFCVT_RTZ_XU_F_VL;
2807   SDValue Res = DAG.getNode(RVVOpc, DL, DstContainerVT, Src, Mask, VL);
2808 
2809   SDValue SplatZero = DAG.getNode(
2810       RISCVISD::VMV_V_X_VL, DL, DstContainerVT, DAG.getUNDEF(DstContainerVT),
2811       DAG.getConstant(0, DL, Subtarget.getXLenVT()), VL);
2812   Res = DAG.getNode(RISCVISD::VSELECT_VL, DL, DstContainerVT, IsNan, SplatZero,
2813                     Res, VL);
2814 
2815   if (DstVT.isFixedLengthVector())
2816     Res = convertFromScalableVector(DstVT, Res, DAG, Subtarget);
2817 
2818   return Res;
2819 }
2820 
2821 static RISCVFPRndMode::RoundingMode matchRoundingOp(unsigned Opc) {
2822   switch (Opc) {
2823   case ISD::FROUNDEVEN:
2824   case ISD::STRICT_FROUNDEVEN:
2825   case ISD::VP_FROUNDEVEN:
2826     return RISCVFPRndMode::RNE;
2827   case ISD::FTRUNC:
2828   case ISD::STRICT_FTRUNC:
2829   case ISD::VP_FROUNDTOZERO:
2830     return RISCVFPRndMode::RTZ;
2831   case ISD::FFLOOR:
2832   case ISD::STRICT_FFLOOR:
2833   case ISD::VP_FFLOOR:
2834     return RISCVFPRndMode::RDN;
2835   case ISD::FCEIL:
2836   case ISD::STRICT_FCEIL:
2837   case ISD::VP_FCEIL:
2838     return RISCVFPRndMode::RUP;
2839   case ISD::FROUND:
2840   case ISD::STRICT_FROUND:
2841   case ISD::VP_FROUND:
2842     return RISCVFPRndMode::RMM;
2843   case ISD::FRINT:
2844     return RISCVFPRndMode::DYN;
2845   }
2846 
2847   return RISCVFPRndMode::Invalid;
2848 }
2849 
2850 // Expand vector FTRUNC, FCEIL, FFLOOR, FROUND, VP_FCEIL, VP_FFLOOR, VP_FROUND
2851 // VP_FROUNDEVEN, VP_FROUNDTOZERO, VP_FRINT and VP_FNEARBYINT by converting to
2852 // the integer domain and back. Taking care to avoid converting values that are
2853 // nan or already correct.
2854 static SDValue
2855 lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
2856                                       const RISCVSubtarget &Subtarget) {
2857   MVT VT = Op.getSimpleValueType();
2858   assert(VT.isVector() && "Unexpected type");
2859 
2860   SDLoc DL(Op);
2861 
2862   SDValue Src = Op.getOperand(0);
2863 
2864   MVT ContainerVT = VT;
2865   if (VT.isFixedLengthVector()) {
2866     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2867     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2868   }
2869 
2870   SDValue Mask, VL;
2871   if (Op->isVPOpcode()) {
2872     Mask = Op.getOperand(1);
2873     if (VT.isFixedLengthVector())
2874       Mask = convertToScalableVector(getMaskTypeFor(ContainerVT), Mask, DAG,
2875                                      Subtarget);
2876     VL = Op.getOperand(2);
2877   } else {
2878     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2879   }
2880 
2881   // Freeze the source since we are increasing the number of uses.
2882   Src = DAG.getFreeze(Src);
2883 
2884   // We do the conversion on the absolute value and fix the sign at the end.
2885   SDValue Abs = DAG.getNode(RISCVISD::FABS_VL, DL, ContainerVT, Src, Mask, VL);
2886 
2887   // Determine the largest integer that can be represented exactly. This and
2888   // values larger than it don't have any fractional bits so don't need to
2889   // be converted.
2890   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(ContainerVT);
2891   unsigned Precision = APFloat::semanticsPrecision(FltSem);
2892   APFloat MaxVal = APFloat(FltSem);
2893   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
2894                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
2895   SDValue MaxValNode =
2896       DAG.getConstantFP(MaxVal, DL, ContainerVT.getVectorElementType());
2897   SDValue MaxValSplat = DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, ContainerVT,
2898                                     DAG.getUNDEF(ContainerVT), MaxValNode, VL);
2899 
2900   // If abs(Src) was larger than MaxVal or nan, keep it.
2901   MVT SetccVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
2902   Mask =
2903       DAG.getNode(RISCVISD::SETCC_VL, DL, SetccVT,
2904                   {Abs, MaxValSplat, DAG.getCondCode(ISD::SETOLT),
2905                    Mask, Mask, VL});
2906 
2907   // Truncate to integer and convert back to FP.
2908   MVT IntVT = ContainerVT.changeVectorElementTypeToInteger();
2909   MVT XLenVT = Subtarget.getXLenVT();
2910   SDValue Truncated;
2911 
2912   switch (Op.getOpcode()) {
2913   default:
2914     llvm_unreachable("Unexpected opcode");
2915   case ISD::FCEIL:
2916   case ISD::VP_FCEIL:
2917   case ISD::FFLOOR:
2918   case ISD::VP_FFLOOR:
2919   case ISD::FROUND:
2920   case ISD::FROUNDEVEN:
2921   case ISD::VP_FROUND:
2922   case ISD::VP_FROUNDEVEN:
2923   case ISD::VP_FROUNDTOZERO: {
2924     RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Op.getOpcode());
2925     assert(FRM != RISCVFPRndMode::Invalid);
2926     Truncated = DAG.getNode(RISCVISD::VFCVT_RM_X_F_VL, DL, IntVT, Src, Mask,
2927                             DAG.getTargetConstant(FRM, DL, XLenVT), VL);
2928     break;
2929   }
2930   case ISD::FTRUNC:
2931     Truncated = DAG.getNode(RISCVISD::VFCVT_RTZ_X_F_VL, DL, IntVT, Src,
2932                             Mask, VL);
2933     break;
2934   case ISD::FRINT:
2935   case ISD::VP_FRINT:
2936     Truncated = DAG.getNode(RISCVISD::VFCVT_X_F_VL, DL, IntVT, Src, Mask, VL);
2937     break;
2938   case ISD::FNEARBYINT:
2939   case ISD::VP_FNEARBYINT:
2940     Truncated = DAG.getNode(RISCVISD::VFROUND_NOEXCEPT_VL, DL, ContainerVT, Src,
2941                             Mask, VL);
2942     break;
2943   }
2944 
2945   // VFROUND_NOEXCEPT_VL includes SINT_TO_FP_VL.
2946   if (Truncated.getOpcode() != RISCVISD::VFROUND_NOEXCEPT_VL)
2947     Truncated = DAG.getNode(RISCVISD::SINT_TO_FP_VL, DL, ContainerVT, Truncated,
2948                             Mask, VL);
2949 
2950   // Restore the original sign so that -0.0 is preserved.
2951   Truncated = DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Truncated,
2952                           Src, Src, Mask, VL);
2953 
2954   if (!VT.isFixedLengthVector())
2955     return Truncated;
2956 
2957   return convertFromScalableVector(VT, Truncated, DAG, Subtarget);
2958 }
2959 
2960 // Expand vector STRICT_FTRUNC, STRICT_FCEIL, STRICT_FFLOOR, STRICT_FROUND
2961 // STRICT_FROUNDEVEN and STRICT_FNEARBYINT by converting sNan of the source to
2962 // qNan and coverting the new source to integer and back to FP.
2963 static SDValue
2964 lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
2965                                             const RISCVSubtarget &Subtarget) {
2966   SDLoc DL(Op);
2967   MVT VT = Op.getSimpleValueType();
2968   SDValue Chain = Op.getOperand(0);
2969   SDValue Src = Op.getOperand(1);
2970 
2971   MVT ContainerVT = VT;
2972   if (VT.isFixedLengthVector()) {
2973     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2974     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2975   }
2976 
2977   auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2978 
2979   // Freeze the source since we are increasing the number of uses.
2980   Src = DAG.getFreeze(Src);
2981 
2982   // Covert sNan to qNan by executing x + x for all unordered elemenet x in Src.
2983   MVT MaskVT = Mask.getSimpleValueType();
2984   SDValue Unorder = DAG.getNode(RISCVISD::STRICT_FSETCC_VL, DL,
2985                                 DAG.getVTList(MaskVT, MVT::Other),
2986                                 {Chain, Src, Src, DAG.getCondCode(ISD::SETUNE),
2987                                  DAG.getUNDEF(MaskVT), Mask, VL});
2988   Chain = Unorder.getValue(1);
2989   Src = DAG.getNode(RISCVISD::STRICT_FADD_VL, DL,
2990                     DAG.getVTList(ContainerVT, MVT::Other),
2991                     {Chain, Src, Src, DAG.getUNDEF(ContainerVT), Unorder, VL});
2992   Chain = Src.getValue(1);
2993 
2994   // We do the conversion on the absolute value and fix the sign at the end.
2995   SDValue Abs = DAG.getNode(RISCVISD::FABS_VL, DL, ContainerVT, Src, Mask, VL);
2996 
2997   // Determine the largest integer that can be represented exactly. This and
2998   // values larger than it don't have any fractional bits so don't need to
2999   // be converted.
3000   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(ContainerVT);
3001   unsigned Precision = APFloat::semanticsPrecision(FltSem);
3002   APFloat MaxVal = APFloat(FltSem);
3003   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
3004                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
3005   SDValue MaxValNode =
3006       DAG.getConstantFP(MaxVal, DL, ContainerVT.getVectorElementType());
3007   SDValue MaxValSplat = DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, ContainerVT,
3008                                     DAG.getUNDEF(ContainerVT), MaxValNode, VL);
3009 
3010   // If abs(Src) was larger than MaxVal or nan, keep it.
3011   Mask = DAG.getNode(
3012       RISCVISD::SETCC_VL, DL, MaskVT,
3013       {Abs, MaxValSplat, DAG.getCondCode(ISD::SETOLT), Mask, Mask, VL});
3014 
3015   // Truncate to integer and convert back to FP.
3016   MVT IntVT = ContainerVT.changeVectorElementTypeToInteger();
3017   MVT XLenVT = Subtarget.getXLenVT();
3018   SDValue Truncated;
3019 
3020   switch (Op.getOpcode()) {
3021   default:
3022     llvm_unreachable("Unexpected opcode");
3023   case ISD::STRICT_FCEIL:
3024   case ISD::STRICT_FFLOOR:
3025   case ISD::STRICT_FROUND:
3026   case ISD::STRICT_FROUNDEVEN: {
3027     RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Op.getOpcode());
3028     assert(FRM != RISCVFPRndMode::Invalid);
3029     Truncated = DAG.getNode(
3030         RISCVISD::STRICT_VFCVT_RM_X_F_VL, DL, DAG.getVTList(IntVT, MVT::Other),
3031         {Chain, Src, Mask, DAG.getTargetConstant(FRM, DL, XLenVT), VL});
3032     break;
3033   }
3034   case ISD::STRICT_FTRUNC:
3035     Truncated =
3036         DAG.getNode(RISCVISD::STRICT_VFCVT_RTZ_X_F_VL, DL,
3037                     DAG.getVTList(IntVT, MVT::Other), Chain, Src, Mask, VL);
3038     break;
3039   case ISD::STRICT_FNEARBYINT:
3040     Truncated = DAG.getNode(RISCVISD::STRICT_VFROUND_NOEXCEPT_VL, DL,
3041                             DAG.getVTList(ContainerVT, MVT::Other), Chain, Src,
3042                             Mask, VL);
3043     break;
3044   }
3045   Chain = Truncated.getValue(1);
3046 
3047   // VFROUND_NOEXCEPT_VL includes SINT_TO_FP_VL.
3048   if (Op.getOpcode() != ISD::STRICT_FNEARBYINT) {
3049     Truncated = DAG.getNode(RISCVISD::STRICT_SINT_TO_FP_VL, DL,
3050                             DAG.getVTList(ContainerVT, MVT::Other), Chain,
3051                             Truncated, Mask, VL);
3052     Chain = Truncated.getValue(1);
3053   }
3054 
3055   // Restore the original sign so that -0.0 is preserved.
3056   Truncated = DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Truncated,
3057                           Src, Src, Mask, VL);
3058 
3059   if (VT.isFixedLengthVector())
3060     Truncated = convertFromScalableVector(VT, Truncated, DAG, Subtarget);
3061   return DAG.getMergeValues({Truncated, Chain}, DL);
3062 }
3063 
3064 static SDValue
3065 lowerFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
3066                                 const RISCVSubtarget &Subtarget) {
3067   MVT VT = Op.getSimpleValueType();
3068   if (VT.isVector())
3069     return lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
3070 
3071   if (DAG.shouldOptForSize())
3072     return SDValue();
3073 
3074   SDLoc DL(Op);
3075   SDValue Src = Op.getOperand(0);
3076 
3077   // Create an integer the size of the mantissa with the MSB set. This and all
3078   // values larger than it don't have any fractional bits so don't need to be
3079   // converted.
3080   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
3081   unsigned Precision = APFloat::semanticsPrecision(FltSem);
3082   APFloat MaxVal = APFloat(FltSem);
3083   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
3084                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
3085   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
3086 
3087   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Op.getOpcode());
3088   return DAG.getNode(RISCVISD::FROUND, DL, VT, Src, MaxValNode,
3089                      DAG.getTargetConstant(FRM, DL, Subtarget.getXLenVT()));
3090 }
3091 
3092 // Expand vector LRINT and LLRINT by converting to the integer domain.
3093 static SDValue lowerVectorXRINT(SDValue Op, SelectionDAG &DAG,
3094                                 const RISCVSubtarget &Subtarget) {
3095   MVT VT = Op.getSimpleValueType();
3096   assert(VT.isVector() && "Unexpected type");
3097 
3098   SDLoc DL(Op);
3099   SDValue Src = Op.getOperand(0);
3100   MVT ContainerVT = VT;
3101 
3102   if (VT.isFixedLengthVector()) {
3103     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
3104     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3105   }
3106 
3107   auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3108   SDValue Truncated =
3109       DAG.getNode(RISCVISD::VFCVT_X_F_VL, DL, ContainerVT, Src, Mask, VL);
3110 
3111   if (!VT.isFixedLengthVector())
3112     return Truncated;
3113 
3114   return convertFromScalableVector(VT, Truncated, DAG, Subtarget);
3115 }
3116 
3117 static SDValue
3118 getVSlidedown(SelectionDAG &DAG, const RISCVSubtarget &Subtarget,
3119               const SDLoc &DL, EVT VT, SDValue Merge, SDValue Op,
3120               SDValue Offset, SDValue Mask, SDValue VL,
3121               unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
3122   if (Merge.isUndef())
3123     Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
3124   SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
3125   SDValue Ops[] = {Merge, Op, Offset, Mask, VL, PolicyOp};
3126   return DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VT, Ops);
3127 }
3128 
3129 static SDValue
3130 getVSlideup(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const SDLoc &DL,
3131             EVT VT, SDValue Merge, SDValue Op, SDValue Offset, SDValue Mask,
3132             SDValue VL,
3133             unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
3134   if (Merge.isUndef())
3135     Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
3136   SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
3137   SDValue Ops[] = {Merge, Op, Offset, Mask, VL, PolicyOp};
3138   return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VT, Ops);
3139 }
3140 
3141 static MVT getLMUL1VT(MVT VT) {
3142   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
3143          "Unexpected vector MVT");
3144   return MVT::getScalableVectorVT(
3145       VT.getVectorElementType(),
3146       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
3147 }
3148 
3149 struct VIDSequence {
3150   int64_t StepNumerator;
3151   unsigned StepDenominator;
3152   int64_t Addend;
3153 };
3154 
3155 static std::optional<uint64_t> getExactInteger(const APFloat &APF,
3156                                                uint32_t BitWidth) {
3157   APSInt ValInt(BitWidth, !APF.isNegative());
3158   // We use an arbitrary rounding mode here. If a floating-point is an exact
3159   // integer (e.g., 1.0), the rounding mode does not affect the output value. If
3160   // the rounding mode changes the output value, then it is not an exact
3161   // integer.
3162   RoundingMode ArbitraryRM = RoundingMode::TowardZero;
3163   bool IsExact;
3164   // If it is out of signed integer range, it will return an invalid operation.
3165   // If it is not an exact integer, IsExact is false.
3166   if ((APF.convertToInteger(ValInt, ArbitraryRM, &IsExact) ==
3167        APFloatBase::opInvalidOp) ||
3168       !IsExact)
3169     return std::nullopt;
3170   return ValInt.extractBitsAsZExtValue(BitWidth, 0);
3171 }
3172 
3173 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
3174 // to the (non-zero) step S and start value X. This can be then lowered as the
3175 // RVV sequence (VID * S) + X, for example.
3176 // The step S is represented as an integer numerator divided by a positive
3177 // denominator. Note that the implementation currently only identifies
3178 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
3179 // cannot detect 2/3, for example.
3180 // Note that this method will also match potentially unappealing index
3181 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
3182 // determine whether this is worth generating code for.
3183 static std::optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
3184   unsigned NumElts = Op.getNumOperands();
3185   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
3186   bool IsInteger = Op.getValueType().isInteger();
3187 
3188   std::optional<unsigned> SeqStepDenom;
3189   std::optional<int64_t> SeqStepNum, SeqAddend;
3190   std::optional<std::pair<uint64_t, unsigned>> PrevElt;
3191   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
3192   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
3193     // Assume undef elements match the sequence; we just have to be careful
3194     // when interpolating across them.
3195     if (Op.getOperand(Idx).isUndef())
3196       continue;
3197 
3198     uint64_t Val;
3199     if (IsInteger) {
3200       // The BUILD_VECTOR must be all constants.
3201       if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
3202         return std::nullopt;
3203       Val = Op.getConstantOperandVal(Idx) &
3204             maskTrailingOnes<uint64_t>(EltSizeInBits);
3205     } else {
3206       // The BUILD_VECTOR must be all constants.
3207       if (!isa<ConstantFPSDNode>(Op.getOperand(Idx)))
3208         return std::nullopt;
3209       if (auto ExactInteger = getExactInteger(
3210               cast<ConstantFPSDNode>(Op.getOperand(Idx))->getValueAPF(),
3211               EltSizeInBits))
3212         Val = *ExactInteger;
3213       else
3214         return std::nullopt;
3215     }
3216 
3217     if (PrevElt) {
3218       // Calculate the step since the last non-undef element, and ensure
3219       // it's consistent across the entire sequence.
3220       unsigned IdxDiff = Idx - PrevElt->second;
3221       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
3222 
3223       // A zero-value value difference means that we're somewhere in the middle
3224       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
3225       // step change before evaluating the sequence.
3226       if (ValDiff == 0)
3227         continue;
3228 
3229       int64_t Remainder = ValDiff % IdxDiff;
3230       // Normalize the step if it's greater than 1.
3231       if (Remainder != ValDiff) {
3232         // The difference must cleanly divide the element span.
3233         if (Remainder != 0)
3234           return std::nullopt;
3235         ValDiff /= IdxDiff;
3236         IdxDiff = 1;
3237       }
3238 
3239       if (!SeqStepNum)
3240         SeqStepNum = ValDiff;
3241       else if (ValDiff != SeqStepNum)
3242         return std::nullopt;
3243 
3244       if (!SeqStepDenom)
3245         SeqStepDenom = IdxDiff;
3246       else if (IdxDiff != *SeqStepDenom)
3247         return std::nullopt;
3248     }
3249 
3250     // Record this non-undef element for later.
3251     if (!PrevElt || PrevElt->first != Val)
3252       PrevElt = std::make_pair(Val, Idx);
3253   }
3254 
3255   // We need to have logged a step for this to count as a legal index sequence.
3256   if (!SeqStepNum || !SeqStepDenom)
3257     return std::nullopt;
3258 
3259   // Loop back through the sequence and validate elements we might have skipped
3260   // while waiting for a valid step. While doing this, log any sequence addend.
3261   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
3262     if (Op.getOperand(Idx).isUndef())
3263       continue;
3264     uint64_t Val;
3265     if (IsInteger) {
3266       Val = Op.getConstantOperandVal(Idx) &
3267             maskTrailingOnes<uint64_t>(EltSizeInBits);
3268     } else {
3269       Val = *getExactInteger(
3270           cast<ConstantFPSDNode>(Op.getOperand(Idx))->getValueAPF(),
3271           EltSizeInBits);
3272     }
3273     uint64_t ExpectedVal =
3274         (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
3275     int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
3276     if (!SeqAddend)
3277       SeqAddend = Addend;
3278     else if (Addend != SeqAddend)
3279       return std::nullopt;
3280   }
3281 
3282   assert(SeqAddend && "Must have an addend if we have a step");
3283 
3284   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
3285 }
3286 
3287 // Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
3288 // and lower it as a VRGATHER_VX_VL from the source vector.
3289 static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
3290                                   SelectionDAG &DAG,
3291                                   const RISCVSubtarget &Subtarget) {
3292   if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
3293     return SDValue();
3294   SDValue Vec = SplatVal.getOperand(0);
3295   // Only perform this optimization on vectors of the same size for simplicity.
3296   // Don't perform this optimization for i1 vectors.
3297   // FIXME: Support i1 vectors, maybe by promoting to i8?
3298   if (Vec.getValueType() != VT || VT.getVectorElementType() == MVT::i1)
3299     return SDValue();
3300   SDValue Idx = SplatVal.getOperand(1);
3301   // The index must be a legal type.
3302   if (Idx.getValueType() != Subtarget.getXLenVT())
3303     return SDValue();
3304 
3305   MVT ContainerVT = VT;
3306   if (VT.isFixedLengthVector()) {
3307     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
3308     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3309   }
3310 
3311   auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3312 
3313   SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, Vec,
3314                                Idx, DAG.getUNDEF(ContainerVT), Mask, VL);
3315 
3316   if (!VT.isFixedLengthVector())
3317     return Gather;
3318 
3319   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
3320 }
3321 
3322 
3323 /// Try and optimize BUILD_VECTORs with "dominant values" - these are values
3324 /// which constitute a large proportion of the elements. In such cases we can
3325 /// splat a vector with the dominant element and make up the shortfall with
3326 /// INSERT_VECTOR_ELTs.  Returns SDValue if not profitable.
3327 /// Note that this includes vectors of 2 elements by association. The
3328 /// upper-most element is the "dominant" one, allowing us to use a splat to
3329 /// "insert" the upper element, and an insert of the lower element at position
3330 /// 0, which improves codegen.
3331 static SDValue lowerBuildVectorViaDominantValues(SDValue Op, SelectionDAG &DAG,
3332                                                  const RISCVSubtarget &Subtarget) {
3333   MVT VT = Op.getSimpleValueType();
3334   assert(VT.isFixedLengthVector() && "Unexpected vector!");
3335 
3336   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
3337 
3338   SDLoc DL(Op);
3339   auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3340 
3341   MVT XLenVT = Subtarget.getXLenVT();
3342   unsigned NumElts = Op.getNumOperands();
3343 
3344   SDValue DominantValue;
3345   unsigned MostCommonCount = 0;
3346   DenseMap<SDValue, unsigned> ValueCounts;
3347   unsigned NumUndefElts =
3348       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
3349 
3350   // Track the number of scalar loads we know we'd be inserting, estimated as
3351   // any non-zero floating-point constant. Other kinds of element are either
3352   // already in registers or are materialized on demand. The threshold at which
3353   // a vector load is more desirable than several scalar materializion and
3354   // vector-insertion instructions is not known.
3355   unsigned NumScalarLoads = 0;
3356 
3357   for (SDValue V : Op->op_values()) {
3358     if (V.isUndef())
3359       continue;
3360 
3361     ValueCounts.insert(std::make_pair(V, 0));
3362     unsigned &Count = ValueCounts[V];
3363     if (0 == Count)
3364       if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
3365         NumScalarLoads += !CFP->isExactlyValue(+0.0);
3366 
3367     // Is this value dominant? In case of a tie, prefer the highest element as
3368     // it's cheaper to insert near the beginning of a vector than it is at the
3369     // end.
3370     if (++Count >= MostCommonCount) {
3371       DominantValue = V;
3372       MostCommonCount = Count;
3373     }
3374   }
3375 
3376   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
3377   unsigned NumDefElts = NumElts - NumUndefElts;
3378   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
3379 
3380   // Don't perform this optimization when optimizing for size, since
3381   // materializing elements and inserting them tends to cause code bloat.
3382   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
3383       (NumElts != 2 || ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) &&
3384       ((MostCommonCount > DominantValueCountThreshold) ||
3385        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
3386     // Start by splatting the most common element.
3387     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
3388 
3389     DenseSet<SDValue> Processed{DominantValue};
3390 
3391     // We can handle an insert into the last element (of a splat) via
3392     // v(f)slide1down.  This is slightly better than the vslideup insert
3393     // lowering as it avoids the need for a vector group temporary.  It
3394     // is also better than using vmerge.vx as it avoids the need to
3395     // materialize the mask in a vector register.
3396     if (SDValue LastOp = Op->getOperand(Op->getNumOperands() - 1);
3397         !LastOp.isUndef() && ValueCounts[LastOp] == 1 &&
3398         LastOp != DominantValue) {
3399       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3400       auto OpCode =
3401         VT.isFloatingPoint() ? RISCVISD::VFSLIDE1DOWN_VL : RISCVISD::VSLIDE1DOWN_VL;
3402       if (!VT.isFloatingPoint())
3403         LastOp = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, LastOp);
3404       Vec = DAG.getNode(OpCode, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Vec,
3405                         LastOp, Mask, VL);
3406       Vec = convertFromScalableVector(VT, Vec, DAG, Subtarget);
3407       Processed.insert(LastOp);
3408     }
3409 
3410     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
3411     for (const auto &OpIdx : enumerate(Op->ops())) {
3412       const SDValue &V = OpIdx.value();
3413       if (V.isUndef() || !Processed.insert(V).second)
3414         continue;
3415       if (ValueCounts[V] == 1) {
3416         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
3417                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
3418       } else {
3419         // Blend in all instances of this value using a VSELECT, using a
3420         // mask where each bit signals whether that element is the one
3421         // we're after.
3422         SmallVector<SDValue> Ops;
3423         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
3424           return DAG.getConstant(V == V1, DL, XLenVT);
3425         });
3426         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
3427                           DAG.getBuildVector(SelMaskTy, DL, Ops),
3428                           DAG.getSplatBuildVector(VT, DL, V), Vec);
3429       }
3430     }
3431 
3432     return Vec;
3433   }
3434 
3435   return SDValue();
3436 }
3437 
3438 static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
3439                                            const RISCVSubtarget &Subtarget) {
3440   MVT VT = Op.getSimpleValueType();
3441   assert(VT.isFixedLengthVector() && "Unexpected vector!");
3442 
3443   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
3444 
3445   SDLoc DL(Op);
3446   auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3447 
3448   MVT XLenVT = Subtarget.getXLenVT();
3449   unsigned NumElts = Op.getNumOperands();
3450 
3451   if (VT.getVectorElementType() == MVT::i1) {
3452     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
3453       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
3454       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
3455     }
3456 
3457     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
3458       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
3459       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
3460     }
3461 
3462     // Lower constant mask BUILD_VECTORs via an integer vector type, in
3463     // scalar integer chunks whose bit-width depends on the number of mask
3464     // bits and XLEN.
3465     // First, determine the most appropriate scalar integer type to use. This
3466     // is at most XLenVT, but may be shrunk to a smaller vector element type
3467     // according to the size of the final vector - use i8 chunks rather than
3468     // XLenVT if we're producing a v8i1. This results in more consistent
3469     // codegen across RV32 and RV64.
3470     unsigned NumViaIntegerBits = std::clamp(NumElts, 8u, Subtarget.getXLen());
3471     NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELen());
3472     // If we have to use more than one INSERT_VECTOR_ELT then this
3473     // optimization is likely to increase code size; avoid peforming it in
3474     // such a case. We can use a load from a constant pool in this case.
3475     if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
3476       return SDValue();
3477     // Now we can create our integer vector type. Note that it may be larger
3478     // than the resulting mask type: v4i1 would use v1i8 as its integer type.
3479     unsigned IntegerViaVecElts = divideCeil(NumElts, NumViaIntegerBits);
3480     MVT IntegerViaVecVT =
3481       MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
3482                        IntegerViaVecElts);
3483 
3484     uint64_t Bits = 0;
3485     unsigned BitPos = 0, IntegerEltIdx = 0;
3486     SmallVector<SDValue, 8> Elts(IntegerViaVecElts);
3487 
3488     for (unsigned I = 0; I < NumElts;) {
3489       SDValue V = Op.getOperand(I);
3490       bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
3491       Bits |= ((uint64_t)BitValue << BitPos);
3492       ++BitPos;
3493       ++I;
3494 
3495       // Once we accumulate enough bits to fill our scalar type or process the
3496       // last element, insert into our vector and clear our accumulated data.
3497       if (I % NumViaIntegerBits == 0 || I == NumElts) {
3498         if (NumViaIntegerBits <= 32)
3499           Bits = SignExtend64<32>(Bits);
3500         SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
3501         Elts[IntegerEltIdx] = Elt;
3502         Bits = 0;
3503         BitPos = 0;
3504         IntegerEltIdx++;
3505       }
3506     }
3507 
3508     SDValue Vec = DAG.getBuildVector(IntegerViaVecVT, DL, Elts);
3509 
3510     if (NumElts < NumViaIntegerBits) {
3511       // If we're producing a smaller vector than our minimum legal integer
3512       // type, bitcast to the equivalent (known-legal) mask type, and extract
3513       // our final mask.
3514       assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
3515       Vec = DAG.getBitcast(MVT::v8i1, Vec);
3516       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
3517                         DAG.getConstant(0, DL, XLenVT));
3518     } else {
3519       // Else we must have produced an integer type with the same size as the
3520       // mask type; bitcast for the final result.
3521       assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
3522       Vec = DAG.getBitcast(VT, Vec);
3523     }
3524 
3525     return Vec;
3526   }
3527 
3528   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
3529     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
3530                                         : RISCVISD::VMV_V_X_VL;
3531     if (!VT.isFloatingPoint())
3532       Splat = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Splat);
3533     Splat =
3534         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Splat, VL);
3535     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
3536   }
3537 
3538   // Try and match index sequences, which we can lower to the vid instruction
3539   // with optional modifications. An all-undef vector is matched by
3540   // getSplatValue, above.
3541   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
3542     int64_t StepNumerator = SimpleVID->StepNumerator;
3543     unsigned StepDenominator = SimpleVID->StepDenominator;
3544     int64_t Addend = SimpleVID->Addend;
3545 
3546     assert(StepNumerator != 0 && "Invalid step");
3547     bool Negate = false;
3548     int64_t SplatStepVal = StepNumerator;
3549     unsigned StepOpcode = ISD::MUL;
3550     // Exclude INT64_MIN to avoid passing it to std::abs. We won't optimize it
3551     // anyway as the shift of 63 won't fit in uimm5.
3552     if (StepNumerator != 1 && StepNumerator != INT64_MIN &&
3553         isPowerOf2_64(std::abs(StepNumerator))) {
3554       Negate = StepNumerator < 0;
3555       StepOpcode = ISD::SHL;
3556       SplatStepVal = Log2_64(std::abs(StepNumerator));
3557     }
3558 
3559     // Only emit VIDs with suitably-small steps/addends. We use imm5 is a
3560     // threshold since it's the immediate value many RVV instructions accept.
3561     // There is no vmul.vi instruction so ensure multiply constant can fit in
3562     // a single addi instruction.
3563     if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
3564          (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
3565         isPowerOf2_32(StepDenominator) &&
3566         (SplatStepVal >= 0 || StepDenominator == 1) && isInt<5>(Addend)) {
3567       MVT VIDVT =
3568           VT.isFloatingPoint() ? VT.changeVectorElementTypeToInteger() : VT;
3569       MVT VIDContainerVT =
3570           getContainerForFixedLengthVector(DAG, VIDVT, Subtarget);
3571       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VIDContainerVT, Mask, VL);
3572       // Convert right out of the scalable type so we can use standard ISD
3573       // nodes for the rest of the computation. If we used scalable types with
3574       // these, we'd lose the fixed-length vector info and generate worse
3575       // vsetvli code.
3576       VID = convertFromScalableVector(VIDVT, VID, DAG, Subtarget);
3577       if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
3578           (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
3579         SDValue SplatStep = DAG.getConstant(SplatStepVal, DL, VIDVT);
3580         VID = DAG.getNode(StepOpcode, DL, VIDVT, VID, SplatStep);
3581       }
3582       if (StepDenominator != 1) {
3583         SDValue SplatStep =
3584             DAG.getConstant(Log2_64(StepDenominator), DL, VIDVT);
3585         VID = DAG.getNode(ISD::SRL, DL, VIDVT, VID, SplatStep);
3586       }
3587       if (Addend != 0 || Negate) {
3588         SDValue SplatAddend = DAG.getConstant(Addend, DL, VIDVT);
3589         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VIDVT, SplatAddend,
3590                           VID);
3591       }
3592       if (VT.isFloatingPoint()) {
3593         // TODO: Use vfwcvt to reduce register pressure.
3594         VID = DAG.getNode(ISD::SINT_TO_FP, DL, VT, VID);
3595       }
3596       return VID;
3597     }
3598   }
3599 
3600   // For very small build_vectors, use a single scalar insert of a constant.
3601   // TODO: Base this on constant rematerialization cost, not size.
3602   const unsigned EltBitSize = VT.getScalarSizeInBits();
3603   if (VT.getSizeInBits() <= 32 &&
3604       ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
3605     MVT ViaIntVT = MVT::getIntegerVT(VT.getSizeInBits());
3606     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32) &&
3607            "Unexpected sequence type");
3608     // If we can use the original VL with the modified element type, this
3609     // means we only have a VTYPE toggle, not a VL toggle.  TODO: Should this
3610     // be moved into InsertVSETVLI?
3611     unsigned ViaVecLen =
3612       (Subtarget.getRealMinVLen() >= VT.getSizeInBits() * NumElts) ? NumElts : 1;
3613     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, ViaVecLen);
3614 
3615     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
3616     uint64_t SplatValue = 0;
3617     // Construct the amalgamated value at this larger vector type.
3618     for (const auto &OpIdx : enumerate(Op->op_values())) {
3619       const auto &SeqV = OpIdx.value();
3620       if (!SeqV.isUndef())
3621         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
3622                        << (OpIdx.index() * EltBitSize));
3623     }
3624 
3625     // On RV64, sign-extend from 32 to 64 bits where possible in order to
3626     // achieve better constant materializion.
3627     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
3628       SplatValue = SignExtend64<32>(SplatValue);
3629 
3630     SDValue Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ViaVecVT,
3631                               DAG.getUNDEF(ViaVecVT),
3632                               DAG.getConstant(SplatValue, DL, XLenVT),
3633                               DAG.getConstant(0, DL, XLenVT));
3634     if (ViaVecLen != 1)
3635       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
3636                         MVT::getVectorVT(ViaIntVT, 1), Vec,
3637                         DAG.getConstant(0, DL, XLenVT));
3638     return DAG.getBitcast(VT, Vec);
3639   }
3640 
3641 
3642   // Attempt to detect "hidden" splats, which only reveal themselves as splats
3643   // when re-interpreted as a vector with a larger element type. For example,
3644   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
3645   // could be instead splat as
3646   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
3647   // TODO: This optimization could also work on non-constant splats, but it
3648   // would require bit-manipulation instructions to construct the splat value.
3649   SmallVector<SDValue> Sequence;
3650   const auto *BV = cast<BuildVectorSDNode>(Op);
3651   if (VT.isInteger() && EltBitSize < 64 &&
3652       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
3653       BV->getRepeatedSequence(Sequence) &&
3654       (Sequence.size() * EltBitSize) <= 64) {
3655     unsigned SeqLen = Sequence.size();
3656     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
3657     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
3658             ViaIntVT == MVT::i64) &&
3659            "Unexpected sequence type");
3660 
3661     // If we can use the original VL with the modified element type, this
3662     // means we only have a VTYPE toggle, not a VL toggle.  TODO: Should this
3663     // be moved into InsertVSETVLI?
3664     const unsigned RequiredVL = NumElts / SeqLen;
3665     const unsigned ViaVecLen =
3666       (Subtarget.getRealMinVLen() >= ViaIntVT.getSizeInBits() * NumElts) ?
3667       NumElts : RequiredVL;
3668     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, ViaVecLen);
3669 
3670     unsigned EltIdx = 0;
3671     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
3672     uint64_t SplatValue = 0;
3673     // Construct the amalgamated value which can be splatted as this larger
3674     // vector type.
3675     for (const auto &SeqV : Sequence) {
3676       if (!SeqV.isUndef())
3677         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
3678                        << (EltIdx * EltBitSize));
3679       EltIdx++;
3680     }
3681 
3682     // On RV64, sign-extend from 32 to 64 bits where possible in order to
3683     // achieve better constant materializion.
3684     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
3685       SplatValue = SignExtend64<32>(SplatValue);
3686 
3687     // Since we can't introduce illegal i64 types at this stage, we can only
3688     // perform an i64 splat on RV32 if it is its own sign-extended value. That
3689     // way we can use RVV instructions to splat.
3690     assert((ViaIntVT.bitsLE(XLenVT) ||
3691             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
3692            "Unexpected bitcast sequence");
3693     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
3694       SDValue ViaVL =
3695           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
3696       MVT ViaContainerVT =
3697           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
3698       SDValue Splat =
3699           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
3700                       DAG.getUNDEF(ViaContainerVT),
3701                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
3702       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
3703       if (ViaVecLen != RequiredVL)
3704         Splat = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
3705                             MVT::getVectorVT(ViaIntVT, RequiredVL), Splat,
3706                             DAG.getConstant(0, DL, XLenVT));
3707       return DAG.getBitcast(VT, Splat);
3708     }
3709   }
3710 
3711   // If the number of signbits allows, see if we can lower as a <N x i8>.
3712   // Our main goal here is to reduce LMUL (and thus work) required to
3713   // build the constant, but we will also narrow if the resulting
3714   // narrow vector is known to materialize cheaply.
3715   // TODO: We really should be costing the smaller vector.  There are
3716   // profitable cases this misses.
3717   if (EltBitSize > 8 && VT.isInteger() &&
3718       (NumElts <= 4 || VT.getSizeInBits() > Subtarget.getRealMinVLen())) {
3719     unsigned SignBits = DAG.ComputeNumSignBits(Op);
3720     if (EltBitSize - SignBits < 8) {
3721       SDValue Source = DAG.getBuildVector(VT.changeVectorElementType(MVT::i8),
3722                                           DL, Op->ops());
3723       Source = convertToScalableVector(ContainerVT.changeVectorElementType(MVT::i8),
3724                                        Source, DAG, Subtarget);
3725       SDValue Res = DAG.getNode(RISCVISD::VSEXT_VL, DL, ContainerVT, Source, Mask, VL);
3726       return convertFromScalableVector(VT, Res, DAG, Subtarget);
3727     }
3728   }
3729 
3730   if (SDValue Res = lowerBuildVectorViaDominantValues(Op, DAG, Subtarget))
3731     return Res;
3732 
3733   // For constant vectors, use generic constant pool lowering.  Otherwise,
3734   // we'd have to materialize constants in GPRs just to move them into the
3735   // vector.
3736   return SDValue();
3737 }
3738 
3739 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
3740                                  const RISCVSubtarget &Subtarget) {
3741   MVT VT = Op.getSimpleValueType();
3742   assert(VT.isFixedLengthVector() && "Unexpected vector!");
3743 
3744   if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
3745       ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()))
3746     return lowerBuildVectorOfConstants(Op, DAG, Subtarget);
3747 
3748   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
3749 
3750   SDLoc DL(Op);
3751   auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3752 
3753   MVT XLenVT = Subtarget.getXLenVT();
3754 
3755   if (VT.getVectorElementType() == MVT::i1) {
3756     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
3757     // vector type, we have a legal equivalently-sized i8 type, so we can use
3758     // that.
3759     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
3760     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
3761 
3762     SDValue WideVec;
3763     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
3764       // For a splat, perform a scalar truncate before creating the wider
3765       // vector.
3766       Splat = DAG.getNode(ISD::AND, DL, Splat.getValueType(), Splat,
3767                           DAG.getConstant(1, DL, Splat.getValueType()));
3768       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
3769     } else {
3770       SmallVector<SDValue, 8> Ops(Op->op_values());
3771       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
3772       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
3773       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
3774     }
3775 
3776     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
3777   }
3778 
3779   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
3780     if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
3781       return Gather;
3782     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
3783                                         : RISCVISD::VMV_V_X_VL;
3784     if (!VT.isFloatingPoint())
3785       Splat = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Splat);
3786     Splat =
3787         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Splat, VL);
3788     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
3789   }
3790 
3791   if (SDValue Res = lowerBuildVectorViaDominantValues(Op, DAG, Subtarget))
3792     return Res;
3793 
3794   // If we're compiling for an exact VLEN value, we can split our work per
3795   // register in the register group.
3796   const unsigned MinVLen = Subtarget.getRealMinVLen();
3797   const unsigned MaxVLen = Subtarget.getRealMaxVLen();
3798   if (MinVLen == MaxVLen && VT.getSizeInBits().getKnownMinValue() > MinVLen) {
3799     MVT ElemVT = VT.getVectorElementType();
3800     unsigned ElemsPerVReg = MinVLen / ElemVT.getFixedSizeInBits();
3801     EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
3802     MVT OneRegVT = MVT::getVectorVT(ElemVT, ElemsPerVReg);
3803     MVT M1VT = getContainerForFixedLengthVector(DAG, OneRegVT, Subtarget);
3804     assert(M1VT == getLMUL1VT(M1VT));
3805 
3806     // The following semantically builds up a fixed length concat_vector
3807     // of the component build_vectors.  We eagerly lower to scalable and
3808     // insert_subvector here to avoid DAG combining it back to a large
3809     // build_vector.
3810     SmallVector<SDValue> BuildVectorOps(Op->op_begin(), Op->op_end());
3811     unsigned NumOpElts = M1VT.getVectorMinNumElements();
3812     SDValue Vec = DAG.getUNDEF(ContainerVT);
3813     for (unsigned i = 0; i < VT.getVectorNumElements(); i += ElemsPerVReg) {
3814       auto OneVRegOfOps = ArrayRef(BuildVectorOps).slice(i, ElemsPerVReg);
3815       SDValue SubBV =
3816           DAG.getNode(ISD::BUILD_VECTOR, DL, OneRegVT, OneVRegOfOps);
3817       SubBV = convertToScalableVector(M1VT, SubBV, DAG, Subtarget);
3818       unsigned InsertIdx = (i / ElemsPerVReg) * NumOpElts;
3819       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT, Vec, SubBV,
3820                         DAG.getVectorIdxConstant(InsertIdx, DL));
3821     }
3822     return convertFromScalableVector(VT, Vec, DAG, Subtarget);
3823   }
3824 
3825   // Cap the cost at a value linear to the number of elements in the vector.
3826   // The default lowering is to use the stack.  The vector store + scalar loads
3827   // is linear in VL.  However, at high lmuls vslide1down and vslidedown end up
3828   // being (at least) linear in LMUL.  As a result, using the vslidedown
3829   // lowering for every element ends up being VL*LMUL..
3830   // TODO: Should we be directly costing the stack alternative?  Doing so might
3831   // give us a more accurate upper bound.
3832   InstructionCost LinearBudget = VT.getVectorNumElements() * 2;
3833 
3834   // TODO: unify with TTI getSlideCost.
3835   InstructionCost PerSlideCost = 1;
3836   switch (RISCVTargetLowering::getLMUL(ContainerVT)) {
3837   default: break;
3838   case RISCVII::VLMUL::LMUL_2:
3839     PerSlideCost = 2;
3840     break;
3841   case RISCVII::VLMUL::LMUL_4:
3842     PerSlideCost = 4;
3843     break;
3844   case RISCVII::VLMUL::LMUL_8:
3845     PerSlideCost = 8;
3846     break;
3847   }
3848 
3849   // TODO: Should we be using the build instseq then cost + evaluate scheme
3850   // we use for integer constants here?
3851   unsigned UndefCount = 0;
3852   for (const SDValue &V : Op->ops()) {
3853     if (V.isUndef()) {
3854       UndefCount++;
3855       continue;
3856     }
3857     if (UndefCount) {
3858       LinearBudget -= PerSlideCost;
3859       UndefCount = 0;
3860     }
3861     LinearBudget -= PerSlideCost;
3862   }
3863   if (UndefCount) {
3864     LinearBudget -= PerSlideCost;
3865   }
3866 
3867   if (LinearBudget < 0)
3868     return SDValue();
3869 
3870   assert((!VT.isFloatingPoint() ||
3871           VT.getVectorElementType().getSizeInBits() <= Subtarget.getFLen()) &&
3872          "Illegal type which will result in reserved encoding");
3873 
3874   const unsigned Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
3875 
3876   SDValue Vec;
3877   UndefCount = 0;
3878   for (SDValue V : Op->ops()) {
3879     if (V.isUndef()) {
3880       UndefCount++;
3881       continue;
3882     }
3883 
3884     // Start our sequence with a TA splat in the hopes that hardware is able to
3885     // recognize there's no dependency on the prior value of our temporary
3886     // register.
3887     if (!Vec) {
3888       Vec = DAG.getSplatVector(VT, DL, V);
3889       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3890       UndefCount = 0;
3891       continue;
3892     }
3893 
3894     if (UndefCount) {
3895       const SDValue Offset = DAG.getConstant(UndefCount, DL, Subtarget.getXLenVT());
3896       Vec = getVSlidedown(DAG, Subtarget, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
3897                           Vec, Offset, Mask, VL, Policy);
3898       UndefCount = 0;
3899     }
3900     auto OpCode =
3901       VT.isFloatingPoint() ? RISCVISD::VFSLIDE1DOWN_VL : RISCVISD::VSLIDE1DOWN_VL;
3902     if (!VT.isFloatingPoint())
3903       V = DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), V);
3904     Vec = DAG.getNode(OpCode, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Vec,
3905                       V, Mask, VL);
3906   }
3907   if (UndefCount) {
3908     const SDValue Offset = DAG.getConstant(UndefCount, DL, Subtarget.getXLenVT());
3909     Vec = getVSlidedown(DAG, Subtarget, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
3910                         Vec, Offset, Mask, VL, Policy);
3911   }
3912   return convertFromScalableVector(VT, Vec, DAG, Subtarget);
3913 }
3914 
3915 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
3916                                    SDValue Lo, SDValue Hi, SDValue VL,
3917                                    SelectionDAG &DAG) {
3918   if (!Passthru)
3919     Passthru = DAG.getUNDEF(VT);
3920   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
3921     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
3922     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
3923     // If Hi constant is all the same sign bit as Lo, lower this as a custom
3924     // node in order to try and match RVV vector/scalar instructions.
3925     if ((LoC >> 31) == HiC)
3926       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
3927 
3928     // If vl is equal to VLMAX or fits in 4 bits and Hi constant is equal to Lo,
3929     // we could use vmv.v.x whose EEW = 32 to lower it. This allows us to use
3930     // vlmax vsetvli or vsetivli to change the VL.
3931     // FIXME: Support larger constants?
3932     // FIXME: Support non-constant VLs by saturating?
3933     if (LoC == HiC) {
3934       SDValue NewVL;
3935       if (isAllOnesConstant(VL) ||
3936           (isa<RegisterSDNode>(VL) &&
3937            cast<RegisterSDNode>(VL)->getReg() == RISCV::X0))
3938         NewVL = DAG.getRegister(RISCV::X0, MVT::i32);
3939       else if (isa<ConstantSDNode>(VL) &&
3940                isUInt<4>(cast<ConstantSDNode>(VL)->getZExtValue()))
3941         NewVL = DAG.getNode(ISD::ADD, DL, VL.getValueType(), VL, VL);
3942 
3943       if (NewVL) {
3944         MVT InterVT =
3945             MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
3946         auto InterVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterVT,
3947                                     DAG.getUNDEF(InterVT), Lo,
3948                                     DAG.getRegister(RISCV::X0, MVT::i32));
3949         return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
3950       }
3951     }
3952   }
3953 
3954   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
3955   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
3956       isa<ConstantSDNode>(Hi.getOperand(1)) &&
3957       Hi.getConstantOperandVal(1) == 31)
3958     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
3959 
3960   // If the hi bits of the splat are undefined, then it's fine to just splat Lo
3961   // even if it might be sign extended.
3962   if (Hi.isUndef())
3963     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
3964 
3965   // Fall back to a stack store and stride x0 vector load.
3966   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Passthru, Lo,
3967                      Hi, VL);
3968 }
3969 
3970 // Called by type legalization to handle splat of i64 on RV32.
3971 // FIXME: We can optimize this when the type has sign or zero bits in one
3972 // of the halves.
3973 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
3974                                    SDValue Scalar, SDValue VL,
3975                                    SelectionDAG &DAG) {
3976   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
3977   SDValue Lo, Hi;
3978   std::tie(Lo, Hi) = DAG.SplitScalar(Scalar, DL, MVT::i32, MVT::i32);
3979   return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG);
3980 }
3981 
3982 // This function lowers a splat of a scalar operand Splat with the vector
3983 // length VL. It ensures the final sequence is type legal, which is useful when
3984 // lowering a splat after type legalization.
3985 static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
3986                                 MVT VT, const SDLoc &DL, SelectionDAG &DAG,
3987                                 const RISCVSubtarget &Subtarget) {
3988   bool HasPassthru = Passthru && !Passthru.isUndef();
3989   if (!HasPassthru && !Passthru)
3990     Passthru = DAG.getUNDEF(VT);
3991   if (VT.isFloatingPoint())
3992     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Passthru, Scalar, VL);
3993 
3994   MVT XLenVT = Subtarget.getXLenVT();
3995 
3996   // Simplest case is that the operand needs to be promoted to XLenVT.
3997   if (Scalar.getValueType().bitsLE(XLenVT)) {
3998     // If the operand is a constant, sign extend to increase our chances
3999     // of being able to use a .vi instruction. ANY_EXTEND would become a
4000     // a zero extend and the simm5 check in isel would fail.
4001     // FIXME: Should we ignore the upper bits in isel instead?
4002     unsigned ExtOpc =
4003         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4004     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
4005     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
4006   }
4007 
4008   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
4009          "Unexpected scalar for splat lowering!");
4010 
4011   if (isOneConstant(VL) && isNullConstant(Scalar))
4012     return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru,
4013                        DAG.getConstant(0, DL, XLenVT), VL);
4014 
4015   // Otherwise use the more complicated splatting algorithm.
4016   return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG);
4017 }
4018 
4019 // This function lowers an insert of a scalar operand Scalar into lane
4020 // 0 of the vector regardless of the value of VL.  The contents of the
4021 // remaining lanes of the result vector are unspecified.  VL is assumed
4022 // to be non-zero.
4023 static SDValue lowerScalarInsert(SDValue Scalar, SDValue VL, MVT VT,
4024                                  const SDLoc &DL, SelectionDAG &DAG,
4025                                  const RISCVSubtarget &Subtarget) {
4026   assert(VT.isScalableVector() && "Expect VT is scalable vector type.");
4027 
4028   const MVT XLenVT = Subtarget.getXLenVT();
4029   SDValue Passthru = DAG.getUNDEF(VT);
4030 
4031   if (Scalar.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
4032       isNullConstant(Scalar.getOperand(1))) {
4033     SDValue ExtractedVal = Scalar.getOperand(0);
4034     MVT ExtractedVT = ExtractedVal.getSimpleValueType();
4035     MVT ExtractedContainerVT = ExtractedVT;
4036     if (ExtractedContainerVT.isFixedLengthVector()) {
4037       ExtractedContainerVT = getContainerForFixedLengthVector(
4038           DAG, ExtractedContainerVT, Subtarget);
4039       ExtractedVal = convertToScalableVector(ExtractedContainerVT, ExtractedVal,
4040                                              DAG, Subtarget);
4041     }
4042     if (ExtractedContainerVT.bitsLE(VT))
4043       return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Passthru, ExtractedVal,
4044                          DAG.getConstant(0, DL, XLenVT));
4045     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ExtractedVal,
4046                        DAG.getConstant(0, DL, XLenVT));
4047   }
4048 
4049 
4050   if (VT.isFloatingPoint())
4051     return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT,
4052                        DAG.getUNDEF(VT), Scalar, VL);
4053 
4054   // Avoid the tricky legalization cases by falling back to using the
4055   // splat code which already handles it gracefully.
4056   if (!Scalar.getValueType().bitsLE(XLenVT))
4057     return lowerScalarSplat(DAG.getUNDEF(VT), Scalar,
4058                             DAG.getConstant(1, DL, XLenVT),
4059                             VT, DL, DAG, Subtarget);
4060 
4061   // If the operand is a constant, sign extend to increase our chances
4062   // of being able to use a .vi instruction. ANY_EXTEND would become a
4063   // a zero extend and the simm5 check in isel would fail.
4064   // FIXME: Should we ignore the upper bits in isel instead?
4065   unsigned ExtOpc =
4066     isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4067   Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
4068   return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT,
4069                      DAG.getUNDEF(VT), Scalar, VL);
4070 }
4071 
4072 // Is this a shuffle extracts either the even or odd elements of a vector?
4073 // That is, specifically, either (a) or (b) below.
4074 // t34: v8i8 = extract_subvector t11, Constant:i64<0>
4075 // t33: v8i8 = extract_subvector t11, Constant:i64<8>
4076 // a) t35: v8i8 = vector_shuffle<0,2,4,6,8,10,12,14> t34, t33
4077 // b) t35: v8i8 = vector_shuffle<1,3,5,7,9,11,13,15> t34, t33
4078 // Returns {Src Vector, Even Elements} om success
4079 static bool isDeinterleaveShuffle(MVT VT, MVT ContainerVT, SDValue V1,
4080                                   SDValue V2, ArrayRef<int> Mask,
4081                                   const RISCVSubtarget &Subtarget) {
4082   // Need to be able to widen the vector.
4083   if (VT.getScalarSizeInBits() >= Subtarget.getELen())
4084     return false;
4085 
4086   // Both input must be extracts.
4087   if (V1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
4088       V2.getOpcode() != ISD::EXTRACT_SUBVECTOR)
4089     return false;
4090 
4091   // Extracting from the same source.
4092   SDValue Src = V1.getOperand(0);
4093   if (Src != V2.getOperand(0))
4094     return false;
4095 
4096   // Src needs to have twice the number of elements.
4097   if (Src.getValueType().getVectorNumElements() != (Mask.size() * 2))
4098     return false;
4099 
4100   // The extracts must extract the two halves of the source.
4101   if (V1.getConstantOperandVal(1) != 0 ||
4102       V2.getConstantOperandVal(1) != Mask.size())
4103     return false;
4104 
4105   // First index must be the first even or odd element from V1.
4106   if (Mask[0] != 0 && Mask[0] != 1)
4107     return false;
4108 
4109   // The others must increase by 2 each time.
4110   // TODO: Support undef elements?
4111   for (unsigned i = 1; i != Mask.size(); ++i)
4112     if (Mask[i] != Mask[i - 1] + 2)
4113       return false;
4114 
4115   return true;
4116 }
4117 
4118 /// Is this shuffle interleaving contiguous elements from one vector into the
4119 /// even elements and contiguous elements from another vector into the odd
4120 /// elements. \p EvenSrc will contain the element that should be in the first
4121 /// even element. \p OddSrc will contain the element that should be in the first
4122 /// odd element. These can be the first element in a source or the element half
4123 /// way through the source.
4124 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, int &EvenSrc,
4125                                 int &OddSrc, const RISCVSubtarget &Subtarget) {
4126   // We need to be able to widen elements to the next larger integer type.
4127   if (VT.getScalarSizeInBits() >= Subtarget.getELen())
4128     return false;
4129 
4130   int Size = Mask.size();
4131   int NumElts = VT.getVectorNumElements();
4132   assert(Size == (int)NumElts && "Unexpected mask size");
4133 
4134   SmallVector<unsigned, 2> StartIndexes;
4135   if (!ShuffleVectorInst::isInterleaveMask(Mask, 2, Size * 2, StartIndexes))
4136     return false;
4137 
4138   EvenSrc = StartIndexes[0];
4139   OddSrc = StartIndexes[1];
4140 
4141   // One source should be low half of first vector.
4142   if (EvenSrc != 0 && OddSrc != 0)
4143     return false;
4144 
4145   // Subvectors will be subtracted from either at the start of the two input
4146   // vectors, or at the start and middle of the first vector if it's an unary
4147   // interleave.
4148   // In both cases, HalfNumElts will be extracted.
4149   // We need to ensure that the extract indices are 0 or HalfNumElts otherwise
4150   // we'll create an illegal extract_subvector.
4151   // FIXME: We could support other values using a slidedown first.
4152   int HalfNumElts = NumElts / 2;
4153   return ((EvenSrc % HalfNumElts) == 0) && ((OddSrc % HalfNumElts) == 0);
4154 }
4155 
4156 /// Match shuffles that concatenate two vectors, rotate the concatenation,
4157 /// and then extract the original number of elements from the rotated result.
4158 /// This is equivalent to vector.splice or X86's PALIGNR instruction. The
4159 /// returned rotation amount is for a rotate right, where elements move from
4160 /// higher elements to lower elements. \p LoSrc indicates the first source
4161 /// vector of the rotate or -1 for undef. \p HiSrc indicates the second vector
4162 /// of the rotate or -1 for undef. At least one of \p LoSrc and \p HiSrc will be
4163 /// 0 or 1 if a rotation is found.
4164 ///
4165 /// NOTE: We talk about rotate to the right which matches how bit shift and
4166 /// rotate instructions are described where LSBs are on the right, but LLVM IR
4167 /// and the table below write vectors with the lowest elements on the left.
4168 static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
4169   int Size = Mask.size();
4170 
4171   // We need to detect various ways of spelling a rotation:
4172   //   [11, 12, 13, 14, 15,  0,  1,  2]
4173   //   [-1, 12, 13, 14, -1, -1,  1, -1]
4174   //   [-1, -1, -1, -1, -1, -1,  1,  2]
4175   //   [ 3,  4,  5,  6,  7,  8,  9, 10]
4176   //   [-1,  4,  5,  6, -1, -1,  9, -1]
4177   //   [-1,  4,  5,  6, -1, -1, -1, -1]
4178   int Rotation = 0;
4179   LoSrc = -1;
4180   HiSrc = -1;
4181   for (int i = 0; i != Size; ++i) {
4182     int M = Mask[i];
4183     if (M < 0)
4184       continue;
4185 
4186     // Determine where a rotate vector would have started.
4187     int StartIdx = i - (M % Size);
4188     // The identity rotation isn't interesting, stop.
4189     if (StartIdx == 0)
4190       return -1;
4191 
4192     // If we found the tail of a vector the rotation must be the missing
4193     // front. If we found the head of a vector, it must be how much of the
4194     // head.
4195     int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
4196 
4197     if (Rotation == 0)
4198       Rotation = CandidateRotation;
4199     else if (Rotation != CandidateRotation)
4200       // The rotations don't match, so we can't match this mask.
4201       return -1;
4202 
4203     // Compute which value this mask is pointing at.
4204     int MaskSrc = M < Size ? 0 : 1;
4205 
4206     // Compute which of the two target values this index should be assigned to.
4207     // This reflects whether the high elements are remaining or the low elemnts
4208     // are remaining.
4209     int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
4210 
4211     // Either set up this value if we've not encountered it before, or check
4212     // that it remains consistent.
4213     if (TargetSrc < 0)
4214       TargetSrc = MaskSrc;
4215     else if (TargetSrc != MaskSrc)
4216       // This may be a rotation, but it pulls from the inputs in some
4217       // unsupported interleaving.
4218       return -1;
4219   }
4220 
4221   // Check that we successfully analyzed the mask, and normalize the results.
4222   assert(Rotation != 0 && "Failed to locate a viable rotation!");
4223   assert((LoSrc >= 0 || HiSrc >= 0) &&
4224          "Failed to find a rotated input vector!");
4225 
4226   return Rotation;
4227 }
4228 
4229 // Lower a deinterleave shuffle to vnsrl.
4230 // [a, p, b, q, c, r, d, s] -> [a, b, c, d] (EvenElts == true)
4231 //                          -> [p, q, r, s] (EvenElts == false)
4232 // VT is the type of the vector to return, <[vscale x ]n x ty>
4233 // Src is the vector to deinterleave of type <[vscale x ]n*2 x ty>
4234 static SDValue getDeinterleaveViaVNSRL(const SDLoc &DL, MVT VT, SDValue Src,
4235                                        bool EvenElts,
4236                                        const RISCVSubtarget &Subtarget,
4237                                        SelectionDAG &DAG) {
4238   // The result is a vector of type <m x n x ty>
4239   MVT ContainerVT = VT;
4240   // Convert fixed vectors to scalable if needed
4241   if (ContainerVT.isFixedLengthVector()) {
4242     assert(Src.getSimpleValueType().isFixedLengthVector());
4243     ContainerVT = getContainerForFixedLengthVector(DAG, ContainerVT, Subtarget);
4244 
4245     // The source is a vector of type <m x n*2 x ty>
4246     MVT SrcContainerVT =
4247         MVT::getVectorVT(ContainerVT.getVectorElementType(),
4248                          ContainerVT.getVectorElementCount() * 2);
4249     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
4250   }
4251 
4252   auto [TrueMask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4253 
4254   // Bitcast the source vector from <m x n*2 x ty> -> <m x n x ty*2>
4255   // This also converts FP to int.
4256   unsigned EltBits = ContainerVT.getScalarSizeInBits();
4257   MVT WideSrcContainerVT = MVT::getVectorVT(
4258       MVT::getIntegerVT(EltBits * 2), ContainerVT.getVectorElementCount());
4259   Src = DAG.getBitcast(WideSrcContainerVT, Src);
4260 
4261   // The integer version of the container type.
4262   MVT IntContainerVT = ContainerVT.changeVectorElementTypeToInteger();
4263 
4264   // If we want even elements, then the shift amount is 0. Otherwise, shift by
4265   // the original element size.
4266   unsigned Shift = EvenElts ? 0 : EltBits;
4267   SDValue SplatShift = DAG.getNode(
4268       RISCVISD::VMV_V_X_VL, DL, IntContainerVT, DAG.getUNDEF(ContainerVT),
4269       DAG.getConstant(Shift, DL, Subtarget.getXLenVT()), VL);
4270   SDValue Res =
4271       DAG.getNode(RISCVISD::VNSRL_VL, DL, IntContainerVT, Src, SplatShift,
4272                   DAG.getUNDEF(IntContainerVT), TrueMask, VL);
4273   // Cast back to FP if needed.
4274   Res = DAG.getBitcast(ContainerVT, Res);
4275 
4276   if (VT.isFixedLengthVector())
4277     Res = convertFromScalableVector(VT, Res, DAG, Subtarget);
4278   return Res;
4279 }
4280 
4281 // Lower the following shuffle to vslidedown.
4282 // a)
4283 // t49: v8i8 = extract_subvector t13, Constant:i64<0>
4284 // t109: v8i8 = extract_subvector t13, Constant:i64<8>
4285 // t108: v8i8 = vector_shuffle<1,2,3,4,5,6,7,8> t49, t106
4286 // b)
4287 // t69: v16i16 = extract_subvector t68, Constant:i64<0>
4288 // t23: v8i16 = extract_subvector t69, Constant:i64<0>
4289 // t29: v4i16 = extract_subvector t23, Constant:i64<4>
4290 // t26: v8i16 = extract_subvector t69, Constant:i64<8>
4291 // t30: v4i16 = extract_subvector t26, Constant:i64<0>
4292 // t54: v4i16 = vector_shuffle<1,2,3,4> t29, t30
4293 static SDValue lowerVECTOR_SHUFFLEAsVSlidedown(const SDLoc &DL, MVT VT,
4294                                                SDValue V1, SDValue V2,
4295                                                ArrayRef<int> Mask,
4296                                                const RISCVSubtarget &Subtarget,
4297                                                SelectionDAG &DAG) {
4298   auto findNonEXTRACT_SUBVECTORParent =
4299       [](SDValue Parent) -> std::pair<SDValue, uint64_t> {
4300     uint64_t Offset = 0;
4301     while (Parent.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
4302            // EXTRACT_SUBVECTOR can be used to extract a fixed-width vector from
4303            // a scalable vector. But we don't want to match the case.
4304            Parent.getOperand(0).getSimpleValueType().isFixedLengthVector()) {
4305       Offset += Parent.getConstantOperandVal(1);
4306       Parent = Parent.getOperand(0);
4307     }
4308     return std::make_pair(Parent, Offset);
4309   };
4310 
4311   auto [V1Src, V1IndexOffset] = findNonEXTRACT_SUBVECTORParent(V1);
4312   auto [V2Src, V2IndexOffset] = findNonEXTRACT_SUBVECTORParent(V2);
4313 
4314   // Extracting from the same source.
4315   SDValue Src = V1Src;
4316   if (Src != V2Src)
4317     return SDValue();
4318 
4319   // Rebuild mask because Src may be from multiple EXTRACT_SUBVECTORs.
4320   SmallVector<int, 16> NewMask(Mask);
4321   for (size_t i = 0; i != NewMask.size(); ++i) {
4322     if (NewMask[i] == -1)
4323       continue;
4324 
4325     if (static_cast<size_t>(NewMask[i]) < NewMask.size()) {
4326       NewMask[i] = NewMask[i] + V1IndexOffset;
4327     } else {
4328       // Minus NewMask.size() is needed. Otherwise, the b case would be
4329       // <5,6,7,12> instead of <5,6,7,8>.
4330       NewMask[i] = NewMask[i] - NewMask.size() + V2IndexOffset;
4331     }
4332   }
4333 
4334   // First index must be known and non-zero. It will be used as the slidedown
4335   // amount.
4336   if (NewMask[0] <= 0)
4337     return SDValue();
4338 
4339   // NewMask is also continuous.
4340   for (unsigned i = 1; i != NewMask.size(); ++i)
4341     if (NewMask[i - 1] + 1 != NewMask[i])
4342       return SDValue();
4343 
4344   MVT XLenVT = Subtarget.getXLenVT();
4345   MVT SrcVT = Src.getSimpleValueType();
4346   MVT ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT, Subtarget);
4347   auto [TrueMask, VL] = getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
4348   SDValue Slidedown =
4349       getVSlidedown(DAG, Subtarget, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
4350                     convertToScalableVector(ContainerVT, Src, DAG, Subtarget),
4351                     DAG.getConstant(NewMask[0], DL, XLenVT), TrueMask, VL);
4352   return DAG.getNode(
4353       ISD::EXTRACT_SUBVECTOR, DL, VT,
4354       convertFromScalableVector(SrcVT, Slidedown, DAG, Subtarget),
4355       DAG.getConstant(0, DL, XLenVT));
4356 }
4357 
4358 // Because vslideup leaves the destination elements at the start intact, we can
4359 // use it to perform shuffles that insert subvectors:
4360 //
4361 // vector_shuffle v8:v8i8, v9:v8i8, <0, 1, 2, 3, 8, 9, 10, 11>
4362 // ->
4363 // vsetvli zero, 8, e8, mf2, ta, ma
4364 // vslideup.vi v8, v9, 4
4365 //
4366 // vector_shuffle v8:v8i8, v9:v8i8 <0, 1, 8, 9, 10, 5, 6, 7>
4367 // ->
4368 // vsetvli zero, 5, e8, mf2, tu, ma
4369 // vslideup.v1 v8, v9, 2
4370 static SDValue lowerVECTOR_SHUFFLEAsVSlideup(const SDLoc &DL, MVT VT,
4371                                              SDValue V1, SDValue V2,
4372                                              ArrayRef<int> Mask,
4373                                              const RISCVSubtarget &Subtarget,
4374                                              SelectionDAG &DAG) {
4375   unsigned NumElts = VT.getVectorNumElements();
4376   int NumSubElts, Index;
4377   if (!ShuffleVectorInst::isInsertSubvectorMask(Mask, NumElts, NumSubElts,
4378                                                 Index))
4379     return SDValue();
4380 
4381   bool OpsSwapped = Mask[Index] < (int)NumElts;
4382   SDValue InPlace = OpsSwapped ? V2 : V1;
4383   SDValue ToInsert = OpsSwapped ? V1 : V2;
4384 
4385   MVT XLenVT = Subtarget.getXLenVT();
4386   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
4387   auto TrueMask = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).first;
4388   // We slide up by the index that the subvector is being inserted at, and set
4389   // VL to the index + the number of elements being inserted.
4390   unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED | RISCVII::MASK_AGNOSTIC;
4391   // If the we're adding a suffix to the in place vector, i.e. inserting right
4392   // up to the very end of it, then we don't actually care about the tail.
4393   if (NumSubElts + Index >= (int)NumElts)
4394     Policy |= RISCVII::TAIL_AGNOSTIC;
4395 
4396   InPlace = convertToScalableVector(ContainerVT, InPlace, DAG, Subtarget);
4397   ToInsert = convertToScalableVector(ContainerVT, ToInsert, DAG, Subtarget);
4398   SDValue VL = DAG.getConstant(NumSubElts + Index, DL, XLenVT);
4399 
4400   SDValue Res;
4401   // If we're inserting into the lowest elements, use a tail undisturbed
4402   // vmv.v.v.
4403   if (Index == 0)
4404     Res = DAG.getNode(RISCVISD::VMV_V_V_VL, DL, ContainerVT, InPlace, ToInsert,
4405                       VL);
4406   else
4407     Res = getVSlideup(DAG, Subtarget, DL, ContainerVT, InPlace, ToInsert,
4408                       DAG.getConstant(Index, DL, XLenVT), TrueMask, VL, Policy);
4409   return convertFromScalableVector(VT, Res, DAG, Subtarget);
4410 }
4411 
4412 /// Match v(f)slide1up/down idioms.  These operations involve sliding
4413 /// N-1 elements to make room for an inserted scalar at one end.
4414 static SDValue lowerVECTOR_SHUFFLEAsVSlide1(const SDLoc &DL, MVT VT,
4415                                             SDValue V1, SDValue V2,
4416                                             ArrayRef<int> Mask,
4417                                             const RISCVSubtarget &Subtarget,
4418                                             SelectionDAG &DAG) {
4419   bool OpsSwapped = false;
4420   if (!isa<BuildVectorSDNode>(V1)) {
4421     if (!isa<BuildVectorSDNode>(V2))
4422       return SDValue();
4423     std::swap(V1, V2);
4424     OpsSwapped = true;
4425   }
4426   SDValue Splat = cast<BuildVectorSDNode>(V1)->getSplatValue();
4427   if (!Splat)
4428     return SDValue();
4429 
4430   // Return true if the mask could describe a slide of Mask.size() - 1
4431   // elements from concat_vector(V1, V2)[Base:] to [Offset:].
4432   auto isSlideMask = [](ArrayRef<int> Mask, unsigned Base, int Offset) {
4433     const unsigned S = (Offset > 0) ? 0 : -Offset;
4434     const unsigned E = Mask.size() - ((Offset > 0) ? Offset : 0);
4435     for (unsigned i = S; i != E; ++i)
4436       if (Mask[i] >= 0 && (unsigned)Mask[i] != Base + i + Offset)
4437         return false;
4438     return true;
4439   };
4440 
4441   const unsigned NumElts = VT.getVectorNumElements();
4442   bool IsVSlidedown = isSlideMask(Mask, OpsSwapped ? 0 : NumElts, 1);
4443   if (!IsVSlidedown && !isSlideMask(Mask, OpsSwapped ? 0 : NumElts, -1))
4444     return SDValue();
4445 
4446   const int InsertIdx = Mask[IsVSlidedown ? (NumElts - 1) : 0];
4447   // Inserted lane must come from splat, undef scalar is legal but not profitable.
4448   if (InsertIdx < 0 || InsertIdx / NumElts != (unsigned)OpsSwapped)
4449     return SDValue();
4450 
4451   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
4452   auto [TrueMask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4453   auto OpCode = IsVSlidedown ?
4454     (VT.isFloatingPoint() ? RISCVISD::VFSLIDE1DOWN_VL : RISCVISD::VSLIDE1DOWN_VL) :
4455     (VT.isFloatingPoint() ? RISCVISD::VFSLIDE1UP_VL : RISCVISD::VSLIDE1UP_VL);
4456   if (!VT.isFloatingPoint())
4457     Splat = DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), Splat);
4458   auto Vec = DAG.getNode(OpCode, DL, ContainerVT,
4459                          DAG.getUNDEF(ContainerVT),
4460                          convertToScalableVector(ContainerVT, V2, DAG, Subtarget),
4461                          Splat, TrueMask, VL);
4462   return convertFromScalableVector(VT, Vec, DAG, Subtarget);
4463 }
4464 
4465 // Given two input vectors of <[vscale x ]n x ty>, use vwaddu.vv and vwmaccu.vx
4466 // to create an interleaved vector of <[vscale x] n*2 x ty>.
4467 // This requires that the size of ty is less than the subtarget's maximum ELEN.
4468 static SDValue getWideningInterleave(SDValue EvenV, SDValue OddV,
4469                                      const SDLoc &DL, SelectionDAG &DAG,
4470                                      const RISCVSubtarget &Subtarget) {
4471   MVT VecVT = EvenV.getSimpleValueType();
4472   MVT VecContainerVT = VecVT; // <vscale x n x ty>
4473   // Convert fixed vectors to scalable if needed
4474   if (VecContainerVT.isFixedLengthVector()) {
4475     VecContainerVT = getContainerForFixedLengthVector(DAG, VecVT, Subtarget);
4476     EvenV = convertToScalableVector(VecContainerVT, EvenV, DAG, Subtarget);
4477     OddV = convertToScalableVector(VecContainerVT, OddV, DAG, Subtarget);
4478   }
4479 
4480   assert(VecVT.getScalarSizeInBits() < Subtarget.getELen());
4481 
4482   // We're working with a vector of the same size as the resulting
4483   // interleaved vector, but with half the number of elements and
4484   // twice the SEW (Hence the restriction on not using the maximum
4485   // ELEN)
4486   MVT WideVT =
4487       MVT::getVectorVT(MVT::getIntegerVT(VecVT.getScalarSizeInBits() * 2),
4488                        VecVT.getVectorElementCount());
4489   MVT WideContainerVT = WideVT; // <vscale x n x ty*2>
4490   if (WideContainerVT.isFixedLengthVector())
4491     WideContainerVT = getContainerForFixedLengthVector(DAG, WideVT, Subtarget);
4492 
4493   // Bitcast the input vectors to integers in case they are FP
4494   VecContainerVT = VecContainerVT.changeTypeToInteger();
4495   EvenV = DAG.getBitcast(VecContainerVT, EvenV);
4496   OddV = DAG.getBitcast(VecContainerVT, OddV);
4497 
4498   auto [Mask, VL] = getDefaultVLOps(VecVT, VecContainerVT, DL, DAG, Subtarget);
4499   SDValue Passthru = DAG.getUNDEF(WideContainerVT);
4500 
4501   SDValue Interleaved;
4502   if (Subtarget.hasStdExtZvbb()) {
4503     // Interleaved = (OddV << VecVT.getScalarSizeInBits()) + EvenV.
4504     SDValue OffsetVec =
4505         DAG.getSplatVector(VecContainerVT, DL,
4506                            DAG.getConstant(VecVT.getScalarSizeInBits(), DL,
4507                                            Subtarget.getXLenVT()));
4508     Interleaved = DAG.getNode(RISCVISD::VWSLL_VL, DL, WideContainerVT, OddV,
4509                               OffsetVec, Passthru, Mask, VL);
4510     Interleaved = DAG.getNode(RISCVISD::VWADDU_W_VL, DL, WideContainerVT,
4511                               Interleaved, EvenV, Passthru, Mask, VL);
4512   } else {
4513     // Widen EvenV and OddV with 0s and add one copy of OddV to EvenV with
4514     // vwaddu.vv
4515     Interleaved = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideContainerVT, EvenV,
4516                               OddV, Passthru, Mask, VL);
4517 
4518     // Then get OddV * by 2^(VecVT.getScalarSizeInBits() - 1)
4519     SDValue AllOnesVec = DAG.getSplatVector(
4520         VecContainerVT, DL, DAG.getAllOnesConstant(DL, Subtarget.getXLenVT()));
4521     SDValue OddsMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideContainerVT,
4522                                   OddV, AllOnesVec, Passthru, Mask, VL);
4523 
4524     // Add the two together so we get
4525     //   (OddV * 0xff...ff) + (OddV + EvenV)
4526     // = (OddV * 0x100...00) + EvenV
4527     // = (OddV << VecVT.getScalarSizeInBits()) + EvenV
4528     // Note the ADD_VL and VLMULU_VL should get selected as vwmaccu.vx
4529     Interleaved = DAG.getNode(RISCVISD::ADD_VL, DL, WideContainerVT,
4530                               Interleaved, OddsMul, Passthru, Mask, VL);
4531   }
4532 
4533   // Bitcast from <vscale x n * ty*2> to <vscale x 2*n x ty>
4534   MVT ResultContainerVT = MVT::getVectorVT(
4535       VecVT.getVectorElementType(), // Make sure to use original type
4536       VecContainerVT.getVectorElementCount().multiplyCoefficientBy(2));
4537   Interleaved = DAG.getBitcast(ResultContainerVT, Interleaved);
4538 
4539   // Convert back to a fixed vector if needed
4540   MVT ResultVT =
4541       MVT::getVectorVT(VecVT.getVectorElementType(),
4542                        VecVT.getVectorElementCount().multiplyCoefficientBy(2));
4543   if (ResultVT.isFixedLengthVector())
4544     Interleaved =
4545         convertFromScalableVector(ResultVT, Interleaved, DAG, Subtarget);
4546 
4547   return Interleaved;
4548 }
4549 
4550 // If we have a vector of bits that we want to reverse, we can use a vbrev on a
4551 // larger element type, e.g. v32i1 can be reversed with a v1i32 bitreverse.
4552 static SDValue lowerBitreverseShuffle(ShuffleVectorSDNode *SVN,
4553                                       SelectionDAG &DAG,
4554                                       const RISCVSubtarget &Subtarget) {
4555   SDLoc DL(SVN);
4556   MVT VT = SVN->getSimpleValueType(0);
4557   SDValue V = SVN->getOperand(0);
4558   unsigned NumElts = VT.getVectorNumElements();
4559 
4560   assert(VT.getVectorElementType() == MVT::i1);
4561 
4562   if (!ShuffleVectorInst::isReverseMask(SVN->getMask(),
4563                                         SVN->getMask().size()) ||
4564       !SVN->getOperand(1).isUndef())
4565     return SDValue();
4566 
4567   unsigned ViaEltSize = std::max((uint64_t)8, PowerOf2Ceil(NumElts));
4568   EVT ViaVT = EVT::getVectorVT(
4569       *DAG.getContext(), EVT::getIntegerVT(*DAG.getContext(), ViaEltSize), 1);
4570   EVT ViaBitVT =
4571       EVT::getVectorVT(*DAG.getContext(), MVT::i1, ViaVT.getScalarSizeInBits());
4572 
4573   // If we don't have zvbb or the larger element type > ELEN, the operation will
4574   // be illegal.
4575   if (!Subtarget.getTargetLowering()->isOperationLegalOrCustom(ISD::BITREVERSE,
4576                                                                ViaVT) ||
4577       !Subtarget.getTargetLowering()->isTypeLegal(ViaBitVT))
4578     return SDValue();
4579 
4580   // If the bit vector doesn't fit exactly into the larger element type, we need
4581   // to insert it into the larger vector and then shift up the reversed bits
4582   // afterwards to get rid of the gap introduced.
4583   if (ViaEltSize > NumElts)
4584     V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ViaBitVT, DAG.getUNDEF(ViaBitVT),
4585                     V, DAG.getVectorIdxConstant(0, DL));
4586 
4587   SDValue Res =
4588       DAG.getNode(ISD::BITREVERSE, DL, ViaVT, DAG.getBitcast(ViaVT, V));
4589 
4590   // Shift up the reversed bits if the vector didn't exactly fit into the larger
4591   // element type.
4592   if (ViaEltSize > NumElts)
4593     Res = DAG.getNode(ISD::SRL, DL, ViaVT, Res,
4594                       DAG.getConstant(ViaEltSize - NumElts, DL, ViaVT));
4595 
4596   Res = DAG.getBitcast(ViaBitVT, Res);
4597 
4598   if (ViaEltSize > NumElts)
4599     Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
4600                       DAG.getVectorIdxConstant(0, DL));
4601   return Res;
4602 }
4603 
4604 // Given a shuffle mask like <3, 0, 1, 2, 7, 4, 5, 6> for v8i8, we can
4605 // reinterpret it as a v2i32 and rotate it right by 8 instead. We can lower this
4606 // as a vror.vi if we have Zvkb, or otherwise as a vsll, vsrl and vor.
4607 static SDValue lowerVECTOR_SHUFFLEAsRotate(ShuffleVectorSDNode *SVN,
4608                                            SelectionDAG &DAG,
4609                                            const RISCVSubtarget &Subtarget) {
4610   SDLoc DL(SVN);
4611 
4612   EVT VT = SVN->getValueType(0);
4613   unsigned NumElts = VT.getVectorNumElements();
4614   unsigned EltSizeInBits = VT.getScalarSizeInBits();
4615   unsigned NumSubElts, RotateAmt;
4616   if (!ShuffleVectorInst::isBitRotateMask(SVN->getMask(), EltSizeInBits, 2,
4617                                           NumElts, NumSubElts, RotateAmt))
4618     return SDValue();
4619   MVT RotateVT = MVT::getVectorVT(MVT::getIntegerVT(EltSizeInBits * NumSubElts),
4620                                   NumElts / NumSubElts);
4621 
4622   // We might have a RotateVT that isn't legal, e.g. v4i64 on zve32x.
4623   if (!Subtarget.getTargetLowering()->isTypeLegal(RotateVT))
4624     return SDValue();
4625 
4626   SDValue Op = DAG.getBitcast(RotateVT, SVN->getOperand(0));
4627 
4628   SDValue Rotate;
4629   // A rotate of an i16 by 8 bits either direction is equivalent to a byteswap,
4630   // so canonicalize to vrev8.
4631   if (RotateVT.getScalarType() == MVT::i16 && RotateAmt == 8)
4632     Rotate = DAG.getNode(ISD::BSWAP, DL, RotateVT, Op);
4633   else
4634     Rotate = DAG.getNode(ISD::ROTL, DL, RotateVT, Op,
4635                          DAG.getConstant(RotateAmt, DL, RotateVT));
4636 
4637   return DAG.getBitcast(VT, Rotate);
4638 }
4639 
4640 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
4641                                    const RISCVSubtarget &Subtarget) {
4642   SDValue V1 = Op.getOperand(0);
4643   SDValue V2 = Op.getOperand(1);
4644   SDLoc DL(Op);
4645   MVT XLenVT = Subtarget.getXLenVT();
4646   MVT VT = Op.getSimpleValueType();
4647   unsigned NumElts = VT.getVectorNumElements();
4648   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
4649 
4650   if (VT.getVectorElementType() == MVT::i1) {
4651     // Lower to a vror.vi of a larger element type if possible before we promote
4652     // i1s to i8s.
4653     if (SDValue V = lowerVECTOR_SHUFFLEAsRotate(SVN, DAG, Subtarget))
4654       return V;
4655     if (SDValue V = lowerBitreverseShuffle(SVN, DAG, Subtarget))
4656       return V;
4657 
4658     // Promote i1 shuffle to i8 shuffle.
4659     MVT WidenVT = MVT::getVectorVT(MVT::i8, VT.getVectorElementCount());
4660     V1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenVT, V1);
4661     V2 = V2.isUndef() ? DAG.getUNDEF(WidenVT)
4662                       : DAG.getNode(ISD::ZERO_EXTEND, DL, WidenVT, V2);
4663     SDValue Shuffled = DAG.getVectorShuffle(WidenVT, DL, V1, V2, SVN->getMask());
4664     return DAG.getSetCC(DL, VT, Shuffled, DAG.getConstant(0, DL, WidenVT),
4665                         ISD::SETNE);
4666   }
4667 
4668   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
4669 
4670   auto [TrueMask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4671 
4672   if (SVN->isSplat()) {
4673     const int Lane = SVN->getSplatIndex();
4674     if (Lane >= 0) {
4675       MVT SVT = VT.getVectorElementType();
4676 
4677       // Turn splatted vector load into a strided load with an X0 stride.
4678       SDValue V = V1;
4679       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
4680       // with undef.
4681       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
4682       int Offset = Lane;
4683       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
4684         int OpElements =
4685             V.getOperand(0).getSimpleValueType().getVectorNumElements();
4686         V = V.getOperand(Offset / OpElements);
4687         Offset %= OpElements;
4688       }
4689 
4690       // We need to ensure the load isn't atomic or volatile.
4691       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
4692         auto *Ld = cast<LoadSDNode>(V);
4693         Offset *= SVT.getStoreSize();
4694         SDValue NewAddr = DAG.getMemBasePlusOffset(
4695             Ld->getBasePtr(), TypeSize::getFixed(Offset), DL);
4696 
4697         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
4698         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
4699           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4700           SDValue IntID =
4701               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
4702           SDValue Ops[] = {Ld->getChain(),
4703                            IntID,
4704                            DAG.getUNDEF(ContainerVT),
4705                            NewAddr,
4706                            DAG.getRegister(RISCV::X0, XLenVT),
4707                            VL};
4708           SDValue NewLoad = DAG.getMemIntrinsicNode(
4709               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
4710               DAG.getMachineFunction().getMachineMemOperand(
4711                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
4712           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
4713           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
4714         }
4715 
4716         // Otherwise use a scalar load and splat. This will give the best
4717         // opportunity to fold a splat into the operation. ISel can turn it into
4718         // the x0 strided load if we aren't able to fold away the select.
4719         if (SVT.isFloatingPoint())
4720           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
4721                           Ld->getPointerInfo().getWithOffset(Offset),
4722                           Ld->getOriginalAlign(),
4723                           Ld->getMemOperand()->getFlags());
4724         else
4725           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
4726                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
4727                              Ld->getOriginalAlign(),
4728                              Ld->getMemOperand()->getFlags());
4729         DAG.makeEquivalentMemoryOrdering(Ld, V);
4730 
4731         unsigned Opc =
4732             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
4733         SDValue Splat =
4734             DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), V, VL);
4735         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
4736       }
4737 
4738       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
4739       assert(Lane < (int)NumElts && "Unexpected lane!");
4740       SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT,
4741                                    V1, DAG.getConstant(Lane, DL, XLenVT),
4742                                    DAG.getUNDEF(ContainerVT), TrueMask, VL);
4743       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
4744     }
4745   }
4746 
4747   ArrayRef<int> Mask = SVN->getMask();
4748 
4749   if (SDValue V =
4750           lowerVECTOR_SHUFFLEAsVSlide1(DL, VT, V1, V2, Mask, Subtarget, DAG))
4751     return V;
4752 
4753   if (SDValue V =
4754           lowerVECTOR_SHUFFLEAsVSlidedown(DL, VT, V1, V2, Mask, Subtarget, DAG))
4755     return V;
4756 
4757   // A bitrotate will be one instruction on Zvkb, so try to lower to it first if
4758   // available.
4759   if (Subtarget.hasStdExtZvkb())
4760     if (SDValue V = lowerVECTOR_SHUFFLEAsRotate(SVN, DAG, Subtarget))
4761       return V;
4762 
4763   // Lower rotations to a SLIDEDOWN and a SLIDEUP. One of the source vectors may
4764   // be undef which can be handled with a single SLIDEDOWN/UP.
4765   int LoSrc, HiSrc;
4766   int Rotation = isElementRotate(LoSrc, HiSrc, Mask);
4767   if (Rotation > 0) {
4768     SDValue LoV, HiV;
4769     if (LoSrc >= 0) {
4770       LoV = LoSrc == 0 ? V1 : V2;
4771       LoV = convertToScalableVector(ContainerVT, LoV, DAG, Subtarget);
4772     }
4773     if (HiSrc >= 0) {
4774       HiV = HiSrc == 0 ? V1 : V2;
4775       HiV = convertToScalableVector(ContainerVT, HiV, DAG, Subtarget);
4776     }
4777 
4778     // We found a rotation. We need to slide HiV down by Rotation. Then we need
4779     // to slide LoV up by (NumElts - Rotation).
4780     unsigned InvRotate = NumElts - Rotation;
4781 
4782     SDValue Res = DAG.getUNDEF(ContainerVT);
4783     if (HiV) {
4784       // Even though we could use a smaller VL, don't to avoid a vsetivli
4785       // toggle.
4786       Res = getVSlidedown(DAG, Subtarget, DL, ContainerVT, Res, HiV,
4787                           DAG.getConstant(Rotation, DL, XLenVT), TrueMask, VL);
4788     }
4789     if (LoV)
4790       Res = getVSlideup(DAG, Subtarget, DL, ContainerVT, Res, LoV,
4791                         DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL,
4792                         RISCVII::TAIL_AGNOSTIC);
4793 
4794     return convertFromScalableVector(VT, Res, DAG, Subtarget);
4795   }
4796 
4797   // If this is a deinterleave and we can widen the vector, then we can use
4798   // vnsrl to deinterleave.
4799   if (isDeinterleaveShuffle(VT, ContainerVT, V1, V2, Mask, Subtarget)) {
4800     return getDeinterleaveViaVNSRL(DL, VT, V1.getOperand(0), Mask[0] == 0,
4801                                    Subtarget, DAG);
4802   }
4803 
4804   if (SDValue V =
4805           lowerVECTOR_SHUFFLEAsVSlideup(DL, VT, V1, V2, Mask, Subtarget, DAG))
4806     return V;
4807 
4808   // Detect an interleave shuffle and lower to
4809   // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
4810   int EvenSrc, OddSrc;
4811   if (isInterleaveShuffle(Mask, VT, EvenSrc, OddSrc, Subtarget)) {
4812     // Extract the halves of the vectors.
4813     MVT HalfVT = VT.getHalfNumVectorElementsVT();
4814 
4815     int Size = Mask.size();
4816     SDValue EvenV, OddV;
4817     assert(EvenSrc >= 0 && "Undef source?");
4818     EvenV = (EvenSrc / Size) == 0 ? V1 : V2;
4819     EvenV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, EvenV,
4820                         DAG.getConstant(EvenSrc % Size, DL, XLenVT));
4821 
4822     assert(OddSrc >= 0 && "Undef source?");
4823     OddV = (OddSrc / Size) == 0 ? V1 : V2;
4824     OddV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, OddV,
4825                        DAG.getConstant(OddSrc % Size, DL, XLenVT));
4826 
4827     return getWideningInterleave(EvenV, OddV, DL, DAG, Subtarget);
4828   }
4829 
4830   // Detect shuffles which can be re-expressed as vector selects; these are
4831   // shuffles in which each element in the destination is taken from an element
4832   // at the corresponding index in either source vectors.
4833   bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
4834     int MaskIndex = MaskIdx.value();
4835     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
4836   });
4837 
4838   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
4839 
4840   SmallVector<SDValue> MaskVals;
4841   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
4842   // merged with a second vrgather.
4843   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
4844 
4845   // By default we preserve the original operand order, and use a mask to
4846   // select LHS as true and RHS as false. However, since RVV vector selects may
4847   // feature splats but only on the LHS, we may choose to invert our mask and
4848   // instead select between RHS and LHS.
4849   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
4850   bool InvertMask = IsSelect == SwapOps;
4851 
4852   // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
4853   // half.
4854   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
4855 
4856   // Now construct the mask that will be used by the vselect or blended
4857   // vrgather operation. For vrgathers, construct the appropriate indices into
4858   // each vector.
4859   for (int MaskIndex : Mask) {
4860     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
4861     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
4862     if (!IsSelect) {
4863       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
4864       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
4865                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
4866                                      : DAG.getUNDEF(XLenVT));
4867       GatherIndicesRHS.push_back(
4868           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
4869                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
4870       if (IsLHSOrUndefIndex && MaskIndex >= 0)
4871         ++LHSIndexCounts[MaskIndex];
4872       if (!IsLHSOrUndefIndex)
4873         ++RHSIndexCounts[MaskIndex - NumElts];
4874     }
4875   }
4876 
4877   if (SwapOps) {
4878     std::swap(V1, V2);
4879     std::swap(GatherIndicesLHS, GatherIndicesRHS);
4880   }
4881 
4882   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
4883   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
4884   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
4885 
4886   if (IsSelect)
4887     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
4888 
4889   // We might be able to express the shuffle as a bitrotate. But even if we
4890   // don't have Zvkb and have to expand, the expanded sequence of approx. 2
4891   // shifts and a vor will have a higher throughput than a vrgather.
4892   if (SDValue V = lowerVECTOR_SHUFFLEAsRotate(SVN, DAG, Subtarget))
4893     return V;
4894 
4895   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
4896     // On such a large vector we're unable to use i8 as the index type.
4897     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
4898     // may involve vector splitting if we're already at LMUL=8, or our
4899     // user-supplied maximum fixed-length LMUL.
4900     return SDValue();
4901   }
4902 
4903   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
4904   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
4905   MVT IndexVT = VT.changeTypeToInteger();
4906   // Since we can't introduce illegal index types at this stage, use i16 and
4907   // vrgatherei16 if the corresponding index type for plain vrgather is greater
4908   // than XLenVT.
4909   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
4910     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
4911     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
4912   }
4913 
4914   // If the mask allows, we can do all the index computation in 16 bits.  This
4915   // requires less work and less register pressure at high LMUL, and creates
4916   // smaller constants which may be cheaper to materialize.
4917   if (IndexVT.getScalarType().bitsGT(MVT::i16) && isUInt<16>(NumElts - 1) &&
4918       (IndexVT.getSizeInBits() / Subtarget.getRealMinVLen()) > 1) {
4919     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
4920     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
4921   }
4922 
4923   MVT IndexContainerVT =
4924       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
4925 
4926   SDValue Gather;
4927   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
4928   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
4929   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
4930     Gather = lowerScalarSplat(SDValue(), SplatValue, VL, ContainerVT, DL, DAG,
4931                               Subtarget);
4932   } else {
4933     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
4934     // If only one index is used, we can use a "splat" vrgather.
4935     // TODO: We can splat the most-common index and fix-up any stragglers, if
4936     // that's beneficial.
4937     if (LHSIndexCounts.size() == 1) {
4938       int SplatIndex = LHSIndexCounts.begin()->getFirst();
4939       Gather = DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
4940                            DAG.getConstant(SplatIndex, DL, XLenVT),
4941                            DAG.getUNDEF(ContainerVT), TrueMask, VL);
4942     } else {
4943       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
4944       LHSIndices =
4945           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
4946 
4947       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
4948                            DAG.getUNDEF(ContainerVT), TrueMask, VL);
4949     }
4950   }
4951 
4952   // If a second vector operand is used by this shuffle, blend it in with an
4953   // additional vrgather.
4954   if (!V2.isUndef()) {
4955     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
4956 
4957     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
4958     SelectMask =
4959         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
4960 
4961     // If only one index is used, we can use a "splat" vrgather.
4962     // TODO: We can splat the most-common index and fix-up any stragglers, if
4963     // that's beneficial.
4964     if (RHSIndexCounts.size() == 1) {
4965       int SplatIndex = RHSIndexCounts.begin()->getFirst();
4966       Gather = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
4967                            DAG.getConstant(SplatIndex, DL, XLenVT), Gather,
4968                            SelectMask, VL);
4969     } else {
4970       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
4971       RHSIndices =
4972           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
4973       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, Gather,
4974                            SelectMask, VL);
4975     }
4976   }
4977 
4978   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
4979 }
4980 
4981 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
4982   // Support splats for any type. These should type legalize well.
4983   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
4984     return true;
4985 
4986   // Only support legal VTs for other shuffles for now.
4987   if (!isTypeLegal(VT))
4988     return false;
4989 
4990   MVT SVT = VT.getSimpleVT();
4991 
4992   // Not for i1 vectors.
4993   if (SVT.getScalarType() == MVT::i1)
4994     return false;
4995 
4996   int Dummy1, Dummy2;
4997   return (isElementRotate(Dummy1, Dummy2, M) > 0) ||
4998          isInterleaveShuffle(M, SVT, Dummy1, Dummy2, Subtarget);
4999 }
5000 
5001 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
5002 // the exponent.
5003 SDValue
5004 RISCVTargetLowering::lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op,
5005                                                SelectionDAG &DAG) const {
5006   MVT VT = Op.getSimpleValueType();
5007   unsigned EltSize = VT.getScalarSizeInBits();
5008   SDValue Src = Op.getOperand(0);
5009   SDLoc DL(Op);
5010   MVT ContainerVT = VT;
5011 
5012   SDValue Mask, VL;
5013   if (Op->isVPOpcode()) {
5014     Mask = Op.getOperand(1);
5015     if (VT.isFixedLengthVector())
5016       Mask = convertToScalableVector(getMaskTypeFor(ContainerVT), Mask, DAG,
5017                                      Subtarget);
5018     VL = Op.getOperand(2);
5019   }
5020 
5021   // We choose FP type that can represent the value if possible. Otherwise, we
5022   // use rounding to zero conversion for correct exponent of the result.
5023   // TODO: Use f16 for i8 when possible?
5024   MVT FloatEltVT = (EltSize >= 32) ? MVT::f64 : MVT::f32;
5025   if (!isTypeLegal(MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount())))
5026     FloatEltVT = MVT::f32;
5027   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
5028 
5029   // Legal types should have been checked in the RISCVTargetLowering
5030   // constructor.
5031   // TODO: Splitting may make sense in some cases.
5032   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
5033          "Expected legal float type!");
5034 
5035   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
5036   // The trailing zero count is equal to log2 of this single bit value.
5037   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
5038     SDValue Neg = DAG.getNegative(Src, DL, VT);
5039     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
5040   } else if (Op.getOpcode() == ISD::VP_CTTZ_ZERO_UNDEF) {
5041     SDValue Neg = DAG.getNode(ISD::VP_SUB, DL, VT, DAG.getConstant(0, DL, VT),
5042                               Src, Mask, VL);
5043     Src = DAG.getNode(ISD::VP_AND, DL, VT, Src, Neg, Mask, VL);
5044   }
5045 
5046   // We have a legal FP type, convert to it.
5047   SDValue FloatVal;
5048   if (FloatVT.bitsGT(VT)) {
5049     if (Op->isVPOpcode())
5050       FloatVal = DAG.getNode(ISD::VP_UINT_TO_FP, DL, FloatVT, Src, Mask, VL);
5051     else
5052       FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
5053   } else {
5054     // Use RTZ to avoid rounding influencing exponent of FloatVal.
5055     if (VT.isFixedLengthVector()) {
5056       ContainerVT = getContainerForFixedLengthVector(VT);
5057       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
5058     }
5059     if (!Op->isVPOpcode())
5060       std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5061     SDValue RTZRM =
5062         DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT());
5063     MVT ContainerFloatVT =
5064         MVT::getVectorVT(FloatEltVT, ContainerVT.getVectorElementCount());
5065     FloatVal = DAG.getNode(RISCVISD::VFCVT_RM_F_XU_VL, DL, ContainerFloatVT,
5066                            Src, Mask, RTZRM, VL);
5067     if (VT.isFixedLengthVector())
5068       FloatVal = convertFromScalableVector(FloatVT, FloatVal, DAG, Subtarget);
5069   }
5070   // Bitcast to integer and shift the exponent to the LSB.
5071   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
5072   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
5073   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
5074 
5075   SDValue Exp;
5076   // Restore back to original type. Truncation after SRL is to generate vnsrl.
5077   if (Op->isVPOpcode()) {
5078     Exp = DAG.getNode(ISD::VP_LSHR, DL, IntVT, Bitcast,
5079                       DAG.getConstant(ShiftAmt, DL, IntVT), Mask, VL);
5080     Exp = DAG.getVPZExtOrTrunc(DL, VT, Exp, Mask, VL);
5081   } else {
5082     Exp = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
5083                       DAG.getConstant(ShiftAmt, DL, IntVT));
5084     if (IntVT.bitsLT(VT))
5085       Exp = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Exp);
5086     else if (IntVT.bitsGT(VT))
5087       Exp = DAG.getNode(ISD::TRUNCATE, DL, VT, Exp);
5088   }
5089 
5090   // The exponent contains log2 of the value in biased form.
5091   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
5092   // For trailing zeros, we just need to subtract the bias.
5093   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
5094     return DAG.getNode(ISD::SUB, DL, VT, Exp,
5095                        DAG.getConstant(ExponentBias, DL, VT));
5096   if (Op.getOpcode() == ISD::VP_CTTZ_ZERO_UNDEF)
5097     return DAG.getNode(ISD::VP_SUB, DL, VT, Exp,
5098                        DAG.getConstant(ExponentBias, DL, VT), Mask, VL);
5099 
5100   // For leading zeros, we need to remove the bias and convert from log2 to
5101   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
5102   unsigned Adjust = ExponentBias + (EltSize - 1);
5103   SDValue Res;
5104   if (Op->isVPOpcode())
5105     Res = DAG.getNode(ISD::VP_SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Exp,
5106                       Mask, VL);
5107   else
5108     Res = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Exp);
5109 
5110   // The above result with zero input equals to Adjust which is greater than
5111   // EltSize. Hence, we can do min(Res, EltSize) for CTLZ.
5112   if (Op.getOpcode() == ISD::CTLZ)
5113     Res = DAG.getNode(ISD::UMIN, DL, VT, Res, DAG.getConstant(EltSize, DL, VT));
5114   else if (Op.getOpcode() == ISD::VP_CTLZ)
5115     Res = DAG.getNode(ISD::VP_UMIN, DL, VT, Res,
5116                       DAG.getConstant(EltSize, DL, VT), Mask, VL);
5117   return Res;
5118 }
5119 
5120 // While RVV has alignment restrictions, we should always be able to load as a
5121 // legal equivalently-sized byte-typed vector instead. This method is
5122 // responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If
5123 // the load is already correctly-aligned, it returns SDValue().
5124 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
5125                                                     SelectionDAG &DAG) const {
5126   auto *Load = cast<LoadSDNode>(Op);
5127   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
5128 
5129   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5130                                      Load->getMemoryVT(),
5131                                      *Load->getMemOperand()))
5132     return SDValue();
5133 
5134   SDLoc DL(Op);
5135   MVT VT = Op.getSimpleValueType();
5136   unsigned EltSizeBits = VT.getScalarSizeInBits();
5137   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
5138          "Unexpected unaligned RVV load type");
5139   MVT NewVT =
5140       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
5141   assert(NewVT.isValid() &&
5142          "Expecting equally-sized RVV vector types to be legal");
5143   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
5144                           Load->getPointerInfo(), Load->getOriginalAlign(),
5145                           Load->getMemOperand()->getFlags());
5146   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
5147 }
5148 
5149 // While RVV has alignment restrictions, we should always be able to store as a
5150 // legal equivalently-sized byte-typed vector instead. This method is
5151 // responsible for re-expressing a ISD::STORE via a correctly-aligned type. It
5152 // returns SDValue() if the store is already correctly aligned.
5153 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
5154                                                      SelectionDAG &DAG) const {
5155   auto *Store = cast<StoreSDNode>(Op);
5156   assert(Store && Store->getValue().getValueType().isVector() &&
5157          "Expected vector store");
5158 
5159   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5160                                      Store->getMemoryVT(),
5161                                      *Store->getMemOperand()))
5162     return SDValue();
5163 
5164   SDLoc DL(Op);
5165   SDValue StoredVal = Store->getValue();
5166   MVT VT = StoredVal.getSimpleValueType();
5167   unsigned EltSizeBits = VT.getScalarSizeInBits();
5168   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
5169          "Unexpected unaligned RVV store type");
5170   MVT NewVT =
5171       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
5172   assert(NewVT.isValid() &&
5173          "Expecting equally-sized RVV vector types to be legal");
5174   StoredVal = DAG.getBitcast(NewVT, StoredVal);
5175   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
5176                       Store->getPointerInfo(), Store->getOriginalAlign(),
5177                       Store->getMemOperand()->getFlags());
5178 }
5179 
5180 static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG,
5181                              const RISCVSubtarget &Subtarget) {
5182   assert(Op.getValueType() == MVT::i64 && "Unexpected VT");
5183 
5184   int64_t Imm = cast<ConstantSDNode>(Op)->getSExtValue();
5185 
5186   // All simm32 constants should be handled by isel.
5187   // NOTE: The getMaxBuildIntsCost call below should return a value >= 2 making
5188   // this check redundant, but small immediates are common so this check
5189   // should have better compile time.
5190   if (isInt<32>(Imm))
5191     return Op;
5192 
5193   // We only need to cost the immediate, if constant pool lowering is enabled.
5194   if (!Subtarget.useConstantPoolForLargeInts())
5195     return Op;
5196 
5197   RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, Subtarget);
5198   if (Seq.size() <= Subtarget.getMaxBuildIntsCost())
5199     return Op;
5200 
5201   // Optimizations below are disabled for opt size. If we're optimizing for
5202   // size, use a constant pool.
5203   if (DAG.shouldOptForSize())
5204     return SDValue();
5205 
5206   // Special case. See if we can build the constant as (ADD (SLLI X, C), X) do
5207   // that if it will avoid a constant pool.
5208   // It will require an extra temporary register though.
5209   // If we have Zba we can use (ADD_UW X, (SLLI X, 32)) to handle cases where
5210   // low and high 32 bits are the same and bit 31 and 63 are set.
5211   unsigned ShiftAmt, AddOpc;
5212   RISCVMatInt::InstSeq SeqLo =
5213       RISCVMatInt::generateTwoRegInstSeq(Imm, Subtarget, ShiftAmt, AddOpc);
5214   if (!SeqLo.empty() && (SeqLo.size() + 2) <= Subtarget.getMaxBuildIntsCost())
5215     return Op;
5216 
5217   return SDValue();
5218 }
5219 
5220 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
5221                                  const RISCVSubtarget &Subtarget) {
5222   SDLoc dl(Op);
5223   AtomicOrdering FenceOrdering =
5224       static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
5225   SyncScope::ID FenceSSID =
5226       static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
5227 
5228   if (Subtarget.hasStdExtZtso()) {
5229     // The only fence that needs an instruction is a sequentially-consistent
5230     // cross-thread fence.
5231     if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
5232         FenceSSID == SyncScope::System)
5233       return Op;
5234 
5235     // MEMBARRIER is a compiler barrier; it codegens to a no-op.
5236     return DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
5237   }
5238 
5239   // singlethread fences only synchronize with signal handlers on the same
5240   // thread and thus only need to preserve instruction order, not actually
5241   // enforce memory ordering.
5242   if (FenceSSID == SyncScope::SingleThread)
5243     // MEMBARRIER is a compiler barrier; it codegens to a no-op.
5244     return DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
5245 
5246   return Op;
5247 }
5248 
5249 SDValue RISCVTargetLowering::LowerIS_FPCLASS(SDValue Op,
5250                                              SelectionDAG &DAG) const {
5251   SDLoc DL(Op);
5252   MVT VT = Op.getSimpleValueType();
5253   MVT XLenVT = Subtarget.getXLenVT();
5254   unsigned Check = Op.getConstantOperandVal(1);
5255   unsigned TDCMask = 0;
5256   if (Check & fcSNan)
5257     TDCMask |= RISCV::FPMASK_Signaling_NaN;
5258   if (Check & fcQNan)
5259     TDCMask |= RISCV::FPMASK_Quiet_NaN;
5260   if (Check & fcPosInf)
5261     TDCMask |= RISCV::FPMASK_Positive_Infinity;
5262   if (Check & fcNegInf)
5263     TDCMask |= RISCV::FPMASK_Negative_Infinity;
5264   if (Check & fcPosNormal)
5265     TDCMask |= RISCV::FPMASK_Positive_Normal;
5266   if (Check & fcNegNormal)
5267     TDCMask |= RISCV::FPMASK_Negative_Normal;
5268   if (Check & fcPosSubnormal)
5269     TDCMask |= RISCV::FPMASK_Positive_Subnormal;
5270   if (Check & fcNegSubnormal)
5271     TDCMask |= RISCV::FPMASK_Negative_Subnormal;
5272   if (Check & fcPosZero)
5273     TDCMask |= RISCV::FPMASK_Positive_Zero;
5274   if (Check & fcNegZero)
5275     TDCMask |= RISCV::FPMASK_Negative_Zero;
5276 
5277   bool IsOneBitMask = isPowerOf2_32(TDCMask);
5278 
5279   SDValue TDCMaskV = DAG.getConstant(TDCMask, DL, XLenVT);
5280 
5281   if (VT.isVector()) {
5282     SDValue Op0 = Op.getOperand(0);
5283     MVT VT0 = Op.getOperand(0).getSimpleValueType();
5284 
5285     if (VT.isScalableVector()) {
5286       MVT DstVT = VT0.changeVectorElementTypeToInteger();
5287       auto [Mask, VL] = getDefaultScalableVLOps(VT0, DL, DAG, Subtarget);
5288       if (Op.getOpcode() == ISD::VP_IS_FPCLASS) {
5289         Mask = Op.getOperand(2);
5290         VL = Op.getOperand(3);
5291       }
5292       SDValue FPCLASS = DAG.getNode(RISCVISD::FCLASS_VL, DL, DstVT, Op0, Mask,
5293                                     VL, Op->getFlags());
5294       if (IsOneBitMask)
5295         return DAG.getSetCC(DL, VT, FPCLASS,
5296                             DAG.getConstant(TDCMask, DL, DstVT),
5297                             ISD::CondCode::SETEQ);
5298       SDValue AND = DAG.getNode(ISD::AND, DL, DstVT, FPCLASS,
5299                                 DAG.getConstant(TDCMask, DL, DstVT));
5300       return DAG.getSetCC(DL, VT, AND, DAG.getConstant(0, DL, DstVT),
5301                           ISD::SETNE);
5302     }
5303 
5304     MVT ContainerVT0 = getContainerForFixedLengthVector(VT0);
5305     MVT ContainerVT = getContainerForFixedLengthVector(VT);
5306     MVT ContainerDstVT = ContainerVT0.changeVectorElementTypeToInteger();
5307     auto [Mask, VL] = getDefaultVLOps(VT0, ContainerVT0, DL, DAG, Subtarget);
5308     if (Op.getOpcode() == ISD::VP_IS_FPCLASS) {
5309       Mask = Op.getOperand(2);
5310       MVT MaskContainerVT =
5311           getContainerForFixedLengthVector(Mask.getSimpleValueType());
5312       Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);
5313       VL = Op.getOperand(3);
5314     }
5315     Op0 = convertToScalableVector(ContainerVT0, Op0, DAG, Subtarget);
5316 
5317     SDValue FPCLASS = DAG.getNode(RISCVISD::FCLASS_VL, DL, ContainerDstVT, Op0,
5318                                   Mask, VL, Op->getFlags());
5319 
5320     TDCMaskV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerDstVT,
5321                            DAG.getUNDEF(ContainerDstVT), TDCMaskV, VL);
5322     if (IsOneBitMask) {
5323       SDValue VMSEQ =
5324           DAG.getNode(RISCVISD::SETCC_VL, DL, ContainerVT,
5325                       {FPCLASS, TDCMaskV, DAG.getCondCode(ISD::SETEQ),
5326                        DAG.getUNDEF(ContainerVT), Mask, VL});
5327       return convertFromScalableVector(VT, VMSEQ, DAG, Subtarget);
5328     }
5329     SDValue AND = DAG.getNode(RISCVISD::AND_VL, DL, ContainerDstVT, FPCLASS,
5330                               TDCMaskV, DAG.getUNDEF(ContainerDstVT), Mask, VL);
5331 
5332     SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
5333     SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerDstVT,
5334                             DAG.getUNDEF(ContainerDstVT), SplatZero, VL);
5335 
5336     SDValue VMSNE = DAG.getNode(RISCVISD::SETCC_VL, DL, ContainerVT,
5337                                 {AND, SplatZero, DAG.getCondCode(ISD::SETNE),
5338                                  DAG.getUNDEF(ContainerVT), Mask, VL});
5339     return convertFromScalableVector(VT, VMSNE, DAG, Subtarget);
5340   }
5341 
5342   SDValue FCLASS = DAG.getNode(RISCVISD::FCLASS, DL, XLenVT, Op.getOperand(0));
5343   SDValue AND = DAG.getNode(ISD::AND, DL, XLenVT, FCLASS, TDCMaskV);
5344   SDValue Res = DAG.getSetCC(DL, XLenVT, AND, DAG.getConstant(0, DL, XLenVT),
5345                              ISD::CondCode::SETNE);
5346   return DAG.getNode(ISD::TRUNCATE, DL, VT, Res);
5347 }
5348 
5349 // Lower fmaximum and fminimum. Unlike our fmax and fmin instructions, these
5350 // operations propagate nans.
5351 static SDValue lowerFMAXIMUM_FMINIMUM(SDValue Op, SelectionDAG &DAG,
5352                                       const RISCVSubtarget &Subtarget) {
5353   SDLoc DL(Op);
5354   MVT VT = Op.getSimpleValueType();
5355 
5356   SDValue X = Op.getOperand(0);
5357   SDValue Y = Op.getOperand(1);
5358 
5359   if (!VT.isVector()) {
5360     MVT XLenVT = Subtarget.getXLenVT();
5361 
5362     // If X is a nan, replace Y with X. If Y is a nan, replace X with Y. This
5363     // ensures that when one input is a nan, the other will also be a nan
5364     // allowing the nan to propagate. If both inputs are nan, this will swap the
5365     // inputs which is harmless.
5366 
5367     SDValue NewY = Y;
5368     if (!Op->getFlags().hasNoNaNs() && !DAG.isKnownNeverNaN(X)) {
5369       SDValue XIsNonNan = DAG.getSetCC(DL, XLenVT, X, X, ISD::SETOEQ);
5370       NewY = DAG.getSelect(DL, VT, XIsNonNan, Y, X);
5371     }
5372 
5373     SDValue NewX = X;
5374     if (!Op->getFlags().hasNoNaNs() && !DAG.isKnownNeverNaN(Y)) {
5375       SDValue YIsNonNan = DAG.getSetCC(DL, XLenVT, Y, Y, ISD::SETOEQ);
5376       NewX = DAG.getSelect(DL, VT, YIsNonNan, X, Y);
5377     }
5378 
5379     unsigned Opc =
5380         Op.getOpcode() == ISD::FMAXIMUM ? RISCVISD::FMAX : RISCVISD::FMIN;
5381     return DAG.getNode(Opc, DL, VT, NewX, NewY);
5382   }
5383 
5384   // Check no NaNs before converting to fixed vector scalable.
5385   bool XIsNeverNan = Op->getFlags().hasNoNaNs() || DAG.isKnownNeverNaN(X);
5386   bool YIsNeverNan = Op->getFlags().hasNoNaNs() || DAG.isKnownNeverNaN(Y);
5387 
5388   MVT ContainerVT = VT;
5389   if (VT.isFixedLengthVector()) {
5390     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
5391     X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
5392     Y = convertToScalableVector(ContainerVT, Y, DAG, Subtarget);
5393   }
5394 
5395   auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5396 
5397   SDValue NewY = Y;
5398   if (!XIsNeverNan) {
5399     SDValue XIsNonNan = DAG.getNode(RISCVISD::SETCC_VL, DL, Mask.getValueType(),
5400                                     {X, X, DAG.getCondCode(ISD::SETOEQ),
5401                                      DAG.getUNDEF(ContainerVT), Mask, VL});
5402     NewY =
5403         DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, XIsNonNan, Y, X, VL);
5404   }
5405 
5406   SDValue NewX = X;
5407   if (!YIsNeverNan) {
5408     SDValue YIsNonNan = DAG.getNode(RISCVISD::SETCC_VL, DL, Mask.getValueType(),
5409                                     {Y, Y, DAG.getCondCode(ISD::SETOEQ),
5410                                      DAG.getUNDEF(ContainerVT), Mask, VL});
5411     NewX =
5412         DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, YIsNonNan, X, Y, VL);
5413   }
5414 
5415   unsigned Opc =
5416       Op.getOpcode() == ISD::FMAXIMUM ? RISCVISD::VFMAX_VL : RISCVISD::VFMIN_VL;
5417   SDValue Res = DAG.getNode(Opc, DL, ContainerVT, NewX, NewY,
5418                             DAG.getUNDEF(ContainerVT), Mask, VL);
5419   if (VT.isFixedLengthVector())
5420     Res = convertFromScalableVector(VT, Res, DAG, Subtarget);
5421   return Res;
5422 }
5423 
5424 /// Get a RISC-V target specified VL op for a given SDNode.
5425 static unsigned getRISCVVLOp(SDValue Op) {
5426 #define OP_CASE(NODE)                                                          \
5427   case ISD::NODE:                                                              \
5428     return RISCVISD::NODE##_VL;
5429 #define VP_CASE(NODE)                                                          \
5430   case ISD::VP_##NODE:                                                         \
5431     return RISCVISD::NODE##_VL;
5432   // clang-format off
5433   switch (Op.getOpcode()) {
5434   default:
5435     llvm_unreachable("don't have RISC-V specified VL op for this SDNode");
5436   OP_CASE(ADD)
5437   OP_CASE(SUB)
5438   OP_CASE(MUL)
5439   OP_CASE(MULHS)
5440   OP_CASE(MULHU)
5441   OP_CASE(SDIV)
5442   OP_CASE(SREM)
5443   OP_CASE(UDIV)
5444   OP_CASE(UREM)
5445   OP_CASE(SHL)
5446   OP_CASE(SRA)
5447   OP_CASE(SRL)
5448   OP_CASE(ROTL)
5449   OP_CASE(ROTR)
5450   OP_CASE(BSWAP)
5451   OP_CASE(CTTZ)
5452   OP_CASE(CTLZ)
5453   OP_CASE(CTPOP)
5454   OP_CASE(BITREVERSE)
5455   OP_CASE(SADDSAT)
5456   OP_CASE(UADDSAT)
5457   OP_CASE(SSUBSAT)
5458   OP_CASE(USUBSAT)
5459   OP_CASE(FADD)
5460   OP_CASE(FSUB)
5461   OP_CASE(FMUL)
5462   OP_CASE(FDIV)
5463   OP_CASE(FNEG)
5464   OP_CASE(FABS)
5465   OP_CASE(FSQRT)
5466   OP_CASE(SMIN)
5467   OP_CASE(SMAX)
5468   OP_CASE(UMIN)
5469   OP_CASE(UMAX)
5470   OP_CASE(STRICT_FADD)
5471   OP_CASE(STRICT_FSUB)
5472   OP_CASE(STRICT_FMUL)
5473   OP_CASE(STRICT_FDIV)
5474   OP_CASE(STRICT_FSQRT)
5475   VP_CASE(ADD)        // VP_ADD
5476   VP_CASE(SUB)        // VP_SUB
5477   VP_CASE(MUL)        // VP_MUL
5478   VP_CASE(SDIV)       // VP_SDIV
5479   VP_CASE(SREM)       // VP_SREM
5480   VP_CASE(UDIV)       // VP_UDIV
5481   VP_CASE(UREM)       // VP_UREM
5482   VP_CASE(SHL)        // VP_SHL
5483   VP_CASE(FADD)       // VP_FADD
5484   VP_CASE(FSUB)       // VP_FSUB
5485   VP_CASE(FMUL)       // VP_FMUL
5486   VP_CASE(FDIV)       // VP_FDIV
5487   VP_CASE(FNEG)       // VP_FNEG
5488   VP_CASE(FABS)       // VP_FABS
5489   VP_CASE(SMIN)       // VP_SMIN
5490   VP_CASE(SMAX)       // VP_SMAX
5491   VP_CASE(UMIN)       // VP_UMIN
5492   VP_CASE(UMAX)       // VP_UMAX
5493   VP_CASE(FCOPYSIGN)  // VP_FCOPYSIGN
5494   VP_CASE(SETCC)      // VP_SETCC
5495   VP_CASE(SINT_TO_FP) // VP_SINT_TO_FP
5496   VP_CASE(UINT_TO_FP) // VP_UINT_TO_FP
5497   VP_CASE(BITREVERSE) // VP_BITREVERSE
5498   VP_CASE(BSWAP)      // VP_BSWAP
5499   VP_CASE(CTLZ)       // VP_CTLZ
5500   VP_CASE(CTTZ)       // VP_CTTZ
5501   VP_CASE(CTPOP)      // VP_CTPOP
5502   case ISD::CTLZ_ZERO_UNDEF:
5503   case ISD::VP_CTLZ_ZERO_UNDEF:
5504     return RISCVISD::CTLZ_VL;
5505   case ISD::CTTZ_ZERO_UNDEF:
5506   case ISD::VP_CTTZ_ZERO_UNDEF:
5507     return RISCVISD::CTTZ_VL;
5508   case ISD::FMA:
5509   case ISD::VP_FMA:
5510     return RISCVISD::VFMADD_VL;
5511   case ISD::STRICT_FMA:
5512     return RISCVISD::STRICT_VFMADD_VL;
5513   case ISD::AND:
5514   case ISD::VP_AND:
5515     if (Op.getSimpleValueType().getVectorElementType() == MVT::i1)
5516       return RISCVISD::VMAND_VL;
5517     return RISCVISD::AND_VL;
5518   case ISD::OR:
5519   case ISD::VP_OR:
5520     if (Op.getSimpleValueType().getVectorElementType() == MVT::i1)
5521       return RISCVISD::VMOR_VL;
5522     return RISCVISD::OR_VL;
5523   case ISD::XOR:
5524   case ISD::VP_XOR:
5525     if (Op.getSimpleValueType().getVectorElementType() == MVT::i1)
5526       return RISCVISD::VMXOR_VL;
5527     return RISCVISD::XOR_VL;
5528   case ISD::VP_SELECT:
5529     return RISCVISD::VSELECT_VL;
5530   case ISD::VP_MERGE:
5531     return RISCVISD::VP_MERGE_VL;
5532   case ISD::VP_ASHR:
5533     return RISCVISD::SRA_VL;
5534   case ISD::VP_LSHR:
5535     return RISCVISD::SRL_VL;
5536   case ISD::VP_SQRT:
5537     return RISCVISD::FSQRT_VL;
5538   case ISD::VP_SIGN_EXTEND:
5539     return RISCVISD::VSEXT_VL;
5540   case ISD::VP_ZERO_EXTEND:
5541     return RISCVISD::VZEXT_VL;
5542   case ISD::VP_FP_TO_SINT:
5543     return RISCVISD::VFCVT_RTZ_X_F_VL;
5544   case ISD::VP_FP_TO_UINT:
5545     return RISCVISD::VFCVT_RTZ_XU_F_VL;
5546   case ISD::FMINNUM:
5547   case ISD::VP_FMINNUM:
5548     return RISCVISD::VFMIN_VL;
5549   case ISD::FMAXNUM:
5550   case ISD::VP_FMAXNUM:
5551     return RISCVISD::VFMAX_VL;
5552   }
5553   // clang-format on
5554 #undef OP_CASE
5555 #undef VP_CASE
5556 }
5557 
5558 /// Return true if a RISC-V target specified op has a merge operand.
5559 static bool hasMergeOp(unsigned Opcode) {
5560   assert(Opcode > RISCVISD::FIRST_NUMBER &&
5561          Opcode <= RISCVISD::LAST_RISCV_STRICTFP_OPCODE &&
5562          "not a RISC-V target specific op");
5563   static_assert(RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP ==
5564                     125 &&
5565                 RISCVISD::LAST_RISCV_STRICTFP_OPCODE -
5566                         ISD::FIRST_TARGET_STRICTFP_OPCODE ==
5567                     21 &&
5568                 "adding target specific op should update this function");
5569   if (Opcode >= RISCVISD::ADD_VL && Opcode <= RISCVISD::VFMAX_VL)
5570     return true;
5571   if (Opcode == RISCVISD::FCOPYSIGN_VL)
5572     return true;
5573   if (Opcode >= RISCVISD::VWMUL_VL && Opcode <= RISCVISD::VFWSUB_W_VL)
5574     return true;
5575   if (Opcode == RISCVISD::SETCC_VL)
5576     return true;
5577   if (Opcode >= RISCVISD::STRICT_FADD_VL && Opcode <= RISCVISD::STRICT_FDIV_VL)
5578     return true;
5579   return false;
5580 }
5581 
5582 /// Return true if a RISC-V target specified op has a mask operand.
5583 static bool hasMaskOp(unsigned Opcode) {
5584   assert(Opcode > RISCVISD::FIRST_NUMBER &&
5585          Opcode <= RISCVISD::LAST_RISCV_STRICTFP_OPCODE &&
5586          "not a RISC-V target specific op");
5587   static_assert(RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP ==
5588                     125 &&
5589                 RISCVISD::LAST_RISCV_STRICTFP_OPCODE -
5590                         ISD::FIRST_TARGET_STRICTFP_OPCODE ==
5591                     21 &&
5592                 "adding target specific op should update this function");
5593   if (Opcode >= RISCVISD::TRUNCATE_VECTOR_VL && Opcode <= RISCVISD::SETCC_VL)
5594     return true;
5595   if (Opcode >= RISCVISD::VRGATHER_VX_VL && Opcode <= RISCVISD::VFIRST_VL)
5596     return true;
5597   if (Opcode >= RISCVISD::STRICT_FADD_VL &&
5598       Opcode <= RISCVISD::STRICT_VFROUND_NOEXCEPT_VL)
5599     return true;
5600   return false;
5601 }
5602 
5603 static SDValue SplitVectorOp(SDValue Op, SelectionDAG &DAG) {
5604   auto [LoVT, HiVT] = DAG.GetSplitDestVTs(Op.getValueType());
5605   SDLoc DL(Op);
5606 
5607   SmallVector<SDValue, 4> LoOperands(Op.getNumOperands());
5608   SmallVector<SDValue, 4> HiOperands(Op.getNumOperands());
5609 
5610   for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
5611     if (!Op.getOperand(j).getValueType().isVector()) {
5612       LoOperands[j] = Op.getOperand(j);
5613       HiOperands[j] = Op.getOperand(j);
5614       continue;
5615     }
5616     std::tie(LoOperands[j], HiOperands[j]) =
5617         DAG.SplitVector(Op.getOperand(j), DL);
5618   }
5619 
5620   SDValue LoRes =
5621       DAG.getNode(Op.getOpcode(), DL, LoVT, LoOperands, Op->getFlags());
5622   SDValue HiRes =
5623       DAG.getNode(Op.getOpcode(), DL, HiVT, HiOperands, Op->getFlags());
5624 
5625   return DAG.getNode(ISD::CONCAT_VECTORS, DL, Op.getValueType(), LoRes, HiRes);
5626 }
5627 
5628 static SDValue SplitVPOp(SDValue Op, SelectionDAG &DAG) {
5629   assert(ISD::isVPOpcode(Op.getOpcode()) && "Not a VP op");
5630   auto [LoVT, HiVT] = DAG.GetSplitDestVTs(Op.getValueType());
5631   SDLoc DL(Op);
5632 
5633   SmallVector<SDValue, 4> LoOperands(Op.getNumOperands());
5634   SmallVector<SDValue, 4> HiOperands(Op.getNumOperands());
5635 
5636   for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
5637     if (ISD::getVPExplicitVectorLengthIdx(Op.getOpcode()) == j) {
5638       std::tie(LoOperands[j], HiOperands[j]) =
5639           DAG.SplitEVL(Op.getOperand(j), Op.getValueType(), DL);
5640       continue;
5641     }
5642     if (!Op.getOperand(j).getValueType().isVector()) {
5643       LoOperands[j] = Op.getOperand(j);
5644       HiOperands[j] = Op.getOperand(j);
5645       continue;
5646     }
5647     std::tie(LoOperands[j], HiOperands[j]) =
5648         DAG.SplitVector(Op.getOperand(j), DL);
5649   }
5650 
5651   SDValue LoRes =
5652       DAG.getNode(Op.getOpcode(), DL, LoVT, LoOperands, Op->getFlags());
5653   SDValue HiRes =
5654       DAG.getNode(Op.getOpcode(), DL, HiVT, HiOperands, Op->getFlags());
5655 
5656   return DAG.getNode(ISD::CONCAT_VECTORS, DL, Op.getValueType(), LoRes, HiRes);
5657 }
5658 
5659 static SDValue SplitVectorReductionOp(SDValue Op, SelectionDAG &DAG) {
5660   SDLoc DL(Op);
5661 
5662   auto [Lo, Hi] = DAG.SplitVector(Op.getOperand(1), DL);
5663   auto [MaskLo, MaskHi] = DAG.SplitVector(Op.getOperand(2), DL);
5664   auto [EVLLo, EVLHi] =
5665       DAG.SplitEVL(Op.getOperand(3), Op.getOperand(1).getValueType(), DL);
5666 
5667   SDValue ResLo =
5668       DAG.getNode(Op.getOpcode(), DL, Op.getValueType(),
5669                   {Op.getOperand(0), Lo, MaskLo, EVLLo}, Op->getFlags());
5670   return DAG.getNode(Op.getOpcode(), DL, Op.getValueType(),
5671                      {ResLo, Hi, MaskHi, EVLHi}, Op->getFlags());
5672 }
5673 
5674 static SDValue SplitStrictFPVectorOp(SDValue Op, SelectionDAG &DAG) {
5675 
5676   assert(Op->isStrictFPOpcode());
5677 
5678   auto [LoVT, HiVT] = DAG.GetSplitDestVTs(Op->getValueType(0));
5679 
5680   SDVTList LoVTs = DAG.getVTList(LoVT, Op->getValueType(1));
5681   SDVTList HiVTs = DAG.getVTList(HiVT, Op->getValueType(1));
5682 
5683   SDLoc DL(Op);
5684 
5685   SmallVector<SDValue, 4> LoOperands(Op.getNumOperands());
5686   SmallVector<SDValue, 4> HiOperands(Op.getNumOperands());
5687 
5688   for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
5689     if (!Op.getOperand(j).getValueType().isVector()) {
5690       LoOperands[j] = Op.getOperand(j);
5691       HiOperands[j] = Op.getOperand(j);
5692       continue;
5693     }
5694     std::tie(LoOperands[j], HiOperands[j]) =
5695         DAG.SplitVector(Op.getOperand(j), DL);
5696   }
5697 
5698   SDValue LoRes =
5699       DAG.getNode(Op.getOpcode(), DL, LoVTs, LoOperands, Op->getFlags());
5700   HiOperands[0] = LoRes.getValue(1);
5701   SDValue HiRes =
5702       DAG.getNode(Op.getOpcode(), DL, HiVTs, HiOperands, Op->getFlags());
5703 
5704   SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, DL, Op->getValueType(0),
5705                           LoRes.getValue(0), HiRes.getValue(0));
5706   return DAG.getMergeValues({V, HiRes.getValue(1)}, DL);
5707 }
5708 
5709 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
5710                                             SelectionDAG &DAG) const {
5711   switch (Op.getOpcode()) {
5712   default:
5713     report_fatal_error("unimplemented operand");
5714   case ISD::ATOMIC_FENCE:
5715     return LowerATOMIC_FENCE(Op, DAG, Subtarget);
5716   case ISD::GlobalAddress:
5717     return lowerGlobalAddress(Op, DAG);
5718   case ISD::BlockAddress:
5719     return lowerBlockAddress(Op, DAG);
5720   case ISD::ConstantPool:
5721     return lowerConstantPool(Op, DAG);
5722   case ISD::JumpTable:
5723     return lowerJumpTable(Op, DAG);
5724   case ISD::GlobalTLSAddress:
5725     return lowerGlobalTLSAddress(Op, DAG);
5726   case ISD::Constant:
5727     return lowerConstant(Op, DAG, Subtarget);
5728   case ISD::SELECT:
5729     return lowerSELECT(Op, DAG);
5730   case ISD::BRCOND:
5731     return lowerBRCOND(Op, DAG);
5732   case ISD::VASTART:
5733     return lowerVASTART(Op, DAG);
5734   case ISD::FRAMEADDR:
5735     return lowerFRAMEADDR(Op, DAG);
5736   case ISD::RETURNADDR:
5737     return lowerRETURNADDR(Op, DAG);
5738   case ISD::SHL_PARTS:
5739     return lowerShiftLeftParts(Op, DAG);
5740   case ISD::SRA_PARTS:
5741     return lowerShiftRightParts(Op, DAG, true);
5742   case ISD::SRL_PARTS:
5743     return lowerShiftRightParts(Op, DAG, false);
5744   case ISD::ROTL:
5745   case ISD::ROTR:
5746     if (Op.getValueType().isFixedLengthVector()) {
5747       assert(Subtarget.hasStdExtZvkb());
5748       return lowerToScalableOp(Op, DAG);
5749     }
5750     assert(Subtarget.hasVendorXTHeadBb() &&
5751            !(Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) &&
5752            "Unexpected custom legalization");
5753     // XTHeadBb only supports rotate by constant.
5754     if (!isa<ConstantSDNode>(Op.getOperand(1)))
5755       return SDValue();
5756     return Op;
5757   case ISD::BITCAST: {
5758     SDLoc DL(Op);
5759     EVT VT = Op.getValueType();
5760     SDValue Op0 = Op.getOperand(0);
5761     EVT Op0VT = Op0.getValueType();
5762     MVT XLenVT = Subtarget.getXLenVT();
5763     if (VT == MVT::f16 && Op0VT == MVT::i16 &&
5764         Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin()) {
5765       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
5766       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
5767       return FPConv;
5768     }
5769     if (VT == MVT::bf16 && Op0VT == MVT::i16 &&
5770         Subtarget.hasStdExtZfbfmin()) {
5771       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
5772       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::bf16, NewOp0);
5773       return FPConv;
5774     }
5775     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
5776         Subtarget.hasStdExtFOrZfinx()) {
5777       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
5778       SDValue FPConv =
5779           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
5780       return FPConv;
5781     }
5782     if (VT == MVT::f64 && Op0VT == MVT::i64 && XLenVT == MVT::i32 &&
5783         Subtarget.hasStdExtZfa()) {
5784       SDValue Lo, Hi;
5785       std::tie(Lo, Hi) = DAG.SplitScalar(Op0, DL, MVT::i32, MVT::i32);
5786       SDValue RetReg =
5787           DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
5788       return RetReg;
5789     }
5790 
5791     // Consider other scalar<->scalar casts as legal if the types are legal.
5792     // Otherwise expand them.
5793     if (!VT.isVector() && !Op0VT.isVector()) {
5794       if (isTypeLegal(VT) && isTypeLegal(Op0VT))
5795         return Op;
5796       return SDValue();
5797     }
5798 
5799     assert(!VT.isScalableVector() && !Op0VT.isScalableVector() &&
5800            "Unexpected types");
5801 
5802     if (VT.isFixedLengthVector()) {
5803       // We can handle fixed length vector bitcasts with a simple replacement
5804       // in isel.
5805       if (Op0VT.isFixedLengthVector())
5806         return Op;
5807       // When bitcasting from scalar to fixed-length vector, insert the scalar
5808       // into a one-element vector of the result type, and perform a vector
5809       // bitcast.
5810       if (!Op0VT.isVector()) {
5811         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
5812         if (!isTypeLegal(BVT))
5813           return SDValue();
5814         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
5815                                               DAG.getUNDEF(BVT), Op0,
5816                                               DAG.getConstant(0, DL, XLenVT)));
5817       }
5818       return SDValue();
5819     }
5820     // Custom-legalize bitcasts from fixed-length vector types to scalar types
5821     // thus: bitcast the vector to a one-element vector type whose element type
5822     // is the same as the result type, and extract the first element.
5823     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
5824       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
5825       if (!isTypeLegal(BVT))
5826         return SDValue();
5827       SDValue BVec = DAG.getBitcast(BVT, Op0);
5828       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
5829                          DAG.getConstant(0, DL, XLenVT));
5830     }
5831     return SDValue();
5832   }
5833   case ISD::INTRINSIC_WO_CHAIN:
5834     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
5835   case ISD::INTRINSIC_W_CHAIN:
5836     return LowerINTRINSIC_W_CHAIN(Op, DAG);
5837   case ISD::INTRINSIC_VOID:
5838     return LowerINTRINSIC_VOID(Op, DAG);
5839   case ISD::IS_FPCLASS:
5840     return LowerIS_FPCLASS(Op, DAG);
5841   case ISD::BITREVERSE: {
5842     MVT VT = Op.getSimpleValueType();
5843     if (VT.isFixedLengthVector()) {
5844       assert(Subtarget.hasStdExtZvbb());
5845       return lowerToScalableOp(Op, DAG);
5846     }
5847     SDLoc DL(Op);
5848     assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
5849     assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
5850     // Expand bitreverse to a bswap(rev8) followed by brev8.
5851     SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
5852     return DAG.getNode(RISCVISD::BREV8, DL, VT, BSwap);
5853   }
5854   case ISD::TRUNCATE:
5855     // Only custom-lower vector truncates
5856     if (!Op.getSimpleValueType().isVector())
5857       return Op;
5858     return lowerVectorTruncLike(Op, DAG);
5859   case ISD::ANY_EXTEND:
5860   case ISD::ZERO_EXTEND:
5861     if (Op.getOperand(0).getValueType().isVector() &&
5862         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
5863       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
5864     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
5865   case ISD::SIGN_EXTEND:
5866     if (Op.getOperand(0).getValueType().isVector() &&
5867         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
5868       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
5869     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
5870   case ISD::SPLAT_VECTOR_PARTS:
5871     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
5872   case ISD::INSERT_VECTOR_ELT:
5873     return lowerINSERT_VECTOR_ELT(Op, DAG);
5874   case ISD::EXTRACT_VECTOR_ELT:
5875     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
5876   case ISD::SCALAR_TO_VECTOR: {
5877     MVT VT = Op.getSimpleValueType();
5878     SDLoc DL(Op);
5879     SDValue Scalar = Op.getOperand(0);
5880     if (VT.getVectorElementType() == MVT::i1) {
5881       MVT WideVT = VT.changeVectorElementType(MVT::i8);
5882       SDValue V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, WideVT, Scalar);
5883       return DAG.getNode(ISD::TRUNCATE, DL, VT, V);
5884     }
5885     MVT ContainerVT = VT;
5886     if (VT.isFixedLengthVector())
5887       ContainerVT = getContainerForFixedLengthVector(VT);
5888     SDValue VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5889     Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), Scalar);
5890     SDValue V = DAG.getNode(RISCVISD::VMV_S_X_VL, DL, ContainerVT,
5891                             DAG.getUNDEF(ContainerVT), Scalar, VL);
5892     if (VT.isFixedLengthVector())
5893       V = convertFromScalableVector(VT, V, DAG, Subtarget);
5894     return V;
5895   }
5896   case ISD::VSCALE: {
5897     MVT XLenVT = Subtarget.getXLenVT();
5898     MVT VT = Op.getSimpleValueType();
5899     SDLoc DL(Op);
5900     SDValue Res = DAG.getNode(RISCVISD::READ_VLENB, DL, XLenVT);
5901     // We define our scalable vector types for lmul=1 to use a 64 bit known
5902     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
5903     // vscale as VLENB / 8.
5904     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
5905     if (Subtarget.getRealMinVLen() < RISCV::RVVBitsPerBlock)
5906       report_fatal_error("Support for VLEN==32 is incomplete.");
5907     // We assume VLENB is a multiple of 8. We manually choose the best shift
5908     // here because SimplifyDemandedBits isn't always able to simplify it.
5909     uint64_t Val = Op.getConstantOperandVal(0);
5910     if (isPowerOf2_64(Val)) {
5911       uint64_t Log2 = Log2_64(Val);
5912       if (Log2 < 3)
5913         Res = DAG.getNode(ISD::SRL, DL, XLenVT, Res,
5914                           DAG.getConstant(3 - Log2, DL, VT));
5915       else if (Log2 > 3)
5916         Res = DAG.getNode(ISD::SHL, DL, XLenVT, Res,
5917                           DAG.getConstant(Log2 - 3, DL, XLenVT));
5918     } else if ((Val % 8) == 0) {
5919       // If the multiplier is a multiple of 8, scale it down to avoid needing
5920       // to shift the VLENB value.
5921       Res = DAG.getNode(ISD::MUL, DL, XLenVT, Res,
5922                         DAG.getConstant(Val / 8, DL, XLenVT));
5923     } else {
5924       SDValue VScale = DAG.getNode(ISD::SRL, DL, XLenVT, Res,
5925                                    DAG.getConstant(3, DL, XLenVT));
5926       Res = DAG.getNode(ISD::MUL, DL, XLenVT, VScale,
5927                         DAG.getConstant(Val, DL, XLenVT));
5928     }
5929     return DAG.getNode(ISD::TRUNCATE, DL, VT, Res);
5930   }
5931   case ISD::FPOWI: {
5932     // Custom promote f16 powi with illegal i32 integer type on RV64. Once
5933     // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
5934     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
5935         Op.getOperand(1).getValueType() == MVT::i32) {
5936       SDLoc DL(Op);
5937       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
5938       SDValue Powi =
5939           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
5940       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
5941                          DAG.getIntPtrConstant(0, DL, /*isTarget=*/true));
5942     }
5943     return SDValue();
5944   }
5945   case ISD::FMAXIMUM:
5946   case ISD::FMINIMUM:
5947     if (Op.getValueType() == MVT::nxv32f16 &&
5948         (Subtarget.hasVInstructionsF16Minimal() &&
5949          !Subtarget.hasVInstructionsF16()))
5950       return SplitVectorOp(Op, DAG);
5951     return lowerFMAXIMUM_FMINIMUM(Op, DAG, Subtarget);
5952   case ISD::FP_EXTEND: {
5953     SDLoc DL(Op);
5954     EVT VT = Op.getValueType();
5955     SDValue Op0 = Op.getOperand(0);
5956     EVT Op0VT = Op0.getValueType();
5957     if (VT == MVT::f32 && Op0VT == MVT::bf16 && Subtarget.hasStdExtZfbfmin())
5958       return DAG.getNode(RISCVISD::FP_EXTEND_BF16, DL, MVT::f32, Op0);
5959     if (VT == MVT::f64 && Op0VT == MVT::bf16 && Subtarget.hasStdExtZfbfmin()) {
5960       SDValue FloatVal =
5961           DAG.getNode(RISCVISD::FP_EXTEND_BF16, DL, MVT::f32, Op0);
5962       return DAG.getNode(ISD::FP_EXTEND, DL, MVT::f64, FloatVal);
5963     }
5964 
5965     if (!Op.getValueType().isVector())
5966       return Op;
5967     return lowerVectorFPExtendOrRoundLike(Op, DAG);
5968   }
5969   case ISD::FP_ROUND: {
5970     SDLoc DL(Op);
5971     EVT VT = Op.getValueType();
5972     SDValue Op0 = Op.getOperand(0);
5973     EVT Op0VT = Op0.getValueType();
5974     if (VT == MVT::bf16 && Op0VT == MVT::f32 && Subtarget.hasStdExtZfbfmin())
5975       return DAG.getNode(RISCVISD::FP_ROUND_BF16, DL, MVT::bf16, Op0);
5976     if (VT == MVT::bf16 && Op0VT == MVT::f64 && Subtarget.hasStdExtZfbfmin() &&
5977         Subtarget.hasStdExtDOrZdinx()) {
5978       SDValue FloatVal =
5979           DAG.getNode(ISD::FP_ROUND, DL, MVT::f32, Op0,
5980                       DAG.getIntPtrConstant(0, DL, /*isTarget=*/true));
5981       return DAG.getNode(RISCVISD::FP_ROUND_BF16, DL, MVT::bf16, FloatVal);
5982     }
5983 
5984     if (!Op.getValueType().isVector())
5985       return Op;
5986     return lowerVectorFPExtendOrRoundLike(Op, DAG);
5987   }
5988   case ISD::STRICT_FP_ROUND:
5989   case ISD::STRICT_FP_EXTEND:
5990     return lowerStrictFPExtendOrRoundLike(Op, DAG);
5991   case ISD::SINT_TO_FP:
5992   case ISD::UINT_TO_FP:
5993     if (Op.getValueType().isVector() &&
5994         Op.getValueType().getScalarType() == MVT::f16 &&
5995         (Subtarget.hasVInstructionsF16Minimal() &&
5996          !Subtarget.hasVInstructionsF16())) {
5997       if (Op.getValueType() == MVT::nxv32f16)
5998         return SplitVectorOp(Op, DAG);
5999       // int -> f32
6000       SDLoc DL(Op);
6001       MVT NVT =
6002           MVT::getVectorVT(MVT::f32, Op.getValueType().getVectorElementCount());
6003       SDValue NC = DAG.getNode(Op.getOpcode(), DL, NVT, Op->ops());
6004       // f32 -> f16
6005       return DAG.getNode(ISD::FP_ROUND, DL, Op.getValueType(), NC,
6006                          DAG.getIntPtrConstant(0, DL, /*isTarget=*/true));
6007     }
6008     [[fallthrough]];
6009   case ISD::FP_TO_SINT:
6010   case ISD::FP_TO_UINT:
6011     if (SDValue Op1 = Op.getOperand(0);
6012         Op1.getValueType().isVector() &&
6013         Op1.getValueType().getScalarType() == MVT::f16 &&
6014         (Subtarget.hasVInstructionsF16Minimal() &&
6015          !Subtarget.hasVInstructionsF16())) {
6016       if (Op1.getValueType() == MVT::nxv32f16)
6017         return SplitVectorOp(Op, DAG);
6018       // f16 -> f32
6019       SDLoc DL(Op);
6020       MVT NVT = MVT::getVectorVT(MVT::f32,
6021                                  Op1.getValueType().getVectorElementCount());
6022       SDValue WidenVec = DAG.getNode(ISD::FP_EXTEND, DL, NVT, Op1);
6023       // f32 -> int
6024       return DAG.getNode(Op.getOpcode(), DL, Op.getValueType(), WidenVec);
6025     }
6026     [[fallthrough]];
6027   case ISD::STRICT_FP_TO_SINT:
6028   case ISD::STRICT_FP_TO_UINT:
6029   case ISD::STRICT_SINT_TO_FP:
6030   case ISD::STRICT_UINT_TO_FP: {
6031     // RVV can only do fp<->int conversions to types half/double the size as
6032     // the source. We custom-lower any conversions that do two hops into
6033     // sequences.
6034     MVT VT = Op.getSimpleValueType();
6035     if (!VT.isVector())
6036       return Op;
6037     SDLoc DL(Op);
6038     bool IsStrict = Op->isStrictFPOpcode();
6039     SDValue Src = Op.getOperand(0 + IsStrict);
6040     MVT EltVT = VT.getVectorElementType();
6041     MVT SrcVT = Src.getSimpleValueType();
6042     MVT SrcEltVT = SrcVT.getVectorElementType();
6043     unsigned EltSize = EltVT.getSizeInBits();
6044     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
6045     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
6046            "Unexpected vector element types");
6047 
6048     bool IsInt2FP = SrcEltVT.isInteger();
6049     // Widening conversions
6050     if (EltSize > (2 * SrcEltSize)) {
6051       if (IsInt2FP) {
6052         // Do a regular integer sign/zero extension then convert to float.
6053         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize / 2),
6054                                       VT.getVectorElementCount());
6055         unsigned ExtOpcode = (Op.getOpcode() == ISD::UINT_TO_FP ||
6056                               Op.getOpcode() == ISD::STRICT_UINT_TO_FP)
6057                                  ? ISD::ZERO_EXTEND
6058                                  : ISD::SIGN_EXTEND;
6059         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
6060         if (IsStrict)
6061           return DAG.getNode(Op.getOpcode(), DL, Op->getVTList(),
6062                              Op.getOperand(0), Ext);
6063         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
6064       }
6065       // FP2Int
6066       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
6067       // Do one doubling fp_extend then complete the operation by converting
6068       // to int.
6069       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
6070       if (IsStrict) {
6071         auto [FExt, Chain] =
6072             DAG.getStrictFPExtendOrRound(Src, Op.getOperand(0), DL, InterimFVT);
6073         return DAG.getNode(Op.getOpcode(), DL, Op->getVTList(), Chain, FExt);
6074       }
6075       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
6076       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
6077     }
6078 
6079     // Narrowing conversions
6080     if (SrcEltSize > (2 * EltSize)) {
6081       if (IsInt2FP) {
6082         // One narrowing int_to_fp, then an fp_round.
6083         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
6084         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
6085         if (IsStrict) {
6086           SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL,
6087                                        DAG.getVTList(InterimFVT, MVT::Other),
6088                                        Op.getOperand(0), Src);
6089           SDValue Chain = Int2FP.getValue(1);
6090           return DAG.getStrictFPExtendOrRound(Int2FP, Chain, DL, VT).first;
6091         }
6092         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
6093         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
6094       }
6095       // FP2Int
6096       // One narrowing fp_to_int, then truncate the integer. If the float isn't
6097       // representable by the integer, the result is poison.
6098       MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
6099                                     VT.getVectorElementCount());
6100       if (IsStrict) {
6101         SDValue FP2Int =
6102             DAG.getNode(Op.getOpcode(), DL, DAG.getVTList(IVecVT, MVT::Other),
6103                         Op.getOperand(0), Src);
6104         SDValue Res = DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
6105         return DAG.getMergeValues({Res, FP2Int.getValue(1)}, DL);
6106       }
6107       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
6108       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
6109     }
6110 
6111     // Scalable vectors can exit here. Patterns will handle equally-sized
6112     // conversions halving/doubling ones.
6113     if (!VT.isFixedLengthVector())
6114       return Op;
6115 
6116     // For fixed-length vectors we lower to a custom "VL" node.
6117     unsigned RVVOpc = 0;
6118     switch (Op.getOpcode()) {
6119     default:
6120       llvm_unreachable("Impossible opcode");
6121     case ISD::FP_TO_SINT:
6122       RVVOpc = RISCVISD::VFCVT_RTZ_X_F_VL;
6123       break;
6124     case ISD::FP_TO_UINT:
6125       RVVOpc = RISCVISD::VFCVT_RTZ_XU_F_VL;
6126       break;
6127     case ISD::SINT_TO_FP:
6128       RVVOpc = RISCVISD::SINT_TO_FP_VL;
6129       break;
6130     case ISD::UINT_TO_FP:
6131       RVVOpc = RISCVISD::UINT_TO_FP_VL;
6132       break;
6133     case ISD::STRICT_FP_TO_SINT:
6134       RVVOpc = RISCVISD::STRICT_VFCVT_RTZ_X_F_VL;
6135       break;
6136     case ISD::STRICT_FP_TO_UINT:
6137       RVVOpc = RISCVISD::STRICT_VFCVT_RTZ_XU_F_VL;
6138       break;
6139     case ISD::STRICT_SINT_TO_FP:
6140       RVVOpc = RISCVISD::STRICT_SINT_TO_FP_VL;
6141       break;
6142     case ISD::STRICT_UINT_TO_FP:
6143       RVVOpc = RISCVISD::STRICT_UINT_TO_FP_VL;
6144       break;
6145     }
6146 
6147     MVT ContainerVT = getContainerForFixedLengthVector(VT);
6148     MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
6149     assert(ContainerVT.getVectorElementCount() == SrcContainerVT.getVectorElementCount() &&
6150            "Expected same element count");
6151 
6152     auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6153 
6154     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
6155     if (IsStrict) {
6156       Src = DAG.getNode(RVVOpc, DL, DAG.getVTList(ContainerVT, MVT::Other),
6157                         Op.getOperand(0), Src, Mask, VL);
6158       SDValue SubVec = convertFromScalableVector(VT, Src, DAG, Subtarget);
6159       return DAG.getMergeValues({SubVec, Src.getValue(1)}, DL);
6160     }
6161     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
6162     return convertFromScalableVector(VT, Src, DAG, Subtarget);
6163   }
6164   case ISD::FP_TO_SINT_SAT:
6165   case ISD::FP_TO_UINT_SAT:
6166     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
6167   case ISD::FP_TO_BF16: {
6168     // Custom lower to ensure the libcall return is passed in an FPR on hard
6169     // float ABIs.
6170     assert(!Subtarget.isSoftFPABI() && "Unexpected custom legalization");
6171     SDLoc DL(Op);
6172     MakeLibCallOptions CallOptions;
6173     RTLIB::Libcall LC =
6174         RTLIB::getFPROUND(Op.getOperand(0).getValueType(), MVT::bf16);
6175     SDValue Res =
6176         makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
6177     if (Subtarget.is64Bit() && !RV64LegalI32)
6178       return DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Res);
6179     return DAG.getBitcast(MVT::i32, Res);
6180   }
6181   case ISD::BF16_TO_FP: {
6182     assert(Subtarget.hasStdExtFOrZfinx() && "Unexpected custom legalization");
6183     MVT VT = Op.getSimpleValueType();
6184     SDLoc DL(Op);
6185     Op = DAG.getNode(
6186         ISD::SHL, DL, Op.getOperand(0).getValueType(), Op.getOperand(0),
6187         DAG.getShiftAmountConstant(16, Op.getOperand(0).getValueType(), DL));
6188     SDValue Res = Subtarget.is64Bit()
6189                       ? DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Op)
6190                       : DAG.getBitcast(MVT::f32, Op);
6191     // fp_extend if the target VT is bigger than f32.
6192     if (VT != MVT::f32)
6193       return DAG.getNode(ISD::FP_EXTEND, DL, VT, Res);
6194     return Res;
6195   }
6196   case ISD::FP_TO_FP16: {
6197     // Custom lower to ensure the libcall return is passed in an FPR on hard
6198     // float ABIs.
6199     assert(Subtarget.hasStdExtFOrZfinx() && "Unexpected custom legalisation");
6200     SDLoc DL(Op);
6201     MakeLibCallOptions CallOptions;
6202     RTLIB::Libcall LC =
6203         RTLIB::getFPROUND(Op.getOperand(0).getValueType(), MVT::f16);
6204     SDValue Res =
6205         makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
6206     if (Subtarget.is64Bit() && !RV64LegalI32)
6207       return DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Res);
6208     return DAG.getBitcast(MVT::i32, Res);
6209   }
6210   case ISD::FP16_TO_FP: {
6211     // Custom lower to ensure the libcall argument is passed in an FPR on hard
6212     // float ABIs.
6213     assert(Subtarget.hasStdExtFOrZfinx() && "Unexpected custom legalisation");
6214     SDLoc DL(Op);
6215     MakeLibCallOptions CallOptions;
6216     SDValue Arg = Subtarget.is64Bit()
6217                       ? DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32,
6218                                     Op.getOperand(0))
6219                       : DAG.getBitcast(MVT::f32, Op.getOperand(0));
6220     SDValue Res =
6221         makeLibCall(DAG, RTLIB::FPEXT_F16_F32, MVT::f32, Arg, CallOptions, DL)
6222             .first;
6223     return Res;
6224   }
6225   case ISD::FTRUNC:
6226   case ISD::FCEIL:
6227   case ISD::FFLOOR:
6228   case ISD::FNEARBYINT:
6229   case ISD::FRINT:
6230   case ISD::FROUND:
6231   case ISD::FROUNDEVEN:
6232     return lowerFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
6233   case ISD::LRINT:
6234   case ISD::LLRINT:
6235     return lowerVectorXRINT(Op, DAG, Subtarget);
6236   case ISD::VECREDUCE_ADD:
6237   case ISD::VECREDUCE_UMAX:
6238   case ISD::VECREDUCE_SMAX:
6239   case ISD::VECREDUCE_UMIN:
6240   case ISD::VECREDUCE_SMIN:
6241     return lowerVECREDUCE(Op, DAG);
6242   case ISD::VECREDUCE_AND:
6243   case ISD::VECREDUCE_OR:
6244   case ISD::VECREDUCE_XOR:
6245     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
6246       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
6247     return lowerVECREDUCE(Op, DAG);
6248   case ISD::VECREDUCE_FADD:
6249   case ISD::VECREDUCE_SEQ_FADD:
6250   case ISD::VECREDUCE_FMIN:
6251   case ISD::VECREDUCE_FMAX:
6252     return lowerFPVECREDUCE(Op, DAG);
6253   case ISD::VP_REDUCE_ADD:
6254   case ISD::VP_REDUCE_UMAX:
6255   case ISD::VP_REDUCE_SMAX:
6256   case ISD::VP_REDUCE_UMIN:
6257   case ISD::VP_REDUCE_SMIN:
6258   case ISD::VP_REDUCE_FADD:
6259   case ISD::VP_REDUCE_SEQ_FADD:
6260   case ISD::VP_REDUCE_FMIN:
6261   case ISD::VP_REDUCE_FMAX:
6262     if (Op.getOperand(1).getValueType() == MVT::nxv32f16 &&
6263         (Subtarget.hasVInstructionsF16Minimal() &&
6264          !Subtarget.hasVInstructionsF16()))
6265       return SplitVectorReductionOp(Op, DAG);
6266     return lowerVPREDUCE(Op, DAG);
6267   case ISD::VP_REDUCE_AND:
6268   case ISD::VP_REDUCE_OR:
6269   case ISD::VP_REDUCE_XOR:
6270     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
6271       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
6272     return lowerVPREDUCE(Op, DAG);
6273   case ISD::UNDEF: {
6274     MVT ContainerVT = getContainerForFixedLengthVector(Op.getSimpleValueType());
6275     return convertFromScalableVector(Op.getSimpleValueType(),
6276                                      DAG.getUNDEF(ContainerVT), DAG, Subtarget);
6277   }
6278   case ISD::INSERT_SUBVECTOR:
6279     return lowerINSERT_SUBVECTOR(Op, DAG);
6280   case ISD::EXTRACT_SUBVECTOR:
6281     return lowerEXTRACT_SUBVECTOR(Op, DAG);
6282   case ISD::VECTOR_DEINTERLEAVE:
6283     return lowerVECTOR_DEINTERLEAVE(Op, DAG);
6284   case ISD::VECTOR_INTERLEAVE:
6285     return lowerVECTOR_INTERLEAVE(Op, DAG);
6286   case ISD::STEP_VECTOR:
6287     return lowerSTEP_VECTOR(Op, DAG);
6288   case ISD::VECTOR_REVERSE:
6289     return lowerVECTOR_REVERSE(Op, DAG);
6290   case ISD::VECTOR_SPLICE:
6291     return lowerVECTOR_SPLICE(Op, DAG);
6292   case ISD::BUILD_VECTOR:
6293     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
6294   case ISD::SPLAT_VECTOR:
6295     if (Op.getValueType().getScalarType() == MVT::f16 &&
6296         (Subtarget.hasVInstructionsF16Minimal() &&
6297          !Subtarget.hasVInstructionsF16())) {
6298       if (Op.getValueType() == MVT::nxv32f16)
6299         return SplitVectorOp(Op, DAG);
6300       SDLoc DL(Op);
6301       SDValue NewScalar =
6302           DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
6303       SDValue NewSplat = DAG.getNode(
6304           ISD::SPLAT_VECTOR, DL,
6305           MVT::getVectorVT(MVT::f32, Op.getValueType().getVectorElementCount()),
6306           NewScalar);
6307       return DAG.getNode(ISD::FP_ROUND, DL, Op.getValueType(), NewSplat,
6308                          DAG.getIntPtrConstant(0, DL, /*isTarget=*/true));
6309     }
6310     if (Op.getValueType().getVectorElementType() == MVT::i1)
6311       return lowerVectorMaskSplat(Op, DAG);
6312     return SDValue();
6313   case ISD::VECTOR_SHUFFLE:
6314     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
6315   case ISD::CONCAT_VECTORS: {
6316     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
6317     // better than going through the stack, as the default expansion does.
6318     SDLoc DL(Op);
6319     MVT VT = Op.getSimpleValueType();
6320     unsigned NumOpElts =
6321         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
6322     SDValue Vec = DAG.getUNDEF(VT);
6323     for (const auto &OpIdx : enumerate(Op->ops())) {
6324       SDValue SubVec = OpIdx.value();
6325       // Don't insert undef subvectors.
6326       if (SubVec.isUndef())
6327         continue;
6328       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
6329                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
6330     }
6331     return Vec;
6332   }
6333   case ISD::LOAD:
6334     if (auto V = expandUnalignedRVVLoad(Op, DAG))
6335       return V;
6336     if (Op.getValueType().isFixedLengthVector())
6337       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
6338     return Op;
6339   case ISD::STORE:
6340     if (auto V = expandUnalignedRVVStore(Op, DAG))
6341       return V;
6342     if (Op.getOperand(1).getValueType().isFixedLengthVector())
6343       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
6344     return Op;
6345   case ISD::MLOAD:
6346   case ISD::VP_LOAD:
6347     return lowerMaskedLoad(Op, DAG);
6348   case ISD::MSTORE:
6349   case ISD::VP_STORE:
6350     return lowerMaskedStore(Op, DAG);
6351   case ISD::SELECT_CC: {
6352     // This occurs because we custom legalize SETGT and SETUGT for setcc. That
6353     // causes LegalizeDAG to think we need to custom legalize select_cc. Expand
6354     // into separate SETCC+SELECT just like LegalizeDAG.
6355     SDValue Tmp1 = Op.getOperand(0);
6356     SDValue Tmp2 = Op.getOperand(1);
6357     SDValue True = Op.getOperand(2);
6358     SDValue False = Op.getOperand(3);
6359     EVT VT = Op.getValueType();
6360     SDValue CC = Op.getOperand(4);
6361     EVT CmpVT = Tmp1.getValueType();
6362     EVT CCVT =
6363         getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
6364     SDLoc DL(Op);
6365     SDValue Cond =
6366         DAG.getNode(ISD::SETCC, DL, CCVT, Tmp1, Tmp2, CC, Op->getFlags());
6367     return DAG.getSelect(DL, VT, Cond, True, False);
6368   }
6369   case ISD::SETCC: {
6370     MVT OpVT = Op.getOperand(0).getSimpleValueType();
6371     if (OpVT.isScalarInteger()) {
6372       MVT VT = Op.getSimpleValueType();
6373       SDValue LHS = Op.getOperand(0);
6374       SDValue RHS = Op.getOperand(1);
6375       ISD::CondCode CCVal = cast<CondCodeSDNode>(Op.getOperand(2))->get();
6376       assert((CCVal == ISD::SETGT || CCVal == ISD::SETUGT) &&
6377              "Unexpected CondCode");
6378 
6379       SDLoc DL(Op);
6380 
6381       // If the RHS is a constant in the range [-2049, 0) or (0, 2046], we can
6382       // convert this to the equivalent of (set(u)ge X, C+1) by using
6383       // (xori (slti(u) X, C+1), 1). This avoids materializing a small constant
6384       // in a register.
6385       if (isa<ConstantSDNode>(RHS)) {
6386         int64_t Imm = cast<ConstantSDNode>(RHS)->getSExtValue();
6387         if (Imm != 0 && isInt<12>((uint64_t)Imm + 1)) {
6388           // If this is an unsigned compare and the constant is -1, incrementing
6389           // the constant would change behavior. The result should be false.
6390           if (CCVal == ISD::SETUGT && Imm == -1)
6391             return DAG.getConstant(0, DL, VT);
6392           // Using getSetCCSwappedOperands will convert SET(U)GT->SET(U)LT.
6393           CCVal = ISD::getSetCCSwappedOperands(CCVal);
6394           SDValue SetCC = DAG.getSetCC(
6395               DL, VT, LHS, DAG.getConstant(Imm + 1, DL, OpVT), CCVal);
6396           return DAG.getLogicalNOT(DL, SetCC, VT);
6397         }
6398       }
6399 
6400       // Not a constant we could handle, swap the operands and condition code to
6401       // SETLT/SETULT.
6402       CCVal = ISD::getSetCCSwappedOperands(CCVal);
6403       return DAG.getSetCC(DL, VT, RHS, LHS, CCVal);
6404     }
6405 
6406     if (Op.getOperand(0).getSimpleValueType() == MVT::nxv32f16 &&
6407         (Subtarget.hasVInstructionsF16Minimal() &&
6408          !Subtarget.hasVInstructionsF16()))
6409       return SplitVectorOp(Op, DAG);
6410 
6411     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
6412   }
6413   case ISD::ADD:
6414   case ISD::SUB:
6415   case ISD::MUL:
6416   case ISD::MULHS:
6417   case ISD::MULHU:
6418   case ISD::AND:
6419   case ISD::OR:
6420   case ISD::XOR:
6421   case ISD::SDIV:
6422   case ISD::SREM:
6423   case ISD::UDIV:
6424   case ISD::UREM:
6425   case ISD::BSWAP:
6426   case ISD::CTPOP:
6427     return lowerToScalableOp(Op, DAG);
6428   case ISD::SHL:
6429   case ISD::SRA:
6430   case ISD::SRL:
6431     if (Op.getSimpleValueType().isFixedLengthVector())
6432       return lowerToScalableOp(Op, DAG);
6433     // This can be called for an i32 shift amount that needs to be promoted.
6434     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
6435            "Unexpected custom legalisation");
6436     return SDValue();
6437   case ISD::FADD:
6438   case ISD::FSUB:
6439   case ISD::FMUL:
6440   case ISD::FDIV:
6441   case ISD::FNEG:
6442   case ISD::FABS:
6443   case ISD::FSQRT:
6444   case ISD::FMA:
6445   case ISD::FMINNUM:
6446   case ISD::FMAXNUM:
6447     if (Op.getValueType() == MVT::nxv32f16 &&
6448         (Subtarget.hasVInstructionsF16Minimal() &&
6449          !Subtarget.hasVInstructionsF16()))
6450       return SplitVectorOp(Op, DAG);
6451     [[fallthrough]];
6452   case ISD::SADDSAT:
6453   case ISD::UADDSAT:
6454   case ISD::SSUBSAT:
6455   case ISD::USUBSAT:
6456   case ISD::SMIN:
6457   case ISD::SMAX:
6458   case ISD::UMIN:
6459   case ISD::UMAX:
6460     return lowerToScalableOp(Op, DAG);
6461   case ISD::ABS:
6462   case ISD::VP_ABS:
6463     return lowerABS(Op, DAG);
6464   case ISD::CTLZ:
6465   case ISD::CTLZ_ZERO_UNDEF:
6466   case ISD::CTTZ:
6467   case ISD::CTTZ_ZERO_UNDEF:
6468     if (Subtarget.hasStdExtZvbb())
6469       return lowerToScalableOp(Op, DAG);
6470     assert(Op.getOpcode() != ISD::CTTZ);
6471     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
6472   case ISD::VSELECT:
6473     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
6474   case ISD::FCOPYSIGN:
6475     if (Op.getValueType() == MVT::nxv32f16 &&
6476         (Subtarget.hasVInstructionsF16Minimal() &&
6477          !Subtarget.hasVInstructionsF16()))
6478       return SplitVectorOp(Op, DAG);
6479     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
6480   case ISD::STRICT_FADD:
6481   case ISD::STRICT_FSUB:
6482   case ISD::STRICT_FMUL:
6483   case ISD::STRICT_FDIV:
6484   case ISD::STRICT_FSQRT:
6485   case ISD::STRICT_FMA:
6486     if (Op.getValueType() == MVT::nxv32f16 &&
6487         (Subtarget.hasVInstructionsF16Minimal() &&
6488          !Subtarget.hasVInstructionsF16()))
6489       return SplitStrictFPVectorOp(Op, DAG);
6490     return lowerToScalableOp(Op, DAG);
6491   case ISD::STRICT_FSETCC:
6492   case ISD::STRICT_FSETCCS:
6493     return lowerVectorStrictFSetcc(Op, DAG);
6494   case ISD::STRICT_FCEIL:
6495   case ISD::STRICT_FRINT:
6496   case ISD::STRICT_FFLOOR:
6497   case ISD::STRICT_FTRUNC:
6498   case ISD::STRICT_FNEARBYINT:
6499   case ISD::STRICT_FROUND:
6500   case ISD::STRICT_FROUNDEVEN:
6501     return lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
6502   case ISD::MGATHER:
6503   case ISD::VP_GATHER:
6504     return lowerMaskedGather(Op, DAG);
6505   case ISD::MSCATTER:
6506   case ISD::VP_SCATTER:
6507     return lowerMaskedScatter(Op, DAG);
6508   case ISD::GET_ROUNDING:
6509     return lowerGET_ROUNDING(Op, DAG);
6510   case ISD::SET_ROUNDING:
6511     return lowerSET_ROUNDING(Op, DAG);
6512   case ISD::EH_DWARF_CFA:
6513     return lowerEH_DWARF_CFA(Op, DAG);
6514   case ISD::VP_SELECT:
6515   case ISD::VP_MERGE:
6516   case ISD::VP_ADD:
6517   case ISD::VP_SUB:
6518   case ISD::VP_MUL:
6519   case ISD::VP_SDIV:
6520   case ISD::VP_UDIV:
6521   case ISD::VP_SREM:
6522   case ISD::VP_UREM:
6523     return lowerVPOp(Op, DAG);
6524   case ISD::VP_AND:
6525   case ISD::VP_OR:
6526   case ISD::VP_XOR:
6527     return lowerLogicVPOp(Op, DAG);
6528   case ISD::VP_FADD:
6529   case ISD::VP_FSUB:
6530   case ISD::VP_FMUL:
6531   case ISD::VP_FDIV:
6532   case ISD::VP_FNEG:
6533   case ISD::VP_FABS:
6534   case ISD::VP_SQRT:
6535   case ISD::VP_FMA:
6536   case ISD::VP_FMINNUM:
6537   case ISD::VP_FMAXNUM:
6538   case ISD::VP_FCOPYSIGN:
6539     if (Op.getValueType() == MVT::nxv32f16 &&
6540         (Subtarget.hasVInstructionsF16Minimal() &&
6541          !Subtarget.hasVInstructionsF16()))
6542       return SplitVPOp(Op, DAG);
6543     [[fallthrough]];
6544   case ISD::VP_ASHR:
6545   case ISD::VP_LSHR:
6546   case ISD::VP_SHL:
6547     return lowerVPOp(Op, DAG);
6548   case ISD::VP_IS_FPCLASS:
6549     return LowerIS_FPCLASS(Op, DAG);
6550   case ISD::VP_SIGN_EXTEND:
6551   case ISD::VP_ZERO_EXTEND:
6552     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
6553       return lowerVPExtMaskOp(Op, DAG);
6554     return lowerVPOp(Op, DAG);
6555   case ISD::VP_TRUNCATE:
6556     return lowerVectorTruncLike(Op, DAG);
6557   case ISD::VP_FP_EXTEND:
6558   case ISD::VP_FP_ROUND:
6559     return lowerVectorFPExtendOrRoundLike(Op, DAG);
6560   case ISD::VP_SINT_TO_FP:
6561   case ISD::VP_UINT_TO_FP:
6562     if (Op.getValueType().isVector() &&
6563         Op.getValueType().getScalarType() == MVT::f16 &&
6564         (Subtarget.hasVInstructionsF16Minimal() &&
6565          !Subtarget.hasVInstructionsF16())) {
6566       if (Op.getValueType() == MVT::nxv32f16)
6567         return SplitVPOp(Op, DAG);
6568       // int -> f32
6569       SDLoc DL(Op);
6570       MVT NVT =
6571           MVT::getVectorVT(MVT::f32, Op.getValueType().getVectorElementCount());
6572       auto NC = DAG.getNode(Op.getOpcode(), DL, NVT, Op->ops());
6573       // f32 -> f16
6574       return DAG.getNode(ISD::FP_ROUND, DL, Op.getValueType(), NC,
6575                          DAG.getIntPtrConstant(0, DL, /*isTarget=*/true));
6576     }
6577     [[fallthrough]];
6578   case ISD::VP_FP_TO_SINT:
6579   case ISD::VP_FP_TO_UINT:
6580     if (SDValue Op1 = Op.getOperand(0);
6581         Op1.getValueType().isVector() &&
6582         Op1.getValueType().getScalarType() == MVT::f16 &&
6583         (Subtarget.hasVInstructionsF16Minimal() &&
6584          !Subtarget.hasVInstructionsF16())) {
6585       if (Op1.getValueType() == MVT::nxv32f16)
6586         return SplitVPOp(Op, DAG);
6587       // f16 -> f32
6588       SDLoc DL(Op);
6589       MVT NVT = MVT::getVectorVT(MVT::f32,
6590                                  Op1.getValueType().getVectorElementCount());
6591       SDValue WidenVec = DAG.getNode(ISD::FP_EXTEND, DL, NVT, Op1);
6592       // f32 -> int
6593       return DAG.getNode(Op.getOpcode(), DL, Op.getValueType(),
6594                          {WidenVec, Op.getOperand(1), Op.getOperand(2)});
6595     }
6596     return lowerVPFPIntConvOp(Op, DAG);
6597   case ISD::VP_SETCC:
6598     if (Op.getOperand(0).getSimpleValueType() == MVT::nxv32f16 &&
6599         (Subtarget.hasVInstructionsF16Minimal() &&
6600          !Subtarget.hasVInstructionsF16()))
6601       return SplitVPOp(Op, DAG);
6602     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
6603       return lowerVPSetCCMaskOp(Op, DAG);
6604     [[fallthrough]];
6605   case ISD::VP_SMIN:
6606   case ISD::VP_SMAX:
6607   case ISD::VP_UMIN:
6608   case ISD::VP_UMAX:
6609   case ISD::VP_BITREVERSE:
6610   case ISD::VP_BSWAP:
6611     return lowerVPOp(Op, DAG);
6612   case ISD::VP_CTLZ:
6613   case ISD::VP_CTLZ_ZERO_UNDEF:
6614     if (Subtarget.hasStdExtZvbb())
6615       return lowerVPOp(Op, DAG);
6616     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
6617   case ISD::VP_CTTZ:
6618   case ISD::VP_CTTZ_ZERO_UNDEF:
6619     if (Subtarget.hasStdExtZvbb())
6620       return lowerVPOp(Op, DAG);
6621     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
6622   case ISD::VP_CTPOP:
6623     return lowerVPOp(Op, DAG);
6624   case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
6625     return lowerVPStridedLoad(Op, DAG);
6626   case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
6627     return lowerVPStridedStore(Op, DAG);
6628   case ISD::VP_FCEIL:
6629   case ISD::VP_FFLOOR:
6630   case ISD::VP_FRINT:
6631   case ISD::VP_FNEARBYINT:
6632   case ISD::VP_FROUND:
6633   case ISD::VP_FROUNDEVEN:
6634   case ISD::VP_FROUNDTOZERO:
6635     if (Op.getValueType() == MVT::nxv32f16 &&
6636         (Subtarget.hasVInstructionsF16Minimal() &&
6637          !Subtarget.hasVInstructionsF16()))
6638       return SplitVPOp(Op, DAG);
6639     return lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
6640   case ISD::EXPERIMENTAL_VP_REVERSE:
6641     return lowerVPReverseExperimental(Op, DAG);
6642   }
6643 }
6644 
6645 static SDValue getTargetNode(GlobalAddressSDNode *N, const SDLoc &DL, EVT Ty,
6646                              SelectionDAG &DAG, unsigned Flags) {
6647   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
6648 }
6649 
6650 static SDValue getTargetNode(BlockAddressSDNode *N, const SDLoc &DL, EVT Ty,
6651                              SelectionDAG &DAG, unsigned Flags) {
6652   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
6653                                    Flags);
6654 }
6655 
6656 static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty,
6657                              SelectionDAG &DAG, unsigned Flags) {
6658   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
6659                                    N->getOffset(), Flags);
6660 }
6661 
6662 static SDValue getTargetNode(JumpTableSDNode *N, const SDLoc &DL, EVT Ty,
6663                              SelectionDAG &DAG, unsigned Flags) {
6664   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
6665 }
6666 
6667 template <class NodeTy>
6668 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
6669                                      bool IsLocal, bool IsExternWeak) const {
6670   SDLoc DL(N);
6671   EVT Ty = getPointerTy(DAG.getDataLayout());
6672 
6673   // When HWASAN is used and tagging of global variables is enabled
6674   // they should be accessed via the GOT, since the tagged address of a global
6675   // is incompatible with existing code models. This also applies to non-pic
6676   // mode.
6677   if (isPositionIndependent() || Subtarget.allowTaggedGlobals()) {
6678     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
6679     if (IsLocal && !Subtarget.allowTaggedGlobals())
6680       // Use PC-relative addressing to access the symbol. This generates the
6681       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
6682       // %pcrel_lo(auipc)).
6683       return DAG.getNode(RISCVISD::LLA, DL, Ty, Addr);
6684 
6685     // Use PC-relative addressing to access the GOT for this symbol, then load
6686     // the address from the GOT. This generates the pattern (PseudoLGA sym),
6687     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
6688     SDValue Load =
6689         SDValue(DAG.getMachineNode(RISCV::PseudoLGA, DL, Ty, Addr), 0);
6690     MachineFunction &MF = DAG.getMachineFunction();
6691     MachineMemOperand *MemOp = MF.getMachineMemOperand(
6692         MachinePointerInfo::getGOT(MF),
6693         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
6694             MachineMemOperand::MOInvariant,
6695         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
6696     DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
6697     return Load;
6698   }
6699 
6700   switch (getTargetMachine().getCodeModel()) {
6701   default:
6702     report_fatal_error("Unsupported code model for lowering");
6703   case CodeModel::Small: {
6704     // Generate a sequence for accessing addresses within the first 2 GiB of
6705     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
6706     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
6707     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
6708     SDValue MNHi = DAG.getNode(RISCVISD::HI, DL, Ty, AddrHi);
6709     return DAG.getNode(RISCVISD::ADD_LO, DL, Ty, MNHi, AddrLo);
6710   }
6711   case CodeModel::Medium: {
6712     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
6713     if (IsExternWeak) {
6714       // An extern weak symbol may be undefined, i.e. have value 0, which may
6715       // not be within 2GiB of PC, so use GOT-indirect addressing to access the
6716       // symbol. This generates the pattern (PseudoLGA sym), which expands to
6717       // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
6718       SDValue Load =
6719           SDValue(DAG.getMachineNode(RISCV::PseudoLGA, DL, Ty, Addr), 0);
6720       MachineFunction &MF = DAG.getMachineFunction();
6721       MachineMemOperand *MemOp = MF.getMachineMemOperand(
6722           MachinePointerInfo::getGOT(MF),
6723           MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
6724               MachineMemOperand::MOInvariant,
6725           LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
6726       DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
6727       return Load;
6728     }
6729 
6730     // Generate a sequence for accessing addresses within any 2GiB range within
6731     // the address space. This generates the pattern (PseudoLLA sym), which
6732     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
6733     return DAG.getNode(RISCVISD::LLA, DL, Ty, Addr);
6734   }
6735   }
6736 }
6737 
6738 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
6739                                                 SelectionDAG &DAG) const {
6740   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
6741   assert(N->getOffset() == 0 && "unexpected offset in global node");
6742   const GlobalValue *GV = N->getGlobal();
6743   return getAddr(N, DAG, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
6744 }
6745 
6746 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
6747                                                SelectionDAG &DAG) const {
6748   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
6749 
6750   return getAddr(N, DAG);
6751 }
6752 
6753 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
6754                                                SelectionDAG &DAG) const {
6755   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
6756 
6757   return getAddr(N, DAG);
6758 }
6759 
6760 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
6761                                             SelectionDAG &DAG) const {
6762   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
6763 
6764   return getAddr(N, DAG);
6765 }
6766 
6767 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
6768                                               SelectionDAG &DAG,
6769                                               bool UseGOT) const {
6770   SDLoc DL(N);
6771   EVT Ty = getPointerTy(DAG.getDataLayout());
6772   const GlobalValue *GV = N->getGlobal();
6773   MVT XLenVT = Subtarget.getXLenVT();
6774 
6775   if (UseGOT) {
6776     // Use PC-relative addressing to access the GOT for this TLS symbol, then
6777     // load the address from the GOT and add the thread pointer. This generates
6778     // the pattern (PseudoLA_TLS_IE sym), which expands to
6779     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
6780     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
6781     SDValue Load =
6782         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
6783     MachineFunction &MF = DAG.getMachineFunction();
6784     MachineMemOperand *MemOp = MF.getMachineMemOperand(
6785         MachinePointerInfo::getGOT(MF),
6786         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
6787             MachineMemOperand::MOInvariant,
6788         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
6789     DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
6790 
6791     // Add the thread pointer.
6792     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
6793     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
6794   }
6795 
6796   // Generate a sequence for accessing the address relative to the thread
6797   // pointer, with the appropriate adjustment for the thread pointer offset.
6798   // This generates the pattern
6799   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
6800   SDValue AddrHi =
6801       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
6802   SDValue AddrAdd =
6803       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
6804   SDValue AddrLo =
6805       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
6806 
6807   SDValue MNHi = DAG.getNode(RISCVISD::HI, DL, Ty, AddrHi);
6808   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
6809   SDValue MNAdd =
6810       DAG.getNode(RISCVISD::ADD_TPREL, DL, Ty, MNHi, TPReg, AddrAdd);
6811   return DAG.getNode(RISCVISD::ADD_LO, DL, Ty, MNAdd, AddrLo);
6812 }
6813 
6814 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
6815                                                SelectionDAG &DAG) const {
6816   SDLoc DL(N);
6817   EVT Ty = getPointerTy(DAG.getDataLayout());
6818   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
6819   const GlobalValue *GV = N->getGlobal();
6820 
6821   // Use a PC-relative addressing mode to access the global dynamic GOT address.
6822   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
6823   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
6824   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
6825   SDValue Load =
6826       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
6827 
6828   // Prepare argument list to generate call.
6829   ArgListTy Args;
6830   ArgListEntry Entry;
6831   Entry.Node = Load;
6832   Entry.Ty = CallTy;
6833   Args.push_back(Entry);
6834 
6835   // Setup call to __tls_get_addr.
6836   TargetLowering::CallLoweringInfo CLI(DAG);
6837   CLI.setDebugLoc(DL)
6838       .setChain(DAG.getEntryNode())
6839       .setLibCallee(CallingConv::C, CallTy,
6840                     DAG.getExternalSymbol("__tls_get_addr", Ty),
6841                     std::move(Args));
6842 
6843   return LowerCallTo(CLI).first;
6844 }
6845 
6846 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
6847                                                    SelectionDAG &DAG) const {
6848   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
6849   assert(N->getOffset() == 0 && "unexpected offset in global node");
6850 
6851   if (DAG.getTarget().useEmulatedTLS())
6852     return LowerToTLSEmulatedModel(N, DAG);
6853 
6854   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
6855 
6856   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
6857       CallingConv::GHC)
6858     report_fatal_error("In GHC calling convention TLS is not supported");
6859 
6860   SDValue Addr;
6861   switch (Model) {
6862   case TLSModel::LocalExec:
6863     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
6864     break;
6865   case TLSModel::InitialExec:
6866     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
6867     break;
6868   case TLSModel::LocalDynamic:
6869   case TLSModel::GeneralDynamic:
6870     Addr = getDynamicTLSAddr(N, DAG);
6871     break;
6872   }
6873 
6874   return Addr;
6875 }
6876 
6877 // Return true if Val is equal to (setcc LHS, RHS, CC).
6878 // Return false if Val is the inverse of (setcc LHS, RHS, CC).
6879 // Otherwise, return std::nullopt.
6880 static std::optional<bool> matchSetCC(SDValue LHS, SDValue RHS,
6881                                       ISD::CondCode CC, SDValue Val) {
6882   assert(Val->getOpcode() == ISD::SETCC);
6883   SDValue LHS2 = Val.getOperand(0);
6884   SDValue RHS2 = Val.getOperand(1);
6885   ISD::CondCode CC2 = cast<CondCodeSDNode>(Val.getOperand(2))->get();
6886 
6887   if (LHS == LHS2 && RHS == RHS2) {
6888     if (CC == CC2)
6889       return true;
6890     if (CC == ISD::getSetCCInverse(CC2, LHS2.getValueType()))
6891       return false;
6892   } else if (LHS == RHS2 && RHS == LHS2) {
6893     CC2 = ISD::getSetCCSwappedOperands(CC2);
6894     if (CC == CC2)
6895       return true;
6896     if (CC == ISD::getSetCCInverse(CC2, LHS2.getValueType()))
6897       return false;
6898   }
6899 
6900   return std::nullopt;
6901 }
6902 
6903 static SDValue combineSelectToBinOp(SDNode *N, SelectionDAG &DAG,
6904                                     const RISCVSubtarget &Subtarget) {
6905   SDValue CondV = N->getOperand(0);
6906   SDValue TrueV = N->getOperand(1);
6907   SDValue FalseV = N->getOperand(2);
6908   MVT VT = N->getSimpleValueType(0);
6909   SDLoc DL(N);
6910 
6911   if (!Subtarget.hasShortForwardBranchOpt()) {
6912     // (select c, -1, y) -> -c | y
6913     if (isAllOnesConstant(TrueV)) {
6914       SDValue Neg = DAG.getNegative(CondV, DL, VT);
6915       return DAG.getNode(ISD::OR, DL, VT, Neg, FalseV);
6916     }
6917     // (select c, y, -1) -> (c-1) | y
6918     if (isAllOnesConstant(FalseV)) {
6919       SDValue Neg = DAG.getNode(ISD::ADD, DL, VT, CondV,
6920                                 DAG.getAllOnesConstant(DL, VT));
6921       return DAG.getNode(ISD::OR, DL, VT, Neg, TrueV);
6922     }
6923 
6924     // (select c, 0, y) -> (c-1) & y
6925     if (isNullConstant(TrueV)) {
6926       SDValue Neg = DAG.getNode(ISD::ADD, DL, VT, CondV,
6927                                 DAG.getAllOnesConstant(DL, VT));
6928       return DAG.getNode(ISD::AND, DL, VT, Neg, FalseV);
6929     }
6930     // (select c, y, 0) -> -c & y
6931     if (isNullConstant(FalseV)) {
6932       SDValue Neg = DAG.getNegative(CondV, DL, VT);
6933       return DAG.getNode(ISD::AND, DL, VT, Neg, TrueV);
6934     }
6935   }
6936 
6937   // Try to fold (select (setcc lhs, rhs, cc), truev, falsev) into bitwise ops
6938   // when both truev and falsev are also setcc.
6939   if (CondV.getOpcode() == ISD::SETCC && TrueV.getOpcode() == ISD::SETCC &&
6940       FalseV.getOpcode() == ISD::SETCC) {
6941     SDValue LHS = CondV.getOperand(0);
6942     SDValue RHS = CondV.getOperand(1);
6943     ISD::CondCode CC = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
6944 
6945     // (select x, x, y) -> x | y
6946     // (select !x, x, y) -> x & y
6947     if (std::optional<bool> MatchResult = matchSetCC(LHS, RHS, CC, TrueV)) {
6948       return DAG.getNode(*MatchResult ? ISD::OR : ISD::AND, DL, VT, TrueV,
6949                          FalseV);
6950     }
6951     // (select x, y, x) -> x & y
6952     // (select !x, y, x) -> x | y
6953     if (std::optional<bool> MatchResult = matchSetCC(LHS, RHS, CC, FalseV)) {
6954       return DAG.getNode(*MatchResult ? ISD::AND : ISD::OR, DL, VT, TrueV,
6955                          FalseV);
6956     }
6957   }
6958 
6959   return SDValue();
6960 }
6961 
6962 // Transform `binOp (select cond, x, c0), c1` where `c0` and `c1` are constants
6963 // into `select cond, binOp(x, c1), binOp(c0, c1)` if profitable.
6964 // For now we only consider transformation profitable if `binOp(c0, c1)` ends up
6965 // being `0` or `-1`. In such cases we can replace `select` with `and`.
6966 // TODO: Should we also do this if `binOp(c0, c1)` is cheaper to materialize
6967 // than `c0`?
6968 static SDValue
6969 foldBinOpIntoSelectIfProfitable(SDNode *BO, SelectionDAG &DAG,
6970                                 const RISCVSubtarget &Subtarget) {
6971   if (Subtarget.hasShortForwardBranchOpt())
6972     return SDValue();
6973 
6974   unsigned SelOpNo = 0;
6975   SDValue Sel = BO->getOperand(0);
6976   if (Sel.getOpcode() != ISD::SELECT || !Sel.hasOneUse()) {
6977     SelOpNo = 1;
6978     Sel = BO->getOperand(1);
6979   }
6980 
6981   if (Sel.getOpcode() != ISD::SELECT || !Sel.hasOneUse())
6982     return SDValue();
6983 
6984   unsigned ConstSelOpNo = 1;
6985   unsigned OtherSelOpNo = 2;
6986   if (!dyn_cast<ConstantSDNode>(Sel->getOperand(ConstSelOpNo))) {
6987     ConstSelOpNo = 2;
6988     OtherSelOpNo = 1;
6989   }
6990   SDValue ConstSelOp = Sel->getOperand(ConstSelOpNo);
6991   ConstantSDNode *ConstSelOpNode = dyn_cast<ConstantSDNode>(ConstSelOp);
6992   if (!ConstSelOpNode || ConstSelOpNode->isOpaque())
6993     return SDValue();
6994 
6995   SDValue ConstBinOp = BO->getOperand(SelOpNo ^ 1);
6996   ConstantSDNode *ConstBinOpNode = dyn_cast<ConstantSDNode>(ConstBinOp);
6997   if (!ConstBinOpNode || ConstBinOpNode->isOpaque())
6998     return SDValue();
6999 
7000   SDLoc DL(Sel);
7001   EVT VT = BO->getValueType(0);
7002 
7003   SDValue NewConstOps[2] = {ConstSelOp, ConstBinOp};
7004   if (SelOpNo == 1)
7005     std::swap(NewConstOps[0], NewConstOps[1]);
7006 
7007   SDValue NewConstOp =
7008       DAG.FoldConstantArithmetic(BO->getOpcode(), DL, VT, NewConstOps);
7009   if (!NewConstOp)
7010     return SDValue();
7011 
7012   const APInt &NewConstAPInt =
7013       cast<ConstantSDNode>(NewConstOp)->getAPIntValue();
7014   if (!NewConstAPInt.isZero() && !NewConstAPInt.isAllOnes())
7015     return SDValue();
7016 
7017   SDValue OtherSelOp = Sel->getOperand(OtherSelOpNo);
7018   SDValue NewNonConstOps[2] = {OtherSelOp, ConstBinOp};
7019   if (SelOpNo == 1)
7020     std::swap(NewNonConstOps[0], NewNonConstOps[1]);
7021   SDValue NewNonConstOp = DAG.getNode(BO->getOpcode(), DL, VT, NewNonConstOps);
7022 
7023   SDValue NewT = (ConstSelOpNo == 1) ? NewConstOp : NewNonConstOp;
7024   SDValue NewF = (ConstSelOpNo == 1) ? NewNonConstOp : NewConstOp;
7025   return DAG.getSelect(DL, VT, Sel.getOperand(0), NewT, NewF);
7026 }
7027 
7028 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
7029   SDValue CondV = Op.getOperand(0);
7030   SDValue TrueV = Op.getOperand(1);
7031   SDValue FalseV = Op.getOperand(2);
7032   SDLoc DL(Op);
7033   MVT VT = Op.getSimpleValueType();
7034   MVT XLenVT = Subtarget.getXLenVT();
7035 
7036   // Lower vector SELECTs to VSELECTs by splatting the condition.
7037   if (VT.isVector()) {
7038     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
7039     SDValue CondSplat = DAG.getSplat(SplatCondVT, DL, CondV);
7040     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
7041   }
7042 
7043   // When Zicond or XVentanaCondOps is present, emit CZERO_EQZ and CZERO_NEZ
7044   // nodes to implement the SELECT. Performing the lowering here allows for
7045   // greater control over when CZERO_{EQZ/NEZ} are used vs another branchless
7046   // sequence or RISCVISD::SELECT_CC node (branch-based select).
7047   if ((Subtarget.hasStdExtZicond() || Subtarget.hasVendorXVentanaCondOps()) &&
7048       VT.isScalarInteger()) {
7049     // (select c, t, 0) -> (czero_eqz t, c)
7050     if (isNullConstant(FalseV))
7051       return DAG.getNode(RISCVISD::CZERO_EQZ, DL, VT, TrueV, CondV);
7052     // (select c, 0, f) -> (czero_nez f, c)
7053     if (isNullConstant(TrueV))
7054       return DAG.getNode(RISCVISD::CZERO_NEZ, DL, VT, FalseV, CondV);
7055 
7056     // (select c, (and f, x), f) -> (or (and f, x), (czero_nez f, c))
7057     if (TrueV.getOpcode() == ISD::AND &&
7058         (TrueV.getOperand(0) == FalseV || TrueV.getOperand(1) == FalseV))
7059       return DAG.getNode(
7060           ISD::OR, DL, VT, TrueV,
7061           DAG.getNode(RISCVISD::CZERO_NEZ, DL, VT, FalseV, CondV));
7062     // (select c, t, (and t, x)) -> (or (czero_eqz t, c), (and t, x))
7063     if (FalseV.getOpcode() == ISD::AND &&
7064         (FalseV.getOperand(0) == TrueV || FalseV.getOperand(1) == TrueV))
7065       return DAG.getNode(
7066           ISD::OR, DL, VT, FalseV,
7067           DAG.getNode(RISCVISD::CZERO_EQZ, DL, VT, TrueV, CondV));
7068 
7069     // Try some other optimizations before falling back to generic lowering.
7070     if (SDValue V = combineSelectToBinOp(Op.getNode(), DAG, Subtarget))
7071       return V;
7072 
7073     // (select c, t, f) -> (or (czero_eqz t, c), (czero_nez f, c))
7074     // Unless we have the short forward branch optimization.
7075     if (!Subtarget.hasShortForwardBranchOpt())
7076       return DAG.getNode(
7077           ISD::OR, DL, VT,
7078           DAG.getNode(RISCVISD::CZERO_EQZ, DL, VT, TrueV, CondV),
7079           DAG.getNode(RISCVISD::CZERO_NEZ, DL, VT, FalseV, CondV));
7080   }
7081 
7082   if (SDValue V = combineSelectToBinOp(Op.getNode(), DAG, Subtarget))
7083     return V;
7084 
7085   if (Op.hasOneUse()) {
7086     unsigned UseOpc = Op->use_begin()->getOpcode();
7087     if (isBinOp(UseOpc) && DAG.isSafeToSpeculativelyExecute(UseOpc)) {
7088       SDNode *BinOp = *Op->use_begin();
7089       if (SDValue NewSel = foldBinOpIntoSelectIfProfitable(*Op->use_begin(),
7090                                                            DAG, Subtarget)) {
7091         DAG.ReplaceAllUsesWith(BinOp, &NewSel);
7092         return lowerSELECT(NewSel, DAG);
7093       }
7094     }
7095   }
7096 
7097   // (select cc, 1.0, 0.0) -> (sint_to_fp (zext cc))
7098   // (select cc, 0.0, 1.0) -> (sint_to_fp (zext (xor cc, 1)))
7099   const ConstantFPSDNode *FPTV = dyn_cast<ConstantFPSDNode>(TrueV);
7100   const ConstantFPSDNode *FPFV = dyn_cast<ConstantFPSDNode>(FalseV);
7101   if (FPTV && FPFV) {
7102     if (FPTV->isExactlyValue(1.0) && FPFV->isExactlyValue(0.0))
7103       return DAG.getNode(ISD::SINT_TO_FP, DL, VT, CondV);
7104     if (FPTV->isExactlyValue(0.0) && FPFV->isExactlyValue(1.0)) {
7105       SDValue XOR = DAG.getNode(ISD::XOR, DL, XLenVT, CondV,
7106                                 DAG.getConstant(1, DL, XLenVT));
7107       return DAG.getNode(ISD::SINT_TO_FP, DL, VT, XOR);
7108     }
7109   }
7110 
7111   // If the condition is not an integer SETCC which operates on XLenVT, we need
7112   // to emit a RISCVISD::SELECT_CC comparing the condition to zero. i.e.:
7113   // (select condv, truev, falsev)
7114   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
7115   if (CondV.getOpcode() != ISD::SETCC ||
7116       CondV.getOperand(0).getSimpleValueType() != XLenVT) {
7117     SDValue Zero = DAG.getConstant(0, DL, XLenVT);
7118     SDValue SetNE = DAG.getCondCode(ISD::SETNE);
7119 
7120     SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
7121 
7122     return DAG.getNode(RISCVISD::SELECT_CC, DL, VT, Ops);
7123   }
7124 
7125   // If the CondV is the output of a SETCC node which operates on XLenVT inputs,
7126   // then merge the SETCC node into the lowered RISCVISD::SELECT_CC to take
7127   // advantage of the integer compare+branch instructions. i.e.:
7128   // (select (setcc lhs, rhs, cc), truev, falsev)
7129   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
7130   SDValue LHS = CondV.getOperand(0);
7131   SDValue RHS = CondV.getOperand(1);
7132   ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
7133 
7134   // Special case for a select of 2 constants that have a diffence of 1.
7135   // Normally this is done by DAGCombine, but if the select is introduced by
7136   // type legalization or op legalization, we miss it. Restricting to SETLT
7137   // case for now because that is what signed saturating add/sub need.
7138   // FIXME: We don't need the condition to be SETLT or even a SETCC,
7139   // but we would probably want to swap the true/false values if the condition
7140   // is SETGE/SETLE to avoid an XORI.
7141   if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
7142       CCVal == ISD::SETLT) {
7143     const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
7144     const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
7145     if (TrueVal - 1 == FalseVal)
7146       return DAG.getNode(ISD::ADD, DL, VT, CondV, FalseV);
7147     if (TrueVal + 1 == FalseVal)
7148       return DAG.getNode(ISD::SUB, DL, VT, FalseV, CondV);
7149   }
7150 
7151   translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
7152   // 1 < x ? x : 1 -> 0 < x ? x : 1
7153   if (isOneConstant(LHS) && (CCVal == ISD::SETLT || CCVal == ISD::SETULT) &&
7154       RHS == TrueV && LHS == FalseV) {
7155     LHS = DAG.getConstant(0, DL, VT);
7156     // 0 <u x is the same as x != 0.
7157     if (CCVal == ISD::SETULT) {
7158       std::swap(LHS, RHS);
7159       CCVal = ISD::SETNE;
7160     }
7161   }
7162 
7163   // x <s -1 ? x : -1 -> x <s 0 ? x : -1
7164   if (isAllOnesConstant(RHS) && CCVal == ISD::SETLT && LHS == TrueV &&
7165       RHS == FalseV) {
7166     RHS = DAG.getConstant(0, DL, VT);
7167   }
7168 
7169   SDValue TargetCC = DAG.getCondCode(CCVal);
7170 
7171   if (isa<ConstantSDNode>(TrueV) && !isa<ConstantSDNode>(FalseV)) {
7172     // (select (setcc lhs, rhs, CC), constant, falsev)
7173     // -> (select (setcc lhs, rhs, InverseCC), falsev, constant)
7174     std::swap(TrueV, FalseV);
7175     TargetCC = DAG.getCondCode(ISD::getSetCCInverse(CCVal, LHS.getValueType()));
7176   }
7177 
7178   SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
7179   return DAG.getNode(RISCVISD::SELECT_CC, DL, VT, Ops);
7180 }
7181 
7182 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
7183   SDValue CondV = Op.getOperand(1);
7184   SDLoc DL(Op);
7185   MVT XLenVT = Subtarget.getXLenVT();
7186 
7187   if (CondV.getOpcode() == ISD::SETCC &&
7188       CondV.getOperand(0).getValueType() == XLenVT) {
7189     SDValue LHS = CondV.getOperand(0);
7190     SDValue RHS = CondV.getOperand(1);
7191     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
7192 
7193     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
7194 
7195     SDValue TargetCC = DAG.getCondCode(CCVal);
7196     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
7197                        LHS, RHS, TargetCC, Op.getOperand(2));
7198   }
7199 
7200   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
7201                      CondV, DAG.getConstant(0, DL, XLenVT),
7202                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
7203 }
7204 
7205 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
7206   MachineFunction &MF = DAG.getMachineFunction();
7207   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
7208 
7209   SDLoc DL(Op);
7210   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
7211                                  getPointerTy(MF.getDataLayout()));
7212 
7213   // vastart just stores the address of the VarArgsFrameIndex slot into the
7214   // memory location argument.
7215   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
7216   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
7217                       MachinePointerInfo(SV));
7218 }
7219 
7220 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
7221                                             SelectionDAG &DAG) const {
7222   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
7223   MachineFunction &MF = DAG.getMachineFunction();
7224   MachineFrameInfo &MFI = MF.getFrameInfo();
7225   MFI.setFrameAddressIsTaken(true);
7226   Register FrameReg = RI.getFrameRegister(MF);
7227   int XLenInBytes = Subtarget.getXLen() / 8;
7228 
7229   EVT VT = Op.getValueType();
7230   SDLoc DL(Op);
7231   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
7232   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
7233   while (Depth--) {
7234     int Offset = -(XLenInBytes * 2);
7235     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
7236                               DAG.getIntPtrConstant(Offset, DL));
7237     FrameAddr =
7238         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
7239   }
7240   return FrameAddr;
7241 }
7242 
7243 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
7244                                              SelectionDAG &DAG) const {
7245   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
7246   MachineFunction &MF = DAG.getMachineFunction();
7247   MachineFrameInfo &MFI = MF.getFrameInfo();
7248   MFI.setReturnAddressIsTaken(true);
7249   MVT XLenVT = Subtarget.getXLenVT();
7250   int XLenInBytes = Subtarget.getXLen() / 8;
7251 
7252   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
7253     return SDValue();
7254 
7255   EVT VT = Op.getValueType();
7256   SDLoc DL(Op);
7257   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
7258   if (Depth) {
7259     int Off = -XLenInBytes;
7260     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
7261     SDValue Offset = DAG.getConstant(Off, DL, VT);
7262     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
7263                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
7264                        MachinePointerInfo());
7265   }
7266 
7267   // Return the value of the return address register, marking it an implicit
7268   // live-in.
7269   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
7270   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
7271 }
7272 
7273 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
7274                                                  SelectionDAG &DAG) const {
7275   SDLoc DL(Op);
7276   SDValue Lo = Op.getOperand(0);
7277   SDValue Hi = Op.getOperand(1);
7278   SDValue Shamt = Op.getOperand(2);
7279   EVT VT = Lo.getValueType();
7280 
7281   // if Shamt-XLEN < 0: // Shamt < XLEN
7282   //   Lo = Lo << Shamt
7283   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
7284   // else:
7285   //   Lo = 0
7286   //   Hi = Lo << (Shamt-XLEN)
7287 
7288   SDValue Zero = DAG.getConstant(0, DL, VT);
7289   SDValue One = DAG.getConstant(1, DL, VT);
7290   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
7291   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
7292   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
7293   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
7294 
7295   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
7296   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
7297   SDValue ShiftRightLo =
7298       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
7299   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
7300   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
7301   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
7302 
7303   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
7304 
7305   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
7306   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
7307 
7308   SDValue Parts[2] = {Lo, Hi};
7309   return DAG.getMergeValues(Parts, DL);
7310 }
7311 
7312 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
7313                                                   bool IsSRA) const {
7314   SDLoc DL(Op);
7315   SDValue Lo = Op.getOperand(0);
7316   SDValue Hi = Op.getOperand(1);
7317   SDValue Shamt = Op.getOperand(2);
7318   EVT VT = Lo.getValueType();
7319 
7320   // SRA expansion:
7321   //   if Shamt-XLEN < 0: // Shamt < XLEN
7322   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - ShAmt))
7323   //     Hi = Hi >>s Shamt
7324   //   else:
7325   //     Lo = Hi >>s (Shamt-XLEN);
7326   //     Hi = Hi >>s (XLEN-1)
7327   //
7328   // SRL expansion:
7329   //   if Shamt-XLEN < 0: // Shamt < XLEN
7330   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - ShAmt))
7331   //     Hi = Hi >>u Shamt
7332   //   else:
7333   //     Lo = Hi >>u (Shamt-XLEN);
7334   //     Hi = 0;
7335 
7336   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
7337 
7338   SDValue Zero = DAG.getConstant(0, DL, VT);
7339   SDValue One = DAG.getConstant(1, DL, VT);
7340   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
7341   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
7342   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
7343   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
7344 
7345   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
7346   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
7347   SDValue ShiftLeftHi =
7348       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
7349   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
7350   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
7351   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
7352   SDValue HiFalse =
7353       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
7354 
7355   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
7356 
7357   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
7358   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
7359 
7360   SDValue Parts[2] = {Lo, Hi};
7361   return DAG.getMergeValues(Parts, DL);
7362 }
7363 
7364 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
7365 // legal equivalently-sized i8 type, so we can use that as a go-between.
7366 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
7367                                                   SelectionDAG &DAG) const {
7368   SDLoc DL(Op);
7369   MVT VT = Op.getSimpleValueType();
7370   SDValue SplatVal = Op.getOperand(0);
7371   // All-zeros or all-ones splats are handled specially.
7372   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
7373     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
7374     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
7375   }
7376   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
7377     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
7378     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
7379   }
7380   MVT InterVT = VT.changeVectorElementType(MVT::i8);
7381   SplatVal = DAG.getNode(ISD::AND, DL, SplatVal.getValueType(), SplatVal,
7382                          DAG.getConstant(1, DL, SplatVal.getValueType()));
7383   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
7384   SDValue Zero = DAG.getConstant(0, DL, InterVT);
7385   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
7386 }
7387 
7388 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
7389 // illegal (currently only vXi64 RV32).
7390 // FIXME: We could also catch non-constant sign-extended i32 values and lower
7391 // them to VMV_V_X_VL.
7392 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
7393                                                      SelectionDAG &DAG) const {
7394   SDLoc DL(Op);
7395   MVT VecVT = Op.getSimpleValueType();
7396   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
7397          "Unexpected SPLAT_VECTOR_PARTS lowering");
7398 
7399   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
7400   SDValue Lo = Op.getOperand(0);
7401   SDValue Hi = Op.getOperand(1);
7402 
7403   MVT ContainerVT = VecVT;
7404   if (VecVT.isFixedLengthVector())
7405     ContainerVT = getContainerForFixedLengthVector(VecVT);
7406 
7407   auto VL = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).second;
7408 
7409   SDValue Res =
7410       splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG);
7411 
7412   if (VecVT.isFixedLengthVector())
7413     Res = convertFromScalableVector(VecVT, Res, DAG, Subtarget);
7414 
7415   return Res;
7416 }
7417 
7418 // Custom-lower extensions from mask vectors by using a vselect either with 1
7419 // for zero/any-extension or -1 for sign-extension:
7420 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
7421 // Note that any-extension is lowered identically to zero-extension.
7422 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
7423                                                 int64_t ExtTrueVal) const {
7424   SDLoc DL(Op);
7425   MVT VecVT = Op.getSimpleValueType();
7426   SDValue Src = Op.getOperand(0);
7427   // Only custom-lower extensions from mask types
7428   assert(Src.getValueType().isVector() &&
7429          Src.getValueType().getVectorElementType() == MVT::i1);
7430 
7431   if (VecVT.isScalableVector()) {
7432     SDValue SplatZero = DAG.getConstant(0, DL, VecVT);
7433     SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, VecVT);
7434     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
7435   }
7436 
7437   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
7438   MVT I1ContainerVT =
7439       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
7440 
7441   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
7442 
7443   SDValue VL = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).second;
7444 
7445   MVT XLenVT = Subtarget.getXLenVT();
7446   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
7447   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
7448 
7449   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
7450                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
7451   SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
7452                              DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
7453   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
7454                                SplatTrueVal, SplatZero, VL);
7455 
7456   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
7457 }
7458 
7459 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
7460     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
7461   MVT ExtVT = Op.getSimpleValueType();
7462   // Only custom-lower extensions from fixed-length vector types.
7463   if (!ExtVT.isFixedLengthVector())
7464     return Op;
7465   MVT VT = Op.getOperand(0).getSimpleValueType();
7466   // Grab the canonical container type for the extended type. Infer the smaller
7467   // type from that to ensure the same number of vector elements, as we know
7468   // the LMUL will be sufficient to hold the smaller type.
7469   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
7470   // Get the extended container type manually to ensure the same number of
7471   // vector elements between source and dest.
7472   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
7473                                      ContainerExtVT.getVectorElementCount());
7474 
7475   SDValue Op1 =
7476       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
7477 
7478   SDLoc DL(Op);
7479   auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
7480 
7481   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
7482 
7483   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
7484 }
7485 
7486 // Custom-lower truncations from vectors to mask vectors by using a mask and a
7487 // setcc operation:
7488 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
7489 SDValue RISCVTargetLowering::lowerVectorMaskTruncLike(SDValue Op,
7490                                                       SelectionDAG &DAG) const {
7491   bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
7492   SDLoc DL(Op);
7493   EVT MaskVT = Op.getValueType();
7494   // Only expect to custom-lower truncations to mask types
7495   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
7496          "Unexpected type for vector mask lowering");
7497   SDValue Src = Op.getOperand(0);
7498   MVT VecVT = Src.getSimpleValueType();
7499   SDValue Mask, VL;
7500   if (IsVPTrunc) {
7501     Mask = Op.getOperand(1);
7502     VL = Op.getOperand(2);
7503   }
7504   // If this is a fixed vector, we need to convert it to a scalable vector.
7505   MVT ContainerVT = VecVT;
7506 
7507   if (VecVT.isFixedLengthVector()) {
7508     ContainerVT = getContainerForFixedLengthVector(VecVT);
7509     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
7510     if (IsVPTrunc) {
7511       MVT MaskContainerVT =
7512           getContainerForFixedLengthVector(Mask.getSimpleValueType());
7513       Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);
7514     }
7515   }
7516 
7517   if (!IsVPTrunc) {
7518     std::tie(Mask, VL) =
7519         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
7520   }
7521 
7522   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
7523   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
7524 
7525   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
7526                          DAG.getUNDEF(ContainerVT), SplatOne, VL);
7527   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
7528                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
7529 
7530   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
7531   SDValue Trunc = DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne,
7532                               DAG.getUNDEF(ContainerVT), Mask, VL);
7533   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT,
7534                       {Trunc, SplatZero, DAG.getCondCode(ISD::SETNE),
7535                        DAG.getUNDEF(MaskContainerVT), Mask, VL});
7536   if (MaskVT.isFixedLengthVector())
7537     Trunc = convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
7538   return Trunc;
7539 }
7540 
7541 SDValue RISCVTargetLowering::lowerVectorTruncLike(SDValue Op,
7542                                                   SelectionDAG &DAG) const {
7543   bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
7544   SDLoc DL(Op);
7545 
7546   MVT VT = Op.getSimpleValueType();
7547   // Only custom-lower vector truncates
7548   assert(VT.isVector() && "Unexpected type for vector truncate lowering");
7549 
7550   // Truncates to mask types are handled differently
7551   if (VT.getVectorElementType() == MVT::i1)
7552     return lowerVectorMaskTruncLike(Op, DAG);
7553 
7554   // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
7555   // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
7556   // truncate by one power of two at a time.
7557   MVT DstEltVT = VT.getVectorElementType();
7558 
7559   SDValue Src = Op.getOperand(0);
7560   MVT SrcVT = Src.getSimpleValueType();
7561   MVT SrcEltVT = SrcVT.getVectorElementType();
7562 
7563   assert(DstEltVT.bitsLT(SrcEltVT) && isPowerOf2_64(DstEltVT.getSizeInBits()) &&
7564          isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
7565          "Unexpected vector truncate lowering");
7566 
7567   MVT ContainerVT = SrcVT;
7568   SDValue Mask, VL;
7569   if (IsVPTrunc) {
7570     Mask = Op.getOperand(1);
7571     VL = Op.getOperand(2);
7572   }
7573   if (SrcVT.isFixedLengthVector()) {
7574     ContainerVT = getContainerForFixedLengthVector(SrcVT);
7575     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
7576     if (IsVPTrunc) {
7577       MVT MaskVT = getMaskTypeFor(ContainerVT);
7578       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
7579     }
7580   }
7581 
7582   SDValue Result = Src;
7583   if (!IsVPTrunc) {
7584     std::tie(Mask, VL) =
7585         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
7586   }
7587 
7588   LLVMContext &Context = *DAG.getContext();
7589   const ElementCount Count = ContainerVT.getVectorElementCount();
7590   do {
7591     SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
7592     EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
7593     Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
7594                          Mask, VL);
7595   } while (SrcEltVT != DstEltVT);
7596 
7597   if (SrcVT.isFixedLengthVector())
7598     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
7599 
7600   return Result;
7601 }
7602 
7603 SDValue
7604 RISCVTargetLowering::lowerStrictFPExtendOrRoundLike(SDValue Op,
7605                                                     SelectionDAG &DAG) const {
7606   SDLoc DL(Op);
7607   SDValue Chain = Op.getOperand(0);
7608   SDValue Src = Op.getOperand(1);
7609   MVT VT = Op.getSimpleValueType();
7610   MVT SrcVT = Src.getSimpleValueType();
7611   MVT ContainerVT = VT;
7612   if (VT.isFixedLengthVector()) {
7613     MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
7614     ContainerVT =
7615         SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
7616     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
7617   }
7618 
7619   auto [Mask, VL] = getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
7620 
7621   // RVV can only widen/truncate fp to types double/half the size as the source.
7622   if ((VT.getVectorElementType() == MVT::f64 &&
7623        SrcVT.getVectorElementType() == MVT::f16) ||
7624       (VT.getVectorElementType() == MVT::f16 &&
7625        SrcVT.getVectorElementType() == MVT::f64)) {
7626     // For double rounding, the intermediate rounding should be round-to-odd.
7627     unsigned InterConvOpc = Op.getOpcode() == ISD::STRICT_FP_EXTEND
7628                                 ? RISCVISD::STRICT_FP_EXTEND_VL
7629                                 : RISCVISD::STRICT_VFNCVT_ROD_VL;
7630     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
7631     Src = DAG.getNode(InterConvOpc, DL, DAG.getVTList(InterVT, MVT::Other),
7632                       Chain, Src, Mask, VL);
7633     Chain = Src.getValue(1);
7634   }
7635 
7636   unsigned ConvOpc = Op.getOpcode() == ISD::STRICT_FP_EXTEND
7637                          ? RISCVISD::STRICT_FP_EXTEND_VL
7638                          : RISCVISD::STRICT_FP_ROUND_VL;
7639   SDValue Res = DAG.getNode(ConvOpc, DL, DAG.getVTList(ContainerVT, MVT::Other),
7640                             Chain, Src, Mask, VL);
7641   if (VT.isFixedLengthVector()) {
7642     // StrictFP operations have two result values. Their lowered result should
7643     // have same result count.
7644     SDValue SubVec = convertFromScalableVector(VT, Res, DAG, Subtarget);
7645     Res = DAG.getMergeValues({SubVec, Res.getValue(1)}, DL);
7646   }
7647   return Res;
7648 }
7649 
7650 SDValue
7651 RISCVTargetLowering::lowerVectorFPExtendOrRoundLike(SDValue Op,
7652                                                     SelectionDAG &DAG) const {
7653   bool IsVP =
7654       Op.getOpcode() == ISD::VP_FP_ROUND || Op.getOpcode() == ISD::VP_FP_EXTEND;
7655   bool IsExtend =
7656       Op.getOpcode() == ISD::VP_FP_EXTEND || Op.getOpcode() == ISD::FP_EXTEND;
7657   // RVV can only do truncate fp to types half the size as the source. We
7658   // custom-lower f64->f16 rounds via RVV's round-to-odd float
7659   // conversion instruction.
7660   SDLoc DL(Op);
7661   MVT VT = Op.getSimpleValueType();
7662 
7663   assert(VT.isVector() && "Unexpected type for vector truncate lowering");
7664 
7665   SDValue Src = Op.getOperand(0);
7666   MVT SrcVT = Src.getSimpleValueType();
7667 
7668   bool IsDirectExtend = IsExtend && (VT.getVectorElementType() != MVT::f64 ||
7669                                      SrcVT.getVectorElementType() != MVT::f16);
7670   bool IsDirectTrunc = !IsExtend && (VT.getVectorElementType() != MVT::f16 ||
7671                                      SrcVT.getVectorElementType() != MVT::f64);
7672 
7673   bool IsDirectConv = IsDirectExtend || IsDirectTrunc;
7674 
7675   // Prepare any fixed-length vector operands.
7676   MVT ContainerVT = VT;
7677   SDValue Mask, VL;
7678   if (IsVP) {
7679     Mask = Op.getOperand(1);
7680     VL = Op.getOperand(2);
7681   }
7682   if (VT.isFixedLengthVector()) {
7683     MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
7684     ContainerVT =
7685         SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
7686     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
7687     if (IsVP) {
7688       MVT MaskVT = getMaskTypeFor(ContainerVT);
7689       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
7690     }
7691   }
7692 
7693   if (!IsVP)
7694     std::tie(Mask, VL) =
7695         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
7696 
7697   unsigned ConvOpc = IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::FP_ROUND_VL;
7698 
7699   if (IsDirectConv) {
7700     Src = DAG.getNode(ConvOpc, DL, ContainerVT, Src, Mask, VL);
7701     if (VT.isFixedLengthVector())
7702       Src = convertFromScalableVector(VT, Src, DAG, Subtarget);
7703     return Src;
7704   }
7705 
7706   unsigned InterConvOpc =
7707       IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::VFNCVT_ROD_VL;
7708 
7709   MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
7710   SDValue IntermediateConv =
7711       DAG.getNode(InterConvOpc, DL, InterVT, Src, Mask, VL);
7712   SDValue Result =
7713       DAG.getNode(ConvOpc, DL, ContainerVT, IntermediateConv, Mask, VL);
7714   if (VT.isFixedLengthVector())
7715     return convertFromScalableVector(VT, Result, DAG, Subtarget);
7716   return Result;
7717 }
7718 
7719 // Given a scalable vector type and an index into it, returns the type for the
7720 // smallest subvector that the index fits in. This can be used to reduce LMUL
7721 // for operations like vslidedown.
7722 //
7723 // E.g. With Zvl128b, index 3 in a nxv4i32 fits within the first nxv2i32.
7724 static std::optional<MVT>
7725 getSmallestVTForIndex(MVT VecVT, unsigned MaxIdx, SDLoc DL, SelectionDAG &DAG,
7726                       const RISCVSubtarget &Subtarget) {
7727   assert(VecVT.isScalableVector());
7728   const unsigned EltSize = VecVT.getScalarSizeInBits();
7729   const unsigned VectorBitsMin = Subtarget.getRealMinVLen();
7730   const unsigned MinVLMAX = VectorBitsMin / EltSize;
7731   MVT SmallerVT;
7732   if (MaxIdx < MinVLMAX)
7733     SmallerVT = getLMUL1VT(VecVT);
7734   else if (MaxIdx < MinVLMAX * 2)
7735     SmallerVT = getLMUL1VT(VecVT).getDoubleNumVectorElementsVT();
7736   else if (MaxIdx < MinVLMAX * 4)
7737     SmallerVT = getLMUL1VT(VecVT)
7738                     .getDoubleNumVectorElementsVT()
7739                     .getDoubleNumVectorElementsVT();
7740   if (!SmallerVT.isValid() || !VecVT.bitsGT(SmallerVT))
7741     return std::nullopt;
7742   return SmallerVT;
7743 }
7744 
7745 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
7746 // first position of a vector, and that vector is slid up to the insert index.
7747 // By limiting the active vector length to index+1 and merging with the
7748 // original vector (with an undisturbed tail policy for elements >= VL), we
7749 // achieve the desired result of leaving all elements untouched except the one
7750 // at VL-1, which is replaced with the desired value.
7751 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
7752                                                     SelectionDAG &DAG) const {
7753   SDLoc DL(Op);
7754   MVT VecVT = Op.getSimpleValueType();
7755   SDValue Vec = Op.getOperand(0);
7756   SDValue Val = Op.getOperand(1);
7757   SDValue Idx = Op.getOperand(2);
7758 
7759   if (VecVT.getVectorElementType() == MVT::i1) {
7760     // FIXME: For now we just promote to an i8 vector and insert into that,
7761     // but this is probably not optimal.
7762     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
7763     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
7764     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
7765     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
7766   }
7767 
7768   MVT ContainerVT = VecVT;
7769   // If the operand is a fixed-length vector, convert to a scalable one.
7770   if (VecVT.isFixedLengthVector()) {
7771     ContainerVT = getContainerForFixedLengthVector(VecVT);
7772     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
7773   }
7774 
7775   // If we know the index we're going to insert at, we can shrink Vec so that
7776   // we're performing the scalar inserts and slideup on a smaller LMUL.
7777   MVT OrigContainerVT = ContainerVT;
7778   SDValue OrigVec = Vec;
7779   SDValue AlignedIdx;
7780   if (auto *IdxC = dyn_cast<ConstantSDNode>(Idx)) {
7781     const unsigned OrigIdx = IdxC->getZExtValue();
7782     // Do we know an upper bound on LMUL?
7783     if (auto ShrunkVT = getSmallestVTForIndex(ContainerVT, OrigIdx,
7784                                               DL, DAG, Subtarget)) {
7785       ContainerVT = *ShrunkVT;
7786       AlignedIdx = DAG.getVectorIdxConstant(0, DL);
7787     }
7788 
7789     // If we're compiling for an exact VLEN value, we can always perform
7790     // the insert in m1 as we can determine the register corresponding to
7791     // the index in the register group.
7792     const unsigned MinVLen = Subtarget.getRealMinVLen();
7793     const unsigned MaxVLen = Subtarget.getRealMaxVLen();
7794     const MVT M1VT = getLMUL1VT(ContainerVT);
7795     if (MinVLen == MaxVLen && ContainerVT.bitsGT(M1VT)) {
7796       EVT ElemVT = VecVT.getVectorElementType();
7797       unsigned ElemsPerVReg = MinVLen / ElemVT.getFixedSizeInBits();
7798       unsigned RemIdx = OrigIdx % ElemsPerVReg;
7799       unsigned SubRegIdx = OrigIdx / ElemsPerVReg;
7800       unsigned ExtractIdx =
7801           SubRegIdx * M1VT.getVectorElementCount().getKnownMinValue();
7802       AlignedIdx = DAG.getVectorIdxConstant(ExtractIdx, DL);
7803       Idx = DAG.getVectorIdxConstant(RemIdx, DL);
7804       ContainerVT = M1VT;
7805     }
7806 
7807     if (AlignedIdx)
7808       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ContainerVT, Vec,
7809                         AlignedIdx);
7810   }
7811 
7812   MVT XLenVT = Subtarget.getXLenVT();
7813 
7814   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
7815   // Even i64-element vectors on RV32 can be lowered without scalar
7816   // legalization if the most-significant 32 bits of the value are not affected
7817   // by the sign-extension of the lower 32 bits.
7818   // TODO: We could also catch sign extensions of a 32-bit value.
7819   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
7820     const auto *CVal = cast<ConstantSDNode>(Val);
7821     if (isInt<32>(CVal->getSExtValue())) {
7822       IsLegalInsert = true;
7823       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
7824     }
7825   }
7826 
7827   auto [Mask, VL] = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
7828 
7829   SDValue ValInVec;
7830 
7831   if (IsLegalInsert) {
7832     unsigned Opc =
7833         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
7834     if (isNullConstant(Idx)) {
7835       if (!VecVT.isFloatingPoint())
7836         Val = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Val);
7837       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
7838 
7839       if (AlignedIdx)
7840         Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, OrigContainerVT, OrigVec,
7841                           Vec, AlignedIdx);
7842       if (!VecVT.isFixedLengthVector())
7843         return Vec;
7844       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
7845     }
7846     ValInVec = lowerScalarInsert(Val, VL, ContainerVT, DL, DAG, Subtarget);
7847   } else {
7848     // On RV32, i64-element vectors must be specially handled to place the
7849     // value at element 0, by using two vslide1down instructions in sequence on
7850     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
7851     // this.
7852     SDValue ValLo, ValHi;
7853     std::tie(ValLo, ValHi) = DAG.SplitScalar(Val, DL, MVT::i32, MVT::i32);
7854     MVT I32ContainerVT =
7855         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
7856     SDValue I32Mask =
7857         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
7858     // Limit the active VL to two.
7859     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
7860     // If the Idx is 0 we can insert directly into the vector.
7861     if (isNullConstant(Idx)) {
7862       // First slide in the lo value, then the hi in above it. We use slide1down
7863       // to avoid the register group overlap constraint of vslide1up.
7864       ValInVec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32ContainerVT,
7865                              Vec, Vec, ValLo, I32Mask, InsertI64VL);
7866       // If the source vector is undef don't pass along the tail elements from
7867       // the previous slide1down.
7868       SDValue Tail = Vec.isUndef() ? Vec : ValInVec;
7869       ValInVec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32ContainerVT,
7870                              Tail, ValInVec, ValHi, I32Mask, InsertI64VL);
7871       // Bitcast back to the right container type.
7872       ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
7873 
7874       if (AlignedIdx)
7875         ValInVec =
7876             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, OrigContainerVT, OrigVec,
7877                         ValInVec, AlignedIdx);
7878       if (!VecVT.isFixedLengthVector())
7879         return ValInVec;
7880       return convertFromScalableVector(VecVT, ValInVec, DAG, Subtarget);
7881     }
7882 
7883     // First slide in the lo value, then the hi in above it. We use slide1down
7884     // to avoid the register group overlap constraint of vslide1up.
7885     ValInVec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32ContainerVT,
7886                            DAG.getUNDEF(I32ContainerVT),
7887                            DAG.getUNDEF(I32ContainerVT), ValLo,
7888                            I32Mask, InsertI64VL);
7889     ValInVec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32ContainerVT,
7890                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
7891                            I32Mask, InsertI64VL);
7892     // Bitcast back to the right container type.
7893     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
7894   }
7895 
7896   // Now that the value is in a vector, slide it into position.
7897   SDValue InsertVL =
7898       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
7899 
7900   // Use tail agnostic policy if Idx is the last index of Vec.
7901   unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
7902   if (VecVT.isFixedLengthVector() && isa<ConstantSDNode>(Idx) &&
7903       cast<ConstantSDNode>(Idx)->getZExtValue() + 1 ==
7904           VecVT.getVectorNumElements())
7905     Policy = RISCVII::TAIL_AGNOSTIC;
7906   SDValue Slideup = getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, ValInVec,
7907                                 Idx, Mask, InsertVL, Policy);
7908 
7909   if (AlignedIdx)
7910     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, OrigContainerVT, OrigVec,
7911                           Slideup, AlignedIdx);
7912   if (!VecVT.isFixedLengthVector())
7913     return Slideup;
7914   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
7915 }
7916 
7917 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
7918 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
7919 // types this is done using VMV_X_S to allow us to glean information about the
7920 // sign bits of the result.
7921 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
7922                                                      SelectionDAG &DAG) const {
7923   SDLoc DL(Op);
7924   SDValue Idx = Op.getOperand(1);
7925   SDValue Vec = Op.getOperand(0);
7926   EVT EltVT = Op.getValueType();
7927   MVT VecVT = Vec.getSimpleValueType();
7928   MVT XLenVT = Subtarget.getXLenVT();
7929 
7930   if (VecVT.getVectorElementType() == MVT::i1) {
7931     // Use vfirst.m to extract the first bit.
7932     if (isNullConstant(Idx)) {
7933       MVT ContainerVT = VecVT;
7934       if (VecVT.isFixedLengthVector()) {
7935         ContainerVT = getContainerForFixedLengthVector(VecVT);
7936         Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
7937       }
7938       auto [Mask, VL] = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
7939       SDValue Vfirst =
7940           DAG.getNode(RISCVISD::VFIRST_VL, DL, XLenVT, Vec, Mask, VL);
7941       SDValue Res = DAG.getSetCC(DL, XLenVT, Vfirst,
7942                                  DAG.getConstant(0, DL, XLenVT), ISD::SETEQ);
7943       return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Res);
7944     }
7945     if (VecVT.isFixedLengthVector()) {
7946       unsigned NumElts = VecVT.getVectorNumElements();
7947       if (NumElts >= 8) {
7948         MVT WideEltVT;
7949         unsigned WidenVecLen;
7950         SDValue ExtractElementIdx;
7951         SDValue ExtractBitIdx;
7952         unsigned MaxEEW = Subtarget.getELen();
7953         MVT LargestEltVT = MVT::getIntegerVT(
7954             std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
7955         if (NumElts <= LargestEltVT.getSizeInBits()) {
7956           assert(isPowerOf2_32(NumElts) &&
7957                  "the number of elements should be power of 2");
7958           WideEltVT = MVT::getIntegerVT(NumElts);
7959           WidenVecLen = 1;
7960           ExtractElementIdx = DAG.getConstant(0, DL, XLenVT);
7961           ExtractBitIdx = Idx;
7962         } else {
7963           WideEltVT = LargestEltVT;
7964           WidenVecLen = NumElts / WideEltVT.getSizeInBits();
7965           // extract element index = index / element width
7966           ExtractElementIdx = DAG.getNode(
7967               ISD::SRL, DL, XLenVT, Idx,
7968               DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT));
7969           // mask bit index = index % element width
7970           ExtractBitIdx = DAG.getNode(
7971               ISD::AND, DL, XLenVT, Idx,
7972               DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
7973         }
7974         MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen);
7975         Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec);
7976         SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT,
7977                                          Vec, ExtractElementIdx);
7978         // Extract the bit from GPR.
7979         SDValue ShiftRight =
7980             DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx);
7981         SDValue Res = DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight,
7982                                   DAG.getConstant(1, DL, XLenVT));
7983         return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Res);
7984       }
7985     }
7986     // Otherwise, promote to an i8 vector and extract from that.
7987     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
7988     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
7989     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
7990   }
7991 
7992   // If this is a fixed vector, we need to convert it to a scalable vector.
7993   MVT ContainerVT = VecVT;
7994   if (VecVT.isFixedLengthVector()) {
7995     ContainerVT = getContainerForFixedLengthVector(VecVT);
7996     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
7997   }
7998 
7999   // If we're compiling for an exact VLEN value and we have a known
8000   // constant index, we can always perform the extract in m1 (or
8001   // smaller) as we can determine the register corresponding to
8002   // the index in the register group.
8003   const unsigned MinVLen = Subtarget.getRealMinVLen();
8004   const unsigned MaxVLen = Subtarget.getRealMaxVLen();
8005   if (auto *IdxC = dyn_cast<ConstantSDNode>(Idx);
8006       IdxC && MinVLen == MaxVLen &&
8007       VecVT.getSizeInBits().getKnownMinValue() > MinVLen) {
8008     MVT M1VT = getLMUL1VT(ContainerVT);
8009     unsigned OrigIdx = IdxC->getZExtValue();
8010     EVT ElemVT = VecVT.getVectorElementType();
8011     unsigned ElemsPerVReg = MinVLen / ElemVT.getFixedSizeInBits();
8012     unsigned RemIdx = OrigIdx % ElemsPerVReg;
8013     unsigned SubRegIdx = OrigIdx / ElemsPerVReg;
8014     unsigned ExtractIdx =
8015       SubRegIdx * M1VT.getVectorElementCount().getKnownMinValue();
8016     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, M1VT, Vec,
8017                       DAG.getVectorIdxConstant(ExtractIdx, DL));
8018     Idx = DAG.getVectorIdxConstant(RemIdx, DL);
8019     ContainerVT = M1VT;
8020   }
8021 
8022   // Reduce the LMUL of our slidedown and vmv.x.s to the smallest LMUL which
8023   // contains our index.
8024   std::optional<uint64_t> MaxIdx;
8025   if (VecVT.isFixedLengthVector())
8026     MaxIdx = VecVT.getVectorNumElements() - 1;
8027   if (auto *IdxC = dyn_cast<ConstantSDNode>(Idx))
8028     MaxIdx = IdxC->getZExtValue();
8029   if (MaxIdx) {
8030     if (auto SmallerVT =
8031             getSmallestVTForIndex(ContainerVT, *MaxIdx, DL, DAG, Subtarget)) {
8032       ContainerVT = *SmallerVT;
8033       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ContainerVT, Vec,
8034                         DAG.getConstant(0, DL, XLenVT));
8035     }
8036   }
8037 
8038   // If after narrowing, the required slide is still greater than LMUL2,
8039   // fallback to generic expansion and go through the stack.  This is done
8040   // for a subtle reason: extracting *all* elements out of a vector is
8041   // widely expected to be linear in vector size, but because vslidedown
8042   // is linear in LMUL, performing N extracts using vslidedown becomes
8043   // O(n^2) / (VLEN/ETYPE) work.  On the surface, going through the stack
8044   // seems to have the same problem (the store is linear in LMUL), but the
8045   // generic expansion *memoizes* the store, and thus for many extracts of
8046   // the same vector we end up with one store and a bunch of loads.
8047   // TODO: We don't have the same code for insert_vector_elt because we
8048   // have BUILD_VECTOR and handle the degenerate case there.  Should we
8049   // consider adding an inverse BUILD_VECTOR node?
8050   MVT LMUL2VT = getLMUL1VT(ContainerVT).getDoubleNumVectorElementsVT();
8051   if (ContainerVT.bitsGT(LMUL2VT) && VecVT.isFixedLengthVector())
8052     return SDValue();
8053 
8054   // If the index is 0, the vector is already in the right position.
8055   if (!isNullConstant(Idx)) {
8056     // Use a VL of 1 to avoid processing more elements than we need.
8057     auto [Mask, VL] = getDefaultVLOps(1, ContainerVT, DL, DAG, Subtarget);
8058     Vec = getVSlidedown(DAG, Subtarget, DL, ContainerVT,
8059                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
8060   }
8061 
8062   if (!EltVT.isInteger()) {
8063     // Floating-point extracts are handled in TableGen.
8064     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
8065                        DAG.getConstant(0, DL, XLenVT));
8066   }
8067 
8068   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
8069   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
8070 }
8071 
8072 // Some RVV intrinsics may claim that they want an integer operand to be
8073 // promoted or expanded.
8074 static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
8075                                            const RISCVSubtarget &Subtarget) {
8076   assert((Op.getOpcode() == ISD::INTRINSIC_VOID ||
8077           Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
8078           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
8079          "Unexpected opcode");
8080 
8081   if (!Subtarget.hasVInstructions())
8082     return SDValue();
8083 
8084   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_VOID ||
8085                   Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
8086   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
8087 
8088   SDLoc DL(Op);
8089 
8090   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
8091       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
8092   if (!II || !II->hasScalarOperand())
8093     return SDValue();
8094 
8095   unsigned SplatOp = II->ScalarOperand + 1 + HasChain;
8096   assert(SplatOp < Op.getNumOperands());
8097 
8098   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
8099   SDValue &ScalarOp = Operands[SplatOp];
8100   MVT OpVT = ScalarOp.getSimpleValueType();
8101   MVT XLenVT = Subtarget.getXLenVT();
8102 
8103   // If this isn't a scalar, or its type is XLenVT we're done.
8104   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
8105     return SDValue();
8106 
8107   // Simplest case is that the operand needs to be promoted to XLenVT.
8108   if (OpVT.bitsLT(XLenVT)) {
8109     // If the operand is a constant, sign extend to increase our chances
8110     // of being able to use a .vi instruction. ANY_EXTEND would become a
8111     // a zero extend and the simm5 check in isel would fail.
8112     // FIXME: Should we ignore the upper bits in isel instead?
8113     unsigned ExtOpc =
8114         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
8115     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
8116     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
8117   }
8118 
8119   // Use the previous operand to get the vXi64 VT. The result might be a mask
8120   // VT for compares. Using the previous operand assumes that the previous
8121   // operand will never have a smaller element size than a scalar operand and
8122   // that a widening operation never uses SEW=64.
8123   // NOTE: If this fails the below assert, we can probably just find the
8124   // element count from any operand or result and use it to construct the VT.
8125   assert(II->ScalarOperand > 0 && "Unexpected splat operand!");
8126   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
8127 
8128   // The more complex case is when the scalar is larger than XLenVT.
8129   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
8130          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
8131 
8132   // If this is a sign-extended 32-bit value, we can truncate it and rely on the
8133   // instruction to sign-extend since SEW>XLEN.
8134   if (DAG.ComputeNumSignBits(ScalarOp) > 32) {
8135     ScalarOp = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, ScalarOp);
8136     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
8137   }
8138 
8139   switch (IntNo) {
8140   case Intrinsic::riscv_vslide1up:
8141   case Intrinsic::riscv_vslide1down:
8142   case Intrinsic::riscv_vslide1up_mask:
8143   case Intrinsic::riscv_vslide1down_mask: {
8144     // We need to special case these when the scalar is larger than XLen.
8145     unsigned NumOps = Op.getNumOperands();
8146     bool IsMasked = NumOps == 7;
8147 
8148     // Convert the vector source to the equivalent nxvXi32 vector.
8149     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
8150     SDValue Vec = DAG.getBitcast(I32VT, Operands[2]);
8151     SDValue ScalarLo, ScalarHi;
8152     std::tie(ScalarLo, ScalarHi) =
8153         DAG.SplitScalar(ScalarOp, DL, MVT::i32, MVT::i32);
8154 
8155     // Double the VL since we halved SEW.
8156     SDValue AVL = getVLOperand(Op);
8157     SDValue I32VL;
8158 
8159     // Optimize for constant AVL
8160     if (isa<ConstantSDNode>(AVL)) {
8161       const auto [MinVLMAX, MaxVLMAX] =
8162           RISCVTargetLowering::computeVLMAXBounds(VT, Subtarget);
8163 
8164       uint64_t AVLInt = cast<ConstantSDNode>(AVL)->getZExtValue();
8165       if (AVLInt <= MinVLMAX) {
8166         I32VL = DAG.getConstant(2 * AVLInt, DL, XLenVT);
8167       } else if (AVLInt >= 2 * MaxVLMAX) {
8168         // Just set vl to VLMAX in this situation
8169         RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(I32VT);
8170         SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
8171         unsigned Sew = RISCVVType::encodeSEW(I32VT.getScalarSizeInBits());
8172         SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
8173         SDValue SETVLMAX = DAG.getTargetConstant(
8174             Intrinsic::riscv_vsetvlimax, DL, MVT::i32);
8175         I32VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVLMAX, SEW,
8176                             LMUL);
8177       } else {
8178         // For AVL between (MinVLMAX, 2 * MaxVLMAX), the actual working vl
8179         // is related to the hardware implementation.
8180         // So let the following code handle
8181       }
8182     }
8183     if (!I32VL) {
8184       RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
8185       SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
8186       unsigned Sew = RISCVVType::encodeSEW(VT.getScalarSizeInBits());
8187       SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
8188       SDValue SETVL =
8189           DAG.getTargetConstant(Intrinsic::riscv_vsetvli, DL, MVT::i32);
8190       // Using vsetvli instruction to get actually used length which related to
8191       // the hardware implementation
8192       SDValue VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVL, AVL,
8193                                SEW, LMUL);
8194       I32VL =
8195           DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
8196     }
8197 
8198     SDValue I32Mask = getAllOnesMask(I32VT, I32VL, DL, DAG);
8199 
8200     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
8201     // instructions.
8202     SDValue Passthru;
8203     if (IsMasked)
8204       Passthru = DAG.getUNDEF(I32VT);
8205     else
8206       Passthru = DAG.getBitcast(I32VT, Operands[1]);
8207 
8208     if (IntNo == Intrinsic::riscv_vslide1up ||
8209         IntNo == Intrinsic::riscv_vslide1up_mask) {
8210       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
8211                         ScalarHi, I32Mask, I32VL);
8212       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
8213                         ScalarLo, I32Mask, I32VL);
8214     } else {
8215       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
8216                         ScalarLo, I32Mask, I32VL);
8217       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
8218                         ScalarHi, I32Mask, I32VL);
8219     }
8220 
8221     // Convert back to nxvXi64.
8222     Vec = DAG.getBitcast(VT, Vec);
8223 
8224     if (!IsMasked)
8225       return Vec;
8226     // Apply mask after the operation.
8227     SDValue Mask = Operands[NumOps - 3];
8228     SDValue MaskedOff = Operands[1];
8229     // Assume Policy operand is the last operand.
8230     uint64_t Policy =
8231         cast<ConstantSDNode>(Operands[NumOps - 1])->getZExtValue();
8232     // We don't need to select maskedoff if it's undef.
8233     if (MaskedOff.isUndef())
8234       return Vec;
8235     // TAMU
8236     if (Policy == RISCVII::TAIL_AGNOSTIC)
8237       return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff,
8238                          AVL);
8239     // TUMA or TUMU: Currently we always emit tumu policy regardless of tuma.
8240     // It's fine because vmerge does not care mask policy.
8241     return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff,
8242                        AVL);
8243   }
8244   }
8245 
8246   // We need to convert the scalar to a splat vector.
8247   SDValue VL = getVLOperand(Op);
8248   assert(VL.getValueType() == XLenVT);
8249   ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG);
8250   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
8251 }
8252 
8253 // Lower the llvm.get.vector.length intrinsic to vsetvli. We only support
8254 // scalable vector llvm.get.vector.length for now.
8255 //
8256 // We need to convert from a scalable VF to a vsetvli with VLMax equal to
8257 // (vscale * VF). The vscale and VF are independent of element width. We use
8258 // SEW=8 for the vsetvli because it is the only element width that supports all
8259 // fractional LMULs. The LMUL is choosen so that with SEW=8 the VLMax is
8260 // (vscale * VF). Where vscale is defined as VLEN/RVVBitsPerBlock. The
8261 // InsertVSETVLI pass can fix up the vtype of the vsetvli if a different
8262 // SEW and LMUL are better for the surrounding vector instructions.
8263 static SDValue lowerGetVectorLength(SDNode *N, SelectionDAG &DAG,
8264                                     const RISCVSubtarget &Subtarget) {
8265   MVT XLenVT = Subtarget.getXLenVT();
8266 
8267   // The smallest LMUL is only valid for the smallest element width.
8268   const unsigned ElementWidth = 8;
8269 
8270   // Determine the VF that corresponds to LMUL 1 for ElementWidth.
8271   unsigned LMul1VF = RISCV::RVVBitsPerBlock / ElementWidth;
8272   // We don't support VF==1 with ELEN==32.
8273   unsigned MinVF = RISCV::RVVBitsPerBlock / Subtarget.getELen();
8274 
8275   unsigned VF = N->getConstantOperandVal(2);
8276   assert(VF >= MinVF && VF <= (LMul1VF * 8) && isPowerOf2_32(VF) &&
8277          "Unexpected VF");
8278   (void)MinVF;
8279 
8280   bool Fractional = VF < LMul1VF;
8281   unsigned LMulVal = Fractional ? LMul1VF / VF : VF / LMul1VF;
8282   unsigned VLMUL = (unsigned)RISCVVType::encodeLMUL(LMulVal, Fractional);
8283   unsigned VSEW = RISCVVType::encodeSEW(ElementWidth);
8284 
8285   SDLoc DL(N);
8286 
8287   SDValue LMul = DAG.getTargetConstant(VLMUL, DL, XLenVT);
8288   SDValue Sew = DAG.getTargetConstant(VSEW, DL, XLenVT);
8289 
8290   SDValue AVL = DAG.getNode(ISD::ZERO_EXTEND, DL, XLenVT, N->getOperand(1));
8291 
8292   SDValue ID = DAG.getTargetConstant(Intrinsic::riscv_vsetvli, DL, XLenVT);
8293   SDValue Res =
8294       DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, ID, AVL, Sew, LMul);
8295   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), Res);
8296 }
8297 
8298 static void getVCIXOperands(SDValue &Op, SelectionDAG &DAG,
8299                             SmallVector<SDValue> &Ops) {
8300   SDLoc DL(Op);
8301 
8302   const RISCVSubtarget &Subtarget =
8303       DAG.getMachineFunction().getSubtarget<RISCVSubtarget>();
8304   for (const SDValue &V : Op->op_values()) {
8305     EVT ValType = V.getValueType();
8306     if (ValType.isScalableVector() && ValType.isFloatingPoint()) {
8307       MVT InterimIVT =
8308           MVT::getVectorVT(MVT::getIntegerVT(ValType.getScalarSizeInBits()),
8309                            ValType.getVectorElementCount());
8310       Ops.push_back(DAG.getBitcast(InterimIVT, V));
8311     } else if (ValType.isFixedLengthVector()) {
8312       MVT OpContainerVT = getContainerForFixedLengthVector(
8313           DAG, V.getSimpleValueType(), Subtarget);
8314       Ops.push_back(convertToScalableVector(OpContainerVT, V, DAG, Subtarget));
8315     } else
8316       Ops.push_back(V);
8317   }
8318 }
8319 
8320 // LMUL * VLEN should be greater than or equal to EGS * SEW
8321 static inline bool isValidEGW(int EGS, EVT VT,
8322                               const RISCVSubtarget &Subtarget) {
8323   return (Subtarget.getRealMinVLen() *
8324              VT.getSizeInBits().getKnownMinValue()) / RISCV::RVVBitsPerBlock >=
8325          EGS * VT.getScalarSizeInBits();
8326 }
8327 
8328 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
8329                                                      SelectionDAG &DAG) const {
8330   unsigned IntNo = Op.getConstantOperandVal(0);
8331   SDLoc DL(Op);
8332   MVT XLenVT = Subtarget.getXLenVT();
8333 
8334   switch (IntNo) {
8335   default:
8336     break; // Don't custom lower most intrinsics.
8337   case Intrinsic::thread_pointer: {
8338     EVT PtrVT = getPointerTy(DAG.getDataLayout());
8339     return DAG.getRegister(RISCV::X4, PtrVT);
8340   }
8341   case Intrinsic::riscv_orc_b:
8342   case Intrinsic::riscv_brev8:
8343   case Intrinsic::riscv_sha256sig0:
8344   case Intrinsic::riscv_sha256sig1:
8345   case Intrinsic::riscv_sha256sum0:
8346   case Intrinsic::riscv_sha256sum1:
8347   case Intrinsic::riscv_sm3p0:
8348   case Intrinsic::riscv_sm3p1: {
8349     unsigned Opc;
8350     switch (IntNo) {
8351     case Intrinsic::riscv_orc_b:      Opc = RISCVISD::ORC_B;      break;
8352     case Intrinsic::riscv_brev8:      Opc = RISCVISD::BREV8;      break;
8353     case Intrinsic::riscv_sha256sig0: Opc = RISCVISD::SHA256SIG0; break;
8354     case Intrinsic::riscv_sha256sig1: Opc = RISCVISD::SHA256SIG1; break;
8355     case Intrinsic::riscv_sha256sum0: Opc = RISCVISD::SHA256SUM0; break;
8356     case Intrinsic::riscv_sha256sum1: Opc = RISCVISD::SHA256SUM1; break;
8357     case Intrinsic::riscv_sm3p0:      Opc = RISCVISD::SM3P0;      break;
8358     case Intrinsic::riscv_sm3p1:      Opc = RISCVISD::SM3P1;      break;
8359     }
8360 
8361     if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
8362       SDValue NewOp =
8363           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
8364       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp);
8365       return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
8366     }
8367 
8368     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
8369   }
8370   case Intrinsic::riscv_sm4ks:
8371   case Intrinsic::riscv_sm4ed: {
8372     unsigned Opc =
8373         IntNo == Intrinsic::riscv_sm4ks ? RISCVISD::SM4KS : RISCVISD::SM4ED;
8374 
8375     if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
8376       SDValue NewOp0 =
8377           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
8378       SDValue NewOp1 =
8379           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(2));
8380       SDValue Res =
8381           DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, Op.getOperand(3));
8382       return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
8383     }
8384 
8385     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2),
8386                        Op.getOperand(3));
8387   }
8388   case Intrinsic::riscv_zip:
8389   case Intrinsic::riscv_unzip: {
8390     unsigned Opc =
8391         IntNo == Intrinsic::riscv_zip ? RISCVISD::ZIP : RISCVISD::UNZIP;
8392     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
8393   }
8394   case Intrinsic::riscv_clmul:
8395     if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
8396       SDValue NewOp0 =
8397           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
8398       SDValue NewOp1 =
8399           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(2));
8400       SDValue Res = DAG.getNode(RISCVISD::CLMUL, DL, MVT::i64, NewOp0, NewOp1);
8401       return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
8402     }
8403     return DAG.getNode(RISCVISD::CLMUL, DL, XLenVT, Op.getOperand(1),
8404                        Op.getOperand(2));
8405   case Intrinsic::riscv_clmulh:
8406   case Intrinsic::riscv_clmulr: {
8407     unsigned Opc =
8408         IntNo == Intrinsic::riscv_clmulh ? RISCVISD::CLMULH : RISCVISD::CLMULR;
8409     if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
8410       SDValue NewOp0 =
8411           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
8412       SDValue NewOp1 =
8413           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(2));
8414       NewOp0 = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0,
8415                            DAG.getConstant(32, DL, MVT::i64));
8416       NewOp1 = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp1,
8417                            DAG.getConstant(32, DL, MVT::i64));
8418       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1);
8419       Res = DAG.getNode(ISD::SRL, DL, MVT::i64, Res,
8420                         DAG.getConstant(32, DL, MVT::i64));
8421       return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
8422     }
8423 
8424     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
8425   }
8426   case Intrinsic::experimental_get_vector_length:
8427     return lowerGetVectorLength(Op.getNode(), DAG, Subtarget);
8428   case Intrinsic::riscv_vmv_x_s: {
8429     SDValue Res = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Op.getOperand(1));
8430     return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Res);
8431   }
8432   case Intrinsic::riscv_vfmv_f_s:
8433     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getValueType(),
8434                        Op.getOperand(1), DAG.getConstant(0, DL, XLenVT));
8435   case Intrinsic::riscv_vmv_v_x:
8436     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
8437                             Op.getOperand(3), Op.getSimpleValueType(), DL, DAG,
8438                             Subtarget);
8439   case Intrinsic::riscv_vfmv_v_f:
8440     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
8441                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
8442   case Intrinsic::riscv_vmv_s_x: {
8443     SDValue Scalar = Op.getOperand(2);
8444 
8445     if (Scalar.getValueType().bitsLE(XLenVT)) {
8446       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
8447       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
8448                          Op.getOperand(1), Scalar, Op.getOperand(3));
8449     }
8450 
8451     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
8452 
8453     // This is an i64 value that lives in two scalar registers. We have to
8454     // insert this in a convoluted way. First we build vXi64 splat containing
8455     // the two values that we assemble using some bit math. Next we'll use
8456     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
8457     // to merge element 0 from our splat into the source vector.
8458     // FIXME: This is probably not the best way to do this, but it is
8459     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
8460     // point.
8461     //   sw lo, (a0)
8462     //   sw hi, 4(a0)
8463     //   vlse vX, (a0)
8464     //
8465     //   vid.v      vVid
8466     //   vmseq.vx   mMask, vVid, 0
8467     //   vmerge.vvm vDest, vSrc, vVal, mMask
8468     MVT VT = Op.getSimpleValueType();
8469     SDValue Vec = Op.getOperand(1);
8470     SDValue VL = getVLOperand(Op);
8471 
8472     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG);
8473     if (Op.getOperand(1).isUndef())
8474       return SplattedVal;
8475     SDValue SplattedIdx =
8476         DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8477                     DAG.getConstant(0, DL, MVT::i32), VL);
8478 
8479     MVT MaskVT = getMaskTypeFor(VT);
8480     SDValue Mask = getAllOnesMask(VT, VL, DL, DAG);
8481     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
8482     SDValue SelectCond =
8483         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT,
8484                     {VID, SplattedIdx, DAG.getCondCode(ISD::SETEQ),
8485                      DAG.getUNDEF(MaskVT), Mask, VL});
8486     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
8487                        Vec, VL);
8488   }
8489   // EGS * EEW >= 128 bits
8490   case Intrinsic::riscv_vaesdf_vv:
8491   case Intrinsic::riscv_vaesdf_vs:
8492   case Intrinsic::riscv_vaesdm_vv:
8493   case Intrinsic::riscv_vaesdm_vs:
8494   case Intrinsic::riscv_vaesef_vv:
8495   case Intrinsic::riscv_vaesef_vs:
8496   case Intrinsic::riscv_vaesem_vv:
8497   case Intrinsic::riscv_vaesem_vs:
8498   case Intrinsic::riscv_vaeskf1:
8499   case Intrinsic::riscv_vaeskf2:
8500   case Intrinsic::riscv_vaesz_vs:
8501   case Intrinsic::riscv_vsm4k:
8502   case Intrinsic::riscv_vsm4r_vv:
8503   case Intrinsic::riscv_vsm4r_vs: {
8504     if (!isValidEGW(4, Op.getSimpleValueType(), Subtarget) ||
8505         !isValidEGW(4, Op->getOperand(1).getSimpleValueType(), Subtarget) ||
8506         !isValidEGW(4, Op->getOperand(2).getSimpleValueType(), Subtarget))
8507       report_fatal_error("EGW should be greater than or equal to 4 * SEW.");
8508     return Op;
8509   }
8510   // EGS * EEW >= 256 bits
8511   case Intrinsic::riscv_vsm3c:
8512   case Intrinsic::riscv_vsm3me: {
8513     if (!isValidEGW(8, Op.getSimpleValueType(), Subtarget) ||
8514         !isValidEGW(8, Op->getOperand(1).getSimpleValueType(), Subtarget))
8515       report_fatal_error("EGW should be greater than or equal to 8 * SEW.");
8516     return Op;
8517   }
8518   // zvknha(SEW=32)/zvknhb(SEW=[32|64])
8519   case Intrinsic::riscv_vsha2ch:
8520   case Intrinsic::riscv_vsha2cl:
8521   case Intrinsic::riscv_vsha2ms: {
8522     if (Op->getSimpleValueType(0).getScalarSizeInBits() == 64 &&
8523         !Subtarget.hasStdExtZvknhb())
8524       report_fatal_error("SEW=64 needs Zvknhb to be enabled.");
8525     if (!isValidEGW(4, Op.getSimpleValueType(), Subtarget) ||
8526         !isValidEGW(4, Op->getOperand(1).getSimpleValueType(), Subtarget) ||
8527         !isValidEGW(4, Op->getOperand(2).getSimpleValueType(), Subtarget))
8528       report_fatal_error("EGW should be greater than or equal to 4 * SEW.");
8529     return Op;
8530   }
8531   case Intrinsic::riscv_sf_vc_v_x:
8532   case Intrinsic::riscv_sf_vc_v_i:
8533   case Intrinsic::riscv_sf_vc_v_xv:
8534   case Intrinsic::riscv_sf_vc_v_iv:
8535   case Intrinsic::riscv_sf_vc_v_vv:
8536   case Intrinsic::riscv_sf_vc_v_fv:
8537   case Intrinsic::riscv_sf_vc_v_xvv:
8538   case Intrinsic::riscv_sf_vc_v_ivv:
8539   case Intrinsic::riscv_sf_vc_v_vvv:
8540   case Intrinsic::riscv_sf_vc_v_fvv:
8541   case Intrinsic::riscv_sf_vc_v_xvw:
8542   case Intrinsic::riscv_sf_vc_v_ivw:
8543   case Intrinsic::riscv_sf_vc_v_vvw:
8544   case Intrinsic::riscv_sf_vc_v_fvw: {
8545     MVT VT = Op.getSimpleValueType();
8546 
8547     SmallVector<SDValue> Ops;
8548     getVCIXOperands(Op, DAG, Ops);
8549 
8550     MVT RetVT = VT;
8551     if (VT.isFixedLengthVector())
8552       RetVT = getContainerForFixedLengthVector(VT);
8553     else if (VT.isFloatingPoint())
8554       RetVT = MVT::getVectorVT(MVT::getIntegerVT(VT.getScalarSizeInBits()),
8555                                VT.getVectorElementCount());
8556 
8557     SDValue NewNode = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, RetVT, Ops);
8558 
8559     if (VT.isFixedLengthVector())
8560       NewNode = convertFromScalableVector(VT, NewNode, DAG, Subtarget);
8561     else if (VT.isFloatingPoint())
8562       NewNode = DAG.getBitcast(VT, NewNode);
8563 
8564     if (Op == NewNode)
8565       break;
8566 
8567     return NewNode;
8568   }
8569   }
8570 
8571   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
8572 }
8573 
8574 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
8575                                                     SelectionDAG &DAG) const {
8576   unsigned IntNo = Op.getConstantOperandVal(1);
8577   switch (IntNo) {
8578   default:
8579     break;
8580   case Intrinsic::riscv_masked_strided_load: {
8581     SDLoc DL(Op);
8582     MVT XLenVT = Subtarget.getXLenVT();
8583 
8584     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
8585     // the selection of the masked intrinsics doesn't do this for us.
8586     SDValue Mask = Op.getOperand(5);
8587     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
8588 
8589     MVT VT = Op->getSimpleValueType(0);
8590     MVT ContainerVT = VT;
8591     if (VT.isFixedLengthVector())
8592       ContainerVT = getContainerForFixedLengthVector(VT);
8593 
8594     SDValue PassThru = Op.getOperand(2);
8595     if (!IsUnmasked) {
8596       MVT MaskVT = getMaskTypeFor(ContainerVT);
8597       if (VT.isFixedLengthVector()) {
8598         Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
8599         PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
8600       }
8601     }
8602 
8603     auto *Load = cast<MemIntrinsicSDNode>(Op);
8604     SDValue VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
8605     SDValue Ptr = Op.getOperand(3);
8606     SDValue Stride = Op.getOperand(4);
8607     SDValue Result, Chain;
8608 
8609     // TODO: We restrict this to unmasked loads currently in consideration of
8610     // the complexity of hanlding all falses masks.
8611     if (IsUnmasked && isNullConstant(Stride)) {
8612       MVT ScalarVT = ContainerVT.getVectorElementType();
8613       SDValue ScalarLoad =
8614           DAG.getExtLoad(ISD::ZEXTLOAD, DL, XLenVT, Load->getChain(), Ptr,
8615                          ScalarVT, Load->getMemOperand());
8616       Chain = ScalarLoad.getValue(1);
8617       Result = lowerScalarSplat(SDValue(), ScalarLoad, VL, ContainerVT, DL, DAG,
8618                                 Subtarget);
8619     } else {
8620       SDValue IntID = DAG.getTargetConstant(
8621           IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
8622           XLenVT);
8623 
8624       SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
8625       if (IsUnmasked)
8626         Ops.push_back(DAG.getUNDEF(ContainerVT));
8627       else
8628         Ops.push_back(PassThru);
8629       Ops.push_back(Ptr);
8630       Ops.push_back(Stride);
8631       if (!IsUnmasked)
8632         Ops.push_back(Mask);
8633       Ops.push_back(VL);
8634       if (!IsUnmasked) {
8635         SDValue Policy =
8636             DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
8637         Ops.push_back(Policy);
8638       }
8639 
8640       SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
8641       Result =
8642           DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
8643                                   Load->getMemoryVT(), Load->getMemOperand());
8644       Chain = Result.getValue(1);
8645     }
8646     if (VT.isFixedLengthVector())
8647       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
8648     return DAG.getMergeValues({Result, Chain}, DL);
8649   }
8650   case Intrinsic::riscv_seg2_load:
8651   case Intrinsic::riscv_seg3_load:
8652   case Intrinsic::riscv_seg4_load:
8653   case Intrinsic::riscv_seg5_load:
8654   case Intrinsic::riscv_seg6_load:
8655   case Intrinsic::riscv_seg7_load:
8656   case Intrinsic::riscv_seg8_load: {
8657     SDLoc DL(Op);
8658     static const Intrinsic::ID VlsegInts[7] = {
8659         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
8660         Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
8661         Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
8662         Intrinsic::riscv_vlseg8};
8663     unsigned NF = Op->getNumValues() - 1;
8664     assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
8665     MVT XLenVT = Subtarget.getXLenVT();
8666     MVT VT = Op->getSimpleValueType(0);
8667     MVT ContainerVT = getContainerForFixedLengthVector(VT);
8668 
8669     SDValue VL = getVLOp(VT.getVectorNumElements(), ContainerVT, DL, DAG,
8670                          Subtarget);
8671     SDValue IntID = DAG.getTargetConstant(VlsegInts[NF - 2], DL, XLenVT);
8672     auto *Load = cast<MemIntrinsicSDNode>(Op);
8673     SmallVector<EVT, 9> ContainerVTs(NF, ContainerVT);
8674     ContainerVTs.push_back(MVT::Other);
8675     SDVTList VTs = DAG.getVTList(ContainerVTs);
8676     SmallVector<SDValue, 12> Ops = {Load->getChain(), IntID};
8677     Ops.insert(Ops.end(), NF, DAG.getUNDEF(ContainerVT));
8678     Ops.push_back(Op.getOperand(2));
8679     Ops.push_back(VL);
8680     SDValue Result =
8681         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
8682                                 Load->getMemoryVT(), Load->getMemOperand());
8683     SmallVector<SDValue, 9> Results;
8684     for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++)
8685       Results.push_back(convertFromScalableVector(VT, Result.getValue(RetIdx),
8686                                                   DAG, Subtarget));
8687     Results.push_back(Result.getValue(NF));
8688     return DAG.getMergeValues(Results, DL);
8689   }
8690   case Intrinsic::riscv_sf_vc_v_x_se:
8691   case Intrinsic::riscv_sf_vc_v_i_se:
8692   case Intrinsic::riscv_sf_vc_v_xv_se:
8693   case Intrinsic::riscv_sf_vc_v_iv_se:
8694   case Intrinsic::riscv_sf_vc_v_vv_se:
8695   case Intrinsic::riscv_sf_vc_v_fv_se:
8696   case Intrinsic::riscv_sf_vc_v_xvv_se:
8697   case Intrinsic::riscv_sf_vc_v_ivv_se:
8698   case Intrinsic::riscv_sf_vc_v_vvv_se:
8699   case Intrinsic::riscv_sf_vc_v_fvv_se:
8700   case Intrinsic::riscv_sf_vc_v_xvw_se:
8701   case Intrinsic::riscv_sf_vc_v_ivw_se:
8702   case Intrinsic::riscv_sf_vc_v_vvw_se:
8703   case Intrinsic::riscv_sf_vc_v_fvw_se: {
8704     MVT VT = Op.getSimpleValueType();
8705     SDLoc DL(Op);
8706     SmallVector<SDValue> Ops;
8707     getVCIXOperands(Op, DAG, Ops);
8708 
8709     MVT RetVT = VT;
8710     if (VT.isFixedLengthVector())
8711       RetVT = getContainerForFixedLengthVector(VT);
8712     else if (VT.isFloatingPoint())
8713       RetVT = MVT::getVectorVT(MVT::getIntegerVT(RetVT.getScalarSizeInBits()),
8714                                RetVT.getVectorElementCount());
8715 
8716     SDVTList VTs = DAG.getVTList({RetVT, MVT::Other});
8717     SDValue NewNode = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops);
8718 
8719     if (VT.isFixedLengthVector()) {
8720       SDValue FixedVector =
8721           convertFromScalableVector(VT, NewNode, DAG, Subtarget);
8722       NewNode = DAG.getMergeValues({FixedVector, NewNode.getValue(1)}, DL);
8723     } else if (VT.isFloatingPoint()) {
8724       SDValue BitCast = DAG.getBitcast(VT, NewNode.getValue(0));
8725       NewNode = DAG.getMergeValues({BitCast, NewNode.getValue(1)}, DL);
8726     }
8727 
8728     if (Op == NewNode)
8729       break;
8730 
8731     return NewNode;
8732   }
8733   }
8734 
8735   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
8736 }
8737 
8738 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
8739                                                  SelectionDAG &DAG) const {
8740   unsigned IntNo = Op.getConstantOperandVal(1);
8741   switch (IntNo) {
8742   default:
8743     break;
8744   case Intrinsic::riscv_masked_strided_store: {
8745     SDLoc DL(Op);
8746     MVT XLenVT = Subtarget.getXLenVT();
8747 
8748     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
8749     // the selection of the masked intrinsics doesn't do this for us.
8750     SDValue Mask = Op.getOperand(5);
8751     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
8752 
8753     SDValue Val = Op.getOperand(2);
8754     MVT VT = Val.getSimpleValueType();
8755     MVT ContainerVT = VT;
8756     if (VT.isFixedLengthVector()) {
8757       ContainerVT = getContainerForFixedLengthVector(VT);
8758       Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
8759     }
8760     if (!IsUnmasked) {
8761       MVT MaskVT = getMaskTypeFor(ContainerVT);
8762       if (VT.isFixedLengthVector())
8763         Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
8764     }
8765 
8766     SDValue VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
8767 
8768     SDValue IntID = DAG.getTargetConstant(
8769         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
8770         XLenVT);
8771 
8772     auto *Store = cast<MemIntrinsicSDNode>(Op);
8773     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
8774     Ops.push_back(Val);
8775     Ops.push_back(Op.getOperand(3)); // Ptr
8776     Ops.push_back(Op.getOperand(4)); // Stride
8777     if (!IsUnmasked)
8778       Ops.push_back(Mask);
8779     Ops.push_back(VL);
8780 
8781     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
8782                                    Ops, Store->getMemoryVT(),
8783                                    Store->getMemOperand());
8784   }
8785   case Intrinsic::riscv_seg2_store:
8786   case Intrinsic::riscv_seg3_store:
8787   case Intrinsic::riscv_seg4_store:
8788   case Intrinsic::riscv_seg5_store:
8789   case Intrinsic::riscv_seg6_store:
8790   case Intrinsic::riscv_seg7_store:
8791   case Intrinsic::riscv_seg8_store: {
8792     SDLoc DL(Op);
8793     static const Intrinsic::ID VssegInts[] = {
8794         Intrinsic::riscv_vsseg2, Intrinsic::riscv_vsseg3,
8795         Intrinsic::riscv_vsseg4, Intrinsic::riscv_vsseg5,
8796         Intrinsic::riscv_vsseg6, Intrinsic::riscv_vsseg7,
8797         Intrinsic::riscv_vsseg8};
8798     // Operands are (chain, int_id, vec*, ptr, vl)
8799     unsigned NF = Op->getNumOperands() - 4;
8800     assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
8801     MVT XLenVT = Subtarget.getXLenVT();
8802     MVT VT = Op->getOperand(2).getSimpleValueType();
8803     MVT ContainerVT = getContainerForFixedLengthVector(VT);
8804 
8805     SDValue VL = getVLOp(VT.getVectorNumElements(), ContainerVT, DL, DAG,
8806                          Subtarget);
8807     SDValue IntID = DAG.getTargetConstant(VssegInts[NF - 2], DL, XLenVT);
8808     SDValue Ptr = Op->getOperand(NF + 2);
8809 
8810     auto *FixedIntrinsic = cast<MemIntrinsicSDNode>(Op);
8811     SmallVector<SDValue, 12> Ops = {FixedIntrinsic->getChain(), IntID};
8812     for (unsigned i = 0; i < NF; i++)
8813       Ops.push_back(convertToScalableVector(
8814           ContainerVT, FixedIntrinsic->getOperand(2 + i), DAG, Subtarget));
8815     Ops.append({Ptr, VL});
8816 
8817     return DAG.getMemIntrinsicNode(
8818         ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Ops,
8819         FixedIntrinsic->getMemoryVT(), FixedIntrinsic->getMemOperand());
8820   }
8821   case Intrinsic::riscv_sf_vc_x_se_e8mf8:
8822   case Intrinsic::riscv_sf_vc_x_se_e8mf4:
8823   case Intrinsic::riscv_sf_vc_x_se_e8mf2:
8824   case Intrinsic::riscv_sf_vc_x_se_e8m1:
8825   case Intrinsic::riscv_sf_vc_x_se_e8m2:
8826   case Intrinsic::riscv_sf_vc_x_se_e8m4:
8827   case Intrinsic::riscv_sf_vc_x_se_e8m8:
8828   case Intrinsic::riscv_sf_vc_x_se_e16mf4:
8829   case Intrinsic::riscv_sf_vc_x_se_e16mf2:
8830   case Intrinsic::riscv_sf_vc_x_se_e16m1:
8831   case Intrinsic::riscv_sf_vc_x_se_e16m2:
8832   case Intrinsic::riscv_sf_vc_x_se_e16m4:
8833   case Intrinsic::riscv_sf_vc_x_se_e16m8:
8834   case Intrinsic::riscv_sf_vc_x_se_e32mf2:
8835   case Intrinsic::riscv_sf_vc_x_se_e32m1:
8836   case Intrinsic::riscv_sf_vc_x_se_e32m2:
8837   case Intrinsic::riscv_sf_vc_x_se_e32m4:
8838   case Intrinsic::riscv_sf_vc_x_se_e32m8:
8839   case Intrinsic::riscv_sf_vc_x_se_e64m1:
8840   case Intrinsic::riscv_sf_vc_x_se_e64m2:
8841   case Intrinsic::riscv_sf_vc_x_se_e64m4:
8842   case Intrinsic::riscv_sf_vc_x_se_e64m8:
8843   case Intrinsic::riscv_sf_vc_i_se_e8mf8:
8844   case Intrinsic::riscv_sf_vc_i_se_e8mf4:
8845   case Intrinsic::riscv_sf_vc_i_se_e8mf2:
8846   case Intrinsic::riscv_sf_vc_i_se_e8m1:
8847   case Intrinsic::riscv_sf_vc_i_se_e8m2:
8848   case Intrinsic::riscv_sf_vc_i_se_e8m4:
8849   case Intrinsic::riscv_sf_vc_i_se_e8m8:
8850   case Intrinsic::riscv_sf_vc_i_se_e16mf4:
8851   case Intrinsic::riscv_sf_vc_i_se_e16mf2:
8852   case Intrinsic::riscv_sf_vc_i_se_e16m1:
8853   case Intrinsic::riscv_sf_vc_i_se_e16m2:
8854   case Intrinsic::riscv_sf_vc_i_se_e16m4:
8855   case Intrinsic::riscv_sf_vc_i_se_e16m8:
8856   case Intrinsic::riscv_sf_vc_i_se_e32mf2:
8857   case Intrinsic::riscv_sf_vc_i_se_e32m1:
8858   case Intrinsic::riscv_sf_vc_i_se_e32m2:
8859   case Intrinsic::riscv_sf_vc_i_se_e32m4:
8860   case Intrinsic::riscv_sf_vc_i_se_e32m8:
8861   case Intrinsic::riscv_sf_vc_i_se_e64m1:
8862   case Intrinsic::riscv_sf_vc_i_se_e64m2:
8863   case Intrinsic::riscv_sf_vc_i_se_e64m4:
8864   case Intrinsic::riscv_sf_vc_i_se_e64m8:
8865   case Intrinsic::riscv_sf_vc_xv_se:
8866   case Intrinsic::riscv_sf_vc_iv_se:
8867   case Intrinsic::riscv_sf_vc_vv_se:
8868   case Intrinsic::riscv_sf_vc_fv_se:
8869   case Intrinsic::riscv_sf_vc_xvv_se:
8870   case Intrinsic::riscv_sf_vc_ivv_se:
8871   case Intrinsic::riscv_sf_vc_vvv_se:
8872   case Intrinsic::riscv_sf_vc_fvv_se:
8873   case Intrinsic::riscv_sf_vc_xvw_se:
8874   case Intrinsic::riscv_sf_vc_ivw_se:
8875   case Intrinsic::riscv_sf_vc_vvw_se:
8876   case Intrinsic::riscv_sf_vc_fvw_se: {
8877     SmallVector<SDValue> Ops;
8878     getVCIXOperands(Op, DAG, Ops);
8879 
8880     SDValue NewNode =
8881         DAG.getNode(ISD::INTRINSIC_VOID, SDLoc(Op), Op->getVTList(), Ops);
8882 
8883     if (Op == NewNode)
8884       break;
8885 
8886     return NewNode;
8887   }
8888   }
8889 
8890   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
8891 }
8892 
8893 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
8894   switch (ISDOpcode) {
8895   default:
8896     llvm_unreachable("Unhandled reduction");
8897   case ISD::VP_REDUCE_ADD:
8898   case ISD::VECREDUCE_ADD:
8899     return RISCVISD::VECREDUCE_ADD_VL;
8900   case ISD::VP_REDUCE_UMAX:
8901   case ISD::VECREDUCE_UMAX:
8902     return RISCVISD::VECREDUCE_UMAX_VL;
8903   case ISD::VP_REDUCE_SMAX:
8904   case ISD::VECREDUCE_SMAX:
8905     return RISCVISD::VECREDUCE_SMAX_VL;
8906   case ISD::VP_REDUCE_UMIN:
8907   case ISD::VECREDUCE_UMIN:
8908     return RISCVISD::VECREDUCE_UMIN_VL;
8909   case ISD::VP_REDUCE_SMIN:
8910   case ISD::VECREDUCE_SMIN:
8911     return RISCVISD::VECREDUCE_SMIN_VL;
8912   case ISD::VP_REDUCE_AND:
8913   case ISD::VECREDUCE_AND:
8914     return RISCVISD::VECREDUCE_AND_VL;
8915   case ISD::VP_REDUCE_OR:
8916   case ISD::VECREDUCE_OR:
8917     return RISCVISD::VECREDUCE_OR_VL;
8918   case ISD::VP_REDUCE_XOR:
8919   case ISD::VECREDUCE_XOR:
8920     return RISCVISD::VECREDUCE_XOR_VL;
8921   case ISD::VP_REDUCE_FADD:
8922     return RISCVISD::VECREDUCE_FADD_VL;
8923   case ISD::VP_REDUCE_SEQ_FADD:
8924     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
8925   case ISD::VP_REDUCE_FMAX:
8926     return RISCVISD::VECREDUCE_FMAX_VL;
8927   case ISD::VP_REDUCE_FMIN:
8928     return RISCVISD::VECREDUCE_FMIN_VL;
8929   }
8930 
8931 }
8932 
8933 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
8934                                                          SelectionDAG &DAG,
8935                                                          bool IsVP) const {
8936   SDLoc DL(Op);
8937   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
8938   MVT VecVT = Vec.getSimpleValueType();
8939   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
8940           Op.getOpcode() == ISD::VECREDUCE_OR ||
8941           Op.getOpcode() == ISD::VECREDUCE_XOR ||
8942           Op.getOpcode() == ISD::VP_REDUCE_AND ||
8943           Op.getOpcode() == ISD::VP_REDUCE_OR ||
8944           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
8945          "Unexpected reduction lowering");
8946 
8947   MVT XLenVT = Subtarget.getXLenVT();
8948 
8949   MVT ContainerVT = VecVT;
8950   if (VecVT.isFixedLengthVector()) {
8951     ContainerVT = getContainerForFixedLengthVector(VecVT);
8952     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
8953   }
8954 
8955   SDValue Mask, VL;
8956   if (IsVP) {
8957     Mask = Op.getOperand(2);
8958     VL = Op.getOperand(3);
8959   } else {
8960     std::tie(Mask, VL) =
8961         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
8962   }
8963 
8964   unsigned BaseOpc;
8965   ISD::CondCode CC;
8966   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
8967 
8968   switch (Op.getOpcode()) {
8969   default:
8970     llvm_unreachable("Unhandled reduction");
8971   case ISD::VECREDUCE_AND:
8972   case ISD::VP_REDUCE_AND: {
8973     // vcpop ~x == 0
8974     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
8975     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
8976     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
8977     CC = ISD::SETEQ;
8978     BaseOpc = ISD::AND;
8979     break;
8980   }
8981   case ISD::VECREDUCE_OR:
8982   case ISD::VP_REDUCE_OR:
8983     // vcpop x != 0
8984     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
8985     CC = ISD::SETNE;
8986     BaseOpc = ISD::OR;
8987     break;
8988   case ISD::VECREDUCE_XOR:
8989   case ISD::VP_REDUCE_XOR: {
8990     // ((vcpop x) & 1) != 0
8991     SDValue One = DAG.getConstant(1, DL, XLenVT);
8992     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
8993     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
8994     CC = ISD::SETNE;
8995     BaseOpc = ISD::XOR;
8996     break;
8997   }
8998   }
8999 
9000   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
9001   SetCC = DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), SetCC);
9002 
9003   if (!IsVP)
9004     return SetCC;
9005 
9006   // Now include the start value in the operation.
9007   // Note that we must return the start value when no elements are operated
9008   // upon. The vcpop instructions we've emitted in each case above will return
9009   // 0 for an inactive vector, and so we've already received the neutral value:
9010   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
9011   // can simply include the start value.
9012   return DAG.getNode(BaseOpc, DL, Op.getValueType(), SetCC, Op.getOperand(0));
9013 }
9014 
9015 static bool isNonZeroAVL(SDValue AVL) {
9016   auto *RegisterAVL = dyn_cast<RegisterSDNode>(AVL);
9017   auto *ImmAVL = dyn_cast<ConstantSDNode>(AVL);
9018   return (RegisterAVL && RegisterAVL->getReg() == RISCV::X0) ||
9019          (ImmAVL && ImmAVL->getZExtValue() >= 1);
9020 }
9021 
9022 /// Helper to lower a reduction sequence of the form:
9023 /// scalar = reduce_op vec, scalar_start
9024 static SDValue lowerReductionSeq(unsigned RVVOpcode, MVT ResVT,
9025                                  SDValue StartValue, SDValue Vec, SDValue Mask,
9026                                  SDValue VL, const SDLoc &DL, SelectionDAG &DAG,
9027                                  const RISCVSubtarget &Subtarget) {
9028   const MVT VecVT = Vec.getSimpleValueType();
9029   const MVT M1VT = getLMUL1VT(VecVT);
9030   const MVT XLenVT = Subtarget.getXLenVT();
9031   const bool NonZeroAVL = isNonZeroAVL(VL);
9032 
9033   // The reduction needs an LMUL1 input; do the splat at either LMUL1
9034   // or the original VT if fractional.
9035   auto InnerVT = VecVT.bitsLE(M1VT) ? VecVT : M1VT;
9036   // We reuse the VL of the reduction to reduce vsetvli toggles if we can
9037   // prove it is non-zero.  For the AVL=0 case, we need the scalar to
9038   // be the result of the reduction operation.
9039   auto InnerVL = NonZeroAVL ? VL : DAG.getConstant(1, DL, XLenVT);
9040   SDValue InitialValue = lowerScalarInsert(StartValue, InnerVL, InnerVT, DL,
9041                                            DAG, Subtarget);
9042   if (M1VT != InnerVT)
9043     InitialValue = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, M1VT,
9044                                DAG.getUNDEF(M1VT),
9045                                InitialValue, DAG.getConstant(0, DL, XLenVT));
9046   SDValue PassThru = NonZeroAVL ? DAG.getUNDEF(M1VT) : InitialValue;
9047   SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
9048   SDValue Ops[] = {PassThru, Vec, InitialValue, Mask, VL, Policy};
9049   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, Ops);
9050   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
9051                      DAG.getConstant(0, DL, XLenVT));
9052 }
9053 
9054 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
9055                                             SelectionDAG &DAG) const {
9056   SDLoc DL(Op);
9057   SDValue Vec = Op.getOperand(0);
9058   EVT VecEVT = Vec.getValueType();
9059 
9060   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
9061 
9062   // Due to ordering in legalize types we may have a vector type that needs to
9063   // be split. Do that manually so we can get down to a legal type.
9064   while (getTypeAction(*DAG.getContext(), VecEVT) ==
9065          TargetLowering::TypeSplitVector) {
9066     auto [Lo, Hi] = DAG.SplitVector(Vec, DL);
9067     VecEVT = Lo.getValueType();
9068     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
9069   }
9070 
9071   // TODO: The type may need to be widened rather than split. Or widened before
9072   // it can be split.
9073   if (!isTypeLegal(VecEVT))
9074     return SDValue();
9075 
9076   MVT VecVT = VecEVT.getSimpleVT();
9077   MVT VecEltVT = VecVT.getVectorElementType();
9078   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
9079 
9080   MVT ContainerVT = VecVT;
9081   if (VecVT.isFixedLengthVector()) {
9082     ContainerVT = getContainerForFixedLengthVector(VecVT);
9083     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
9084   }
9085 
9086   auto [Mask, VL] = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
9087 
9088   SDValue StartV = DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
9089   switch (BaseOpc) {
9090   case ISD::AND:
9091   case ISD::OR:
9092   case ISD::UMAX:
9093   case ISD::UMIN:
9094   case ISD::SMAX:
9095   case ISD::SMIN:
9096     MVT XLenVT = Subtarget.getXLenVT();
9097     StartV = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Vec,
9098                          DAG.getConstant(0, DL, XLenVT));
9099   }
9100   return lowerReductionSeq(RVVOpcode, Op.getSimpleValueType(), StartV, Vec,
9101                            Mask, VL, DL, DAG, Subtarget);
9102 }
9103 
9104 // Given a reduction op, this function returns the matching reduction opcode,
9105 // the vector SDValue and the scalar SDValue required to lower this to a
9106 // RISCVISD node.
9107 static std::tuple<unsigned, SDValue, SDValue>
9108 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT,
9109                                const RISCVSubtarget &Subtarget) {
9110   SDLoc DL(Op);
9111   auto Flags = Op->getFlags();
9112   unsigned Opcode = Op.getOpcode();
9113   switch (Opcode) {
9114   default:
9115     llvm_unreachable("Unhandled reduction");
9116   case ISD::VECREDUCE_FADD: {
9117     // Use positive zero if we can. It is cheaper to materialize.
9118     SDValue Zero =
9119         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
9120     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
9121   }
9122   case ISD::VECREDUCE_SEQ_FADD:
9123     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
9124                            Op.getOperand(0));
9125   case ISD::VECREDUCE_FMIN:
9126   case ISD::VECREDUCE_FMAX: {
9127     MVT XLenVT = Subtarget.getXLenVT();
9128     SDValue Front =
9129         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Op.getOperand(0),
9130                     DAG.getConstant(0, DL, XLenVT));
9131     unsigned RVVOpc = (Opcode == ISD::VECREDUCE_FMIN)
9132                           ? RISCVISD::VECREDUCE_FMIN_VL
9133                           : RISCVISD::VECREDUCE_FMAX_VL;
9134     return std::make_tuple(RVVOpc, Op.getOperand(0), Front);
9135   }
9136   }
9137 }
9138 
9139 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
9140                                               SelectionDAG &DAG) const {
9141   SDLoc DL(Op);
9142   MVT VecEltVT = Op.getSimpleValueType();
9143 
9144   unsigned RVVOpcode;
9145   SDValue VectorVal, ScalarVal;
9146   std::tie(RVVOpcode, VectorVal, ScalarVal) =
9147       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT, Subtarget);
9148   MVT VecVT = VectorVal.getSimpleValueType();
9149 
9150   MVT ContainerVT = VecVT;
9151   if (VecVT.isFixedLengthVector()) {
9152     ContainerVT = getContainerForFixedLengthVector(VecVT);
9153     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
9154   }
9155 
9156   auto [Mask, VL] = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
9157   return lowerReductionSeq(RVVOpcode, Op.getSimpleValueType(), ScalarVal,
9158                            VectorVal, Mask, VL, DL, DAG, Subtarget);
9159 }
9160 
9161 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
9162                                            SelectionDAG &DAG) const {
9163   SDLoc DL(Op);
9164   SDValue Vec = Op.getOperand(1);
9165   EVT VecEVT = Vec.getValueType();
9166 
9167   // TODO: The type may need to be widened rather than split. Or widened before
9168   // it can be split.
9169   if (!isTypeLegal(VecEVT))
9170     return SDValue();
9171 
9172   MVT VecVT = VecEVT.getSimpleVT();
9173   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
9174 
9175   if (VecVT.isFixedLengthVector()) {
9176     auto ContainerVT = getContainerForFixedLengthVector(VecVT);
9177     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
9178   }
9179 
9180   SDValue VL = Op.getOperand(3);
9181   SDValue Mask = Op.getOperand(2);
9182   return lowerReductionSeq(RVVOpcode, Op.getSimpleValueType(), Op.getOperand(0),
9183                            Vec, Mask, VL, DL, DAG, Subtarget);
9184 }
9185 
9186 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
9187                                                    SelectionDAG &DAG) const {
9188   SDValue Vec = Op.getOperand(0);
9189   SDValue SubVec = Op.getOperand(1);
9190   MVT VecVT = Vec.getSimpleValueType();
9191   MVT SubVecVT = SubVec.getSimpleValueType();
9192 
9193   SDLoc DL(Op);
9194   MVT XLenVT = Subtarget.getXLenVT();
9195   unsigned OrigIdx = Op.getConstantOperandVal(2);
9196   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
9197 
9198   // We don't have the ability to slide mask vectors up indexed by their i1
9199   // elements; the smallest we can do is i8. Often we are able to bitcast to
9200   // equivalent i8 vectors. Note that when inserting a fixed-length vector
9201   // into a scalable one, we might not necessarily have enough scalable
9202   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
9203   if (SubVecVT.getVectorElementType() == MVT::i1 &&
9204       (OrigIdx != 0 || !Vec.isUndef())) {
9205     if (VecVT.getVectorMinNumElements() >= 8 &&
9206         SubVecVT.getVectorMinNumElements() >= 8) {
9207       assert(OrigIdx % 8 == 0 && "Invalid index");
9208       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
9209              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
9210              "Unexpected mask vector lowering");
9211       OrigIdx /= 8;
9212       SubVecVT =
9213           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
9214                            SubVecVT.isScalableVector());
9215       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
9216                                VecVT.isScalableVector());
9217       Vec = DAG.getBitcast(VecVT, Vec);
9218       SubVec = DAG.getBitcast(SubVecVT, SubVec);
9219     } else {
9220       // We can't slide this mask vector up indexed by its i1 elements.
9221       // This poses a problem when we wish to insert a scalable vector which
9222       // can't be re-expressed as a larger type. Just choose the slow path and
9223       // extend to a larger type, then truncate back down.
9224       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
9225       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
9226       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
9227       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
9228       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
9229                         Op.getOperand(2));
9230       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
9231       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
9232     }
9233   }
9234 
9235   // If the subvector vector is a fixed-length type, we cannot use subregister
9236   // manipulation to simplify the codegen; we don't know which register of a
9237   // LMUL group contains the specific subvector as we only know the minimum
9238   // register size. Therefore we must slide the vector group up the full
9239   // amount.
9240   if (SubVecVT.isFixedLengthVector()) {
9241     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
9242       return Op;
9243     MVT ContainerVT = VecVT;
9244     if (VecVT.isFixedLengthVector()) {
9245       ContainerVT = getContainerForFixedLengthVector(VecVT);
9246       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
9247     }
9248 
9249     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
9250       SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
9251                            DAG.getUNDEF(ContainerVT), SubVec,
9252                            DAG.getConstant(0, DL, XLenVT));
9253       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
9254       return DAG.getBitcast(Op.getValueType(), SubVec);
9255     }
9256 
9257     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
9258                          DAG.getUNDEF(ContainerVT), SubVec,
9259                          DAG.getConstant(0, DL, XLenVT));
9260     SDValue Mask =
9261         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
9262     // Set the vector length to only the number of elements we care about. Note
9263     // that for slideup this includes the offset.
9264     unsigned EndIndex = OrigIdx + SubVecVT.getVectorNumElements();
9265     SDValue VL = getVLOp(EndIndex, ContainerVT, DL, DAG, Subtarget);
9266 
9267     // Use tail agnostic policy if we're inserting over Vec's tail.
9268     unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
9269     if (VecVT.isFixedLengthVector() && EndIndex == VecVT.getVectorNumElements())
9270       Policy = RISCVII::TAIL_AGNOSTIC;
9271 
9272     // If we're inserting into the lowest elements, use a tail undisturbed
9273     // vmv.v.v.
9274     if (OrigIdx == 0) {
9275       SubVec =
9276           DAG.getNode(RISCVISD::VMV_V_V_VL, DL, ContainerVT, Vec, SubVec, VL);
9277     } else {
9278       SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
9279       SubVec = getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, SubVec,
9280                            SlideupAmt, Mask, VL, Policy);
9281     }
9282 
9283     if (VecVT.isFixedLengthVector())
9284       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
9285     return DAG.getBitcast(Op.getValueType(), SubVec);
9286   }
9287 
9288   unsigned SubRegIdx, RemIdx;
9289   std::tie(SubRegIdx, RemIdx) =
9290       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
9291           VecVT, SubVecVT, OrigIdx, TRI);
9292 
9293   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
9294   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
9295                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
9296                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
9297 
9298   // 1. If the Idx has been completely eliminated and this subvector's size is
9299   // a vector register or a multiple thereof, or the surrounding elements are
9300   // undef, then this is a subvector insert which naturally aligns to a vector
9301   // register. These can easily be handled using subregister manipulation.
9302   // 2. If the subvector is smaller than a vector register, then the insertion
9303   // must preserve the undisturbed elements of the register. We do this by
9304   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
9305   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
9306   // subvector within the vector register, and an INSERT_SUBVECTOR of that
9307   // LMUL=1 type back into the larger vector (resolving to another subregister
9308   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
9309   // to avoid allocating a large register group to hold our subvector.
9310   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
9311     return Op;
9312 
9313   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
9314   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
9315   // (in our case undisturbed). This means we can set up a subvector insertion
9316   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
9317   // size of the subvector.
9318   MVT InterSubVT = VecVT;
9319   SDValue AlignedExtract = Vec;
9320   unsigned AlignedIdx = OrigIdx - RemIdx;
9321   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
9322     InterSubVT = getLMUL1VT(VecVT);
9323     // Extract a subvector equal to the nearest full vector register type. This
9324     // should resolve to a EXTRACT_SUBREG instruction.
9325     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
9326                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
9327   }
9328 
9329   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
9330                        DAG.getUNDEF(InterSubVT), SubVec,
9331                        DAG.getConstant(0, DL, XLenVT));
9332 
9333   auto [Mask, VL] = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
9334 
9335   VL = computeVLMax(SubVecVT, DL, DAG);
9336 
9337   // If we're inserting into the lowest elements, use a tail undisturbed
9338   // vmv.v.v.
9339   if (RemIdx == 0) {
9340     SubVec = DAG.getNode(RISCVISD::VMV_V_V_VL, DL, InterSubVT, AlignedExtract,
9341                          SubVec, VL);
9342   } else {
9343     SDValue SlideupAmt =
9344         DAG.getVScale(DL, XLenVT, APInt(XLenVT.getSizeInBits(), RemIdx));
9345 
9346     // Construct the vector length corresponding to RemIdx + length(SubVecVT).
9347     VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
9348 
9349     SubVec = getVSlideup(DAG, Subtarget, DL, InterSubVT, AlignedExtract, SubVec,
9350                          SlideupAmt, Mask, VL);
9351   }
9352 
9353   // If required, insert this subvector back into the correct vector register.
9354   // This should resolve to an INSERT_SUBREG instruction.
9355   if (VecVT.bitsGT(InterSubVT))
9356     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, SubVec,
9357                          DAG.getConstant(AlignedIdx, DL, XLenVT));
9358 
9359   // We might have bitcast from a mask type: cast back to the original type if
9360   // required.
9361   return DAG.getBitcast(Op.getSimpleValueType(), SubVec);
9362 }
9363 
9364 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
9365                                                     SelectionDAG &DAG) const {
9366   SDValue Vec = Op.getOperand(0);
9367   MVT SubVecVT = Op.getSimpleValueType();
9368   MVT VecVT = Vec.getSimpleValueType();
9369 
9370   SDLoc DL(Op);
9371   MVT XLenVT = Subtarget.getXLenVT();
9372   unsigned OrigIdx = Op.getConstantOperandVal(1);
9373   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
9374 
9375   // We don't have the ability to slide mask vectors down indexed by their i1
9376   // elements; the smallest we can do is i8. Often we are able to bitcast to
9377   // equivalent i8 vectors. Note that when extracting a fixed-length vector
9378   // from a scalable one, we might not necessarily have enough scalable
9379   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
9380   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
9381     if (VecVT.getVectorMinNumElements() >= 8 &&
9382         SubVecVT.getVectorMinNumElements() >= 8) {
9383       assert(OrigIdx % 8 == 0 && "Invalid index");
9384       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
9385              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
9386              "Unexpected mask vector lowering");
9387       OrigIdx /= 8;
9388       SubVecVT =
9389           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
9390                            SubVecVT.isScalableVector());
9391       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
9392                                VecVT.isScalableVector());
9393       Vec = DAG.getBitcast(VecVT, Vec);
9394     } else {
9395       // We can't slide this mask vector down, indexed by its i1 elements.
9396       // This poses a problem when we wish to extract a scalable vector which
9397       // can't be re-expressed as a larger type. Just choose the slow path and
9398       // extend to a larger type, then truncate back down.
9399       // TODO: We could probably improve this when extracting certain fixed
9400       // from fixed, where we can extract as i8 and shift the correct element
9401       // right to reach the desired subvector?
9402       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
9403       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
9404       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
9405       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
9406                         Op.getOperand(1));
9407       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
9408       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
9409     }
9410   }
9411 
9412   // With an index of 0 this is a cast-like subvector, which can be performed
9413   // with subregister operations.
9414   if (OrigIdx == 0)
9415     return Op;
9416 
9417   // If the subvector vector is a fixed-length type, we cannot use subregister
9418   // manipulation to simplify the codegen; we don't know which register of a
9419   // LMUL group contains the specific subvector as we only know the minimum
9420   // register size. Therefore we must slide the vector group down the full
9421   // amount.
9422   if (SubVecVT.isFixedLengthVector()) {
9423     MVT ContainerVT = VecVT;
9424     if (VecVT.isFixedLengthVector()) {
9425       ContainerVT = getContainerForFixedLengthVector(VecVT);
9426       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
9427     }
9428 
9429     // Shrink down Vec so we're performing the slidedown on a smaller LMUL.
9430     unsigned LastIdx = OrigIdx + SubVecVT.getVectorNumElements() - 1;
9431     if (auto ShrunkVT =
9432             getSmallestVTForIndex(ContainerVT, LastIdx, DL, DAG, Subtarget)) {
9433       ContainerVT = *ShrunkVT;
9434       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ContainerVT, Vec,
9435                         DAG.getVectorIdxConstant(0, DL));
9436     }
9437 
9438     SDValue Mask =
9439         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
9440     // Set the vector length to only the number of elements we care about. This
9441     // avoids sliding down elements we're going to discard straight away.
9442     SDValue VL = getVLOp(SubVecVT.getVectorNumElements(), ContainerVT, DL, DAG,
9443                          Subtarget);
9444     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
9445     SDValue Slidedown =
9446         getVSlidedown(DAG, Subtarget, DL, ContainerVT,
9447                       DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
9448     // Now we can use a cast-like subvector extract to get the result.
9449     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
9450                             DAG.getConstant(0, DL, XLenVT));
9451     return DAG.getBitcast(Op.getValueType(), Slidedown);
9452   }
9453 
9454   unsigned SubRegIdx, RemIdx;
9455   std::tie(SubRegIdx, RemIdx) =
9456       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
9457           VecVT, SubVecVT, OrigIdx, TRI);
9458 
9459   // If the Idx has been completely eliminated then this is a subvector extract
9460   // which naturally aligns to a vector register. These can easily be handled
9461   // using subregister manipulation.
9462   if (RemIdx == 0)
9463     return Op;
9464 
9465   // Else SubVecVT is a fractional LMUL and may need to be slid down.
9466   assert(RISCVVType::decodeVLMUL(getLMUL(SubVecVT)).second);
9467 
9468   // If the vector type is an LMUL-group type, extract a subvector equal to the
9469   // nearest full vector register type.
9470   MVT InterSubVT = VecVT;
9471   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
9472     // If VecVT has an LMUL > 1, then SubVecVT should have a smaller LMUL, and
9473     // we should have successfully decomposed the extract into a subregister.
9474     assert(SubRegIdx != RISCV::NoSubRegister);
9475     InterSubVT = getLMUL1VT(VecVT);
9476     Vec = DAG.getTargetExtractSubreg(SubRegIdx, DL, InterSubVT, Vec);
9477   }
9478 
9479   // Slide this vector register down by the desired number of elements in order
9480   // to place the desired subvector starting at element 0.
9481   SDValue SlidedownAmt =
9482       DAG.getVScale(DL, XLenVT, APInt(XLenVT.getSizeInBits(), RemIdx));
9483 
9484   auto [Mask, VL] = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
9485   SDValue Slidedown =
9486       getVSlidedown(DAG, Subtarget, DL, InterSubVT, DAG.getUNDEF(InterSubVT),
9487                     Vec, SlidedownAmt, Mask, VL);
9488 
9489   // Now the vector is in the right position, extract our final subvector. This
9490   // should resolve to a COPY.
9491   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
9492                           DAG.getConstant(0, DL, XLenVT));
9493 
9494   // We might have bitcast from a mask type: cast back to the original type if
9495   // required.
9496   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
9497 }
9498 
9499 // Widen a vector's operands to i8, then truncate its results back to the
9500 // original type, typically i1.  All operand and result types must be the same.
9501 static SDValue widenVectorOpsToi8(SDValue N, const SDLoc &DL,
9502                                   SelectionDAG &DAG) {
9503   MVT VT = N.getSimpleValueType();
9504   MVT WideVT = VT.changeVectorElementType(MVT::i8);
9505   SmallVector<SDValue, 4> WideOps;
9506   for (SDValue Op : N->ops()) {
9507     assert(Op.getSimpleValueType() == VT &&
9508            "Operands and result must be same type");
9509     WideOps.push_back(DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Op));
9510   }
9511 
9512   unsigned NumVals = N->getNumValues();
9513 
9514   SDVTList VTs = DAG.getVTList(SmallVector<EVT, 4>(
9515       NumVals, N.getValueType().changeVectorElementType(MVT::i8)));
9516   SDValue WideN = DAG.getNode(N.getOpcode(), DL, VTs, WideOps);
9517   SmallVector<SDValue, 4> TruncVals;
9518   for (unsigned I = 0; I < NumVals; I++) {
9519     TruncVals.push_back(
9520         DAG.getSetCC(DL, N->getSimpleValueType(I), WideN.getValue(I),
9521                      DAG.getConstant(0, DL, WideVT), ISD::SETNE));
9522   }
9523 
9524   if (TruncVals.size() > 1)
9525     return DAG.getMergeValues(TruncVals, DL);
9526   return TruncVals.front();
9527 }
9528 
9529 SDValue RISCVTargetLowering::lowerVECTOR_DEINTERLEAVE(SDValue Op,
9530                                                       SelectionDAG &DAG) const {
9531   SDLoc DL(Op);
9532   MVT VecVT = Op.getSimpleValueType();
9533   MVT XLenVT = Subtarget.getXLenVT();
9534 
9535   assert(VecVT.isScalableVector() &&
9536          "vector_interleave on non-scalable vector!");
9537 
9538   // 1 bit element vectors need to be widened to e8
9539   if (VecVT.getVectorElementType() == MVT::i1)
9540     return widenVectorOpsToi8(Op, DL, DAG);
9541 
9542   // If the VT is LMUL=8, we need to split and reassemble.
9543   if (VecVT.getSizeInBits().getKnownMinValue() ==
9544       (8 * RISCV::RVVBitsPerBlock)) {
9545     auto [Op0Lo, Op0Hi] = DAG.SplitVectorOperand(Op.getNode(), 0);
9546     auto [Op1Lo, Op1Hi] = DAG.SplitVectorOperand(Op.getNode(), 1);
9547     EVT SplitVT = Op0Lo.getValueType();
9548 
9549     SDValue ResLo = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL,
9550                                 DAG.getVTList(SplitVT, SplitVT), Op0Lo, Op0Hi);
9551     SDValue ResHi = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL,
9552                                 DAG.getVTList(SplitVT, SplitVT), Op1Lo, Op1Hi);
9553 
9554     SDValue Even = DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT,
9555                                ResLo.getValue(0), ResHi.getValue(0));
9556     SDValue Odd = DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT, ResLo.getValue(1),
9557                               ResHi.getValue(1));
9558     return DAG.getMergeValues({Even, Odd}, DL);
9559   }
9560 
9561   // Concatenate the two vectors as one vector to deinterleave
9562   MVT ConcatVT =
9563       MVT::getVectorVT(VecVT.getVectorElementType(),
9564                        VecVT.getVectorElementCount().multiplyCoefficientBy(2));
9565   SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, ConcatVT,
9566                                Op.getOperand(0), Op.getOperand(1));
9567 
9568   // We want to operate on all lanes, so get the mask and VL and mask for it
9569   auto [Mask, VL] = getDefaultScalableVLOps(ConcatVT, DL, DAG, Subtarget);
9570   SDValue Passthru = DAG.getUNDEF(ConcatVT);
9571 
9572   // We can deinterleave through vnsrl.wi if the element type is smaller than
9573   // ELEN
9574   if (VecVT.getScalarSizeInBits() < Subtarget.getELen()) {
9575     SDValue Even =
9576         getDeinterleaveViaVNSRL(DL, VecVT, Concat, true, Subtarget, DAG);
9577     SDValue Odd =
9578         getDeinterleaveViaVNSRL(DL, VecVT, Concat, false, Subtarget, DAG);
9579     return DAG.getMergeValues({Even, Odd}, DL);
9580   }
9581 
9582   // For the indices, use the same SEW to avoid an extra vsetvli
9583   MVT IdxVT = ConcatVT.changeVectorElementTypeToInteger();
9584   // Create a vector of even indices {0, 2, 4, ...}
9585   SDValue EvenIdx =
9586       DAG.getStepVector(DL, IdxVT, APInt(IdxVT.getScalarSizeInBits(), 2));
9587   // Create a vector of odd indices {1, 3, 5, ... }
9588   SDValue OddIdx =
9589       DAG.getNode(ISD::ADD, DL, IdxVT, EvenIdx, DAG.getConstant(1, DL, IdxVT));
9590 
9591   // Gather the even and odd elements into two separate vectors
9592   SDValue EvenWide = DAG.getNode(RISCVISD::VRGATHER_VV_VL, DL, ConcatVT,
9593                                  Concat, EvenIdx, Passthru, Mask, VL);
9594   SDValue OddWide = DAG.getNode(RISCVISD::VRGATHER_VV_VL, DL, ConcatVT,
9595                                 Concat, OddIdx, Passthru, Mask, VL);
9596 
9597   // Extract the result half of the gather for even and odd
9598   SDValue Even = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VecVT, EvenWide,
9599                              DAG.getConstant(0, DL, XLenVT));
9600   SDValue Odd = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VecVT, OddWide,
9601                             DAG.getConstant(0, DL, XLenVT));
9602 
9603   return DAG.getMergeValues({Even, Odd}, DL);
9604 }
9605 
9606 SDValue RISCVTargetLowering::lowerVECTOR_INTERLEAVE(SDValue Op,
9607                                                     SelectionDAG &DAG) const {
9608   SDLoc DL(Op);
9609   MVT VecVT = Op.getSimpleValueType();
9610 
9611   assert(VecVT.isScalableVector() &&
9612          "vector_interleave on non-scalable vector!");
9613 
9614   // i1 vectors need to be widened to i8
9615   if (VecVT.getVectorElementType() == MVT::i1)
9616     return widenVectorOpsToi8(Op, DL, DAG);
9617 
9618   MVT XLenVT = Subtarget.getXLenVT();
9619   SDValue VL = DAG.getRegister(RISCV::X0, XLenVT);
9620 
9621   // If the VT is LMUL=8, we need to split and reassemble.
9622   if (VecVT.getSizeInBits().getKnownMinValue() == (8 * RISCV::RVVBitsPerBlock)) {
9623     auto [Op0Lo, Op0Hi] = DAG.SplitVectorOperand(Op.getNode(), 0);
9624     auto [Op1Lo, Op1Hi] = DAG.SplitVectorOperand(Op.getNode(), 1);
9625     EVT SplitVT = Op0Lo.getValueType();
9626 
9627     SDValue ResLo = DAG.getNode(ISD::VECTOR_INTERLEAVE, DL,
9628                                 DAG.getVTList(SplitVT, SplitVT), Op0Lo, Op1Lo);
9629     SDValue ResHi = DAG.getNode(ISD::VECTOR_INTERLEAVE, DL,
9630                                 DAG.getVTList(SplitVT, SplitVT), Op0Hi, Op1Hi);
9631 
9632     SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT,
9633                              ResLo.getValue(0), ResLo.getValue(1));
9634     SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT,
9635                              ResHi.getValue(0), ResHi.getValue(1));
9636     return DAG.getMergeValues({Lo, Hi}, DL);
9637   }
9638 
9639   SDValue Interleaved;
9640 
9641   // If the element type is smaller than ELEN, then we can interleave with
9642   // vwaddu.vv and vwmaccu.vx
9643   if (VecVT.getScalarSizeInBits() < Subtarget.getELen()) {
9644     Interleaved = getWideningInterleave(Op.getOperand(0), Op.getOperand(1), DL,
9645                                         DAG, Subtarget);
9646   } else {
9647     // Otherwise, fallback to using vrgathere16.vv
9648     MVT ConcatVT =
9649       MVT::getVectorVT(VecVT.getVectorElementType(),
9650                        VecVT.getVectorElementCount().multiplyCoefficientBy(2));
9651     SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, ConcatVT,
9652                                  Op.getOperand(0), Op.getOperand(1));
9653 
9654     MVT IdxVT = ConcatVT.changeVectorElementType(MVT::i16);
9655 
9656     // 0 1 2 3 4 5 6 7 ...
9657     SDValue StepVec = DAG.getStepVector(DL, IdxVT);
9658 
9659     // 1 1 1 1 1 1 1 1 ...
9660     SDValue Ones = DAG.getSplatVector(IdxVT, DL, DAG.getConstant(1, DL, XLenVT));
9661 
9662     // 1 0 1 0 1 0 1 0 ...
9663     SDValue OddMask = DAG.getNode(ISD::AND, DL, IdxVT, StepVec, Ones);
9664     OddMask = DAG.getSetCC(
9665         DL, IdxVT.changeVectorElementType(MVT::i1), OddMask,
9666         DAG.getSplatVector(IdxVT, DL, DAG.getConstant(0, DL, XLenVT)),
9667         ISD::CondCode::SETNE);
9668 
9669     SDValue VLMax = DAG.getSplatVector(IdxVT, DL, computeVLMax(VecVT, DL, DAG));
9670 
9671     // Build up the index vector for interleaving the concatenated vector
9672     //      0      0      1      1      2      2      3      3 ...
9673     SDValue Idx = DAG.getNode(ISD::SRL, DL, IdxVT, StepVec, Ones);
9674     //      0      n      1    n+1      2    n+2      3    n+3 ...
9675     Idx =
9676         DAG.getNode(RISCVISD::ADD_VL, DL, IdxVT, Idx, VLMax, Idx, OddMask, VL);
9677 
9678     // Then perform the interleave
9679     //   v[0]   v[n]   v[1] v[n+1]   v[2] v[n+2]   v[3] v[n+3] ...
9680     SDValue TrueMask = getAllOnesMask(IdxVT, VL, DL, DAG);
9681     Interleaved = DAG.getNode(RISCVISD::VRGATHEREI16_VV_VL, DL, ConcatVT,
9682                               Concat, Idx, DAG.getUNDEF(ConcatVT), TrueMask, VL);
9683   }
9684 
9685   // Extract the two halves from the interleaved result
9686   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VecVT, Interleaved,
9687                            DAG.getVectorIdxConstant(0, DL));
9688   SDValue Hi = DAG.getNode(
9689       ISD::EXTRACT_SUBVECTOR, DL, VecVT, Interleaved,
9690       DAG.getVectorIdxConstant(VecVT.getVectorMinNumElements(), DL));
9691 
9692   return DAG.getMergeValues({Lo, Hi}, DL);
9693 }
9694 
9695 // Lower step_vector to the vid instruction. Any non-identity step value must
9696 // be accounted for my manual expansion.
9697 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
9698                                               SelectionDAG &DAG) const {
9699   SDLoc DL(Op);
9700   MVT VT = Op.getSimpleValueType();
9701   assert(VT.isScalableVector() && "Expected scalable vector");
9702   MVT XLenVT = Subtarget.getXLenVT();
9703   auto [Mask, VL] = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
9704   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
9705   uint64_t StepValImm = Op.getConstantOperandVal(0);
9706   if (StepValImm != 1) {
9707     if (isPowerOf2_64(StepValImm)) {
9708       SDValue StepVal =
9709           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
9710                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT), VL);
9711       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
9712     } else {
9713       SDValue StepVal = lowerScalarSplat(
9714           SDValue(), DAG.getConstant(StepValImm, DL, VT.getVectorElementType()),
9715           VL, VT, DL, DAG, Subtarget);
9716       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
9717     }
9718   }
9719   return StepVec;
9720 }
9721 
9722 // Implement vector_reverse using vrgather.vv with indices determined by
9723 // subtracting the id of each element from (VLMAX-1). This will convert
9724 // the indices like so:
9725 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
9726 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
9727 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
9728                                                  SelectionDAG &DAG) const {
9729   SDLoc DL(Op);
9730   MVT VecVT = Op.getSimpleValueType();
9731   if (VecVT.getVectorElementType() == MVT::i1) {
9732     MVT WidenVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
9733     SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenVT, Op.getOperand(0));
9734     SDValue Op2 = DAG.getNode(ISD::VECTOR_REVERSE, DL, WidenVT, Op1);
9735     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Op2);
9736   }
9737   unsigned EltSize = VecVT.getScalarSizeInBits();
9738   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
9739   unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
9740   unsigned MaxVLMAX =
9741     RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
9742 
9743   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
9744   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
9745 
9746   // If this is SEW=8 and VLMAX is potentially more than 256, we need
9747   // to use vrgatherei16.vv.
9748   // TODO: It's also possible to use vrgatherei16.vv for other types to
9749   // decrease register width for the index calculation.
9750   if (MaxVLMAX > 256 && EltSize == 8) {
9751     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
9752     // Reverse each half, then reassemble them in reverse order.
9753     // NOTE: It's also possible that after splitting that VLMAX no longer
9754     // requires vrgatherei16.vv.
9755     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
9756       auto [Lo, Hi] = DAG.SplitVectorOperand(Op.getNode(), 0);
9757       auto [LoVT, HiVT] = DAG.GetSplitDestVTs(VecVT);
9758       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
9759       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
9760       // Reassemble the low and high pieces reversed.
9761       // FIXME: This is a CONCAT_VECTORS.
9762       SDValue Res =
9763           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
9764                       DAG.getIntPtrConstant(0, DL));
9765       return DAG.getNode(
9766           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
9767           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
9768     }
9769 
9770     // Just promote the int type to i16 which will double the LMUL.
9771     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
9772     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
9773   }
9774 
9775   MVT XLenVT = Subtarget.getXLenVT();
9776   auto [Mask, VL] = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
9777 
9778   // Calculate VLMAX-1 for the desired SEW.
9779   SDValue VLMinus1 = DAG.getNode(ISD::SUB, DL, XLenVT,
9780                                  computeVLMax(VecVT, DL, DAG),
9781                                  DAG.getConstant(1, DL, XLenVT));
9782 
9783   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
9784   bool IsRV32E64 =
9785       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
9786   SDValue SplatVL;
9787   if (!IsRV32E64)
9788     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
9789   else
9790     SplatVL = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, DAG.getUNDEF(IntVT),
9791                           VLMinus1, DAG.getRegister(RISCV::X0, XLenVT));
9792 
9793   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
9794   SDValue Indices = DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID,
9795                                 DAG.getUNDEF(IntVT), Mask, VL);
9796 
9797   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices,
9798                      DAG.getUNDEF(VecVT), Mask, VL);
9799 }
9800 
9801 SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
9802                                                 SelectionDAG &DAG) const {
9803   SDLoc DL(Op);
9804   SDValue V1 = Op.getOperand(0);
9805   SDValue V2 = Op.getOperand(1);
9806   MVT XLenVT = Subtarget.getXLenVT();
9807   MVT VecVT = Op.getSimpleValueType();
9808 
9809   SDValue VLMax = computeVLMax(VecVT, DL, DAG);
9810 
9811   int64_t ImmValue = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
9812   SDValue DownOffset, UpOffset;
9813   if (ImmValue >= 0) {
9814     // The operand is a TargetConstant, we need to rebuild it as a regular
9815     // constant.
9816     DownOffset = DAG.getConstant(ImmValue, DL, XLenVT);
9817     UpOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DownOffset);
9818   } else {
9819     // The operand is a TargetConstant, we need to rebuild it as a regular
9820     // constant rather than negating the original operand.
9821     UpOffset = DAG.getConstant(-ImmValue, DL, XLenVT);
9822     DownOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, UpOffset);
9823   }
9824 
9825   SDValue TrueMask = getAllOnesMask(VecVT, VLMax, DL, DAG);
9826 
9827   SDValue SlideDown =
9828       getVSlidedown(DAG, Subtarget, DL, VecVT, DAG.getUNDEF(VecVT), V1,
9829                     DownOffset, TrueMask, UpOffset);
9830   return getVSlideup(DAG, Subtarget, DL, VecVT, SlideDown, V2, UpOffset,
9831                      TrueMask, DAG.getRegister(RISCV::X0, XLenVT),
9832                      RISCVII::TAIL_AGNOSTIC);
9833 }
9834 
9835 SDValue
9836 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
9837                                                      SelectionDAG &DAG) const {
9838   SDLoc DL(Op);
9839   auto *Load = cast<LoadSDNode>(Op);
9840 
9841   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
9842                                         Load->getMemoryVT(),
9843                                         *Load->getMemOperand()) &&
9844          "Expecting a correctly-aligned load");
9845 
9846   MVT VT = Op.getSimpleValueType();
9847   MVT XLenVT = Subtarget.getXLenVT();
9848   MVT ContainerVT = getContainerForFixedLengthVector(VT);
9849 
9850   // If we know the exact VLEN and our fixed length vector completely fills
9851   // the container, use a whole register load instead.
9852   const auto [MinVLMAX, MaxVLMAX] =
9853       RISCVTargetLowering::computeVLMAXBounds(ContainerVT, Subtarget);
9854   if (MinVLMAX == MaxVLMAX && MinVLMAX == VT.getVectorNumElements() &&
9855       getLMUL1VT(ContainerVT).bitsLE(ContainerVT)) {
9856     SDValue NewLoad =
9857         DAG.getLoad(ContainerVT, DL, Load->getChain(), Load->getBasePtr(),
9858                     Load->getMemOperand());
9859     SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
9860     return DAG.getMergeValues({Result, NewLoad.getValue(1)}, DL);
9861   }
9862 
9863   SDValue VL = getVLOp(VT.getVectorNumElements(), ContainerVT, DL, DAG, Subtarget);
9864 
9865   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
9866   SDValue IntID = DAG.getTargetConstant(
9867       IsMaskOp ? Intrinsic::riscv_vlm : Intrinsic::riscv_vle, DL, XLenVT);
9868   SmallVector<SDValue, 4> Ops{Load->getChain(), IntID};
9869   if (!IsMaskOp)
9870     Ops.push_back(DAG.getUNDEF(ContainerVT));
9871   Ops.push_back(Load->getBasePtr());
9872   Ops.push_back(VL);
9873   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
9874   SDValue NewLoad =
9875       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
9876                               Load->getMemoryVT(), Load->getMemOperand());
9877 
9878   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
9879   return DAG.getMergeValues({Result, NewLoad.getValue(1)}, DL);
9880 }
9881 
9882 SDValue
9883 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
9884                                                       SelectionDAG &DAG) const {
9885   SDLoc DL(Op);
9886   auto *Store = cast<StoreSDNode>(Op);
9887 
9888   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
9889                                         Store->getMemoryVT(),
9890                                         *Store->getMemOperand()) &&
9891          "Expecting a correctly-aligned store");
9892 
9893   SDValue StoreVal = Store->getValue();
9894   MVT VT = StoreVal.getSimpleValueType();
9895   MVT XLenVT = Subtarget.getXLenVT();
9896 
9897   // If the size less than a byte, we need to pad with zeros to make a byte.
9898   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
9899     VT = MVT::v8i1;
9900     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
9901                            DAG.getConstant(0, DL, VT), StoreVal,
9902                            DAG.getIntPtrConstant(0, DL));
9903   }
9904 
9905   MVT ContainerVT = getContainerForFixedLengthVector(VT);
9906 
9907   SDValue NewValue =
9908       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
9909 
9910 
9911   // If we know the exact VLEN and our fixed length vector completely fills
9912   // the container, use a whole register store instead.
9913   const auto [MinVLMAX, MaxVLMAX] =
9914       RISCVTargetLowering::computeVLMAXBounds(ContainerVT, Subtarget);
9915   if (MinVLMAX == MaxVLMAX && MinVLMAX == VT.getVectorNumElements() &&
9916       getLMUL1VT(ContainerVT).bitsLE(ContainerVT))
9917     return DAG.getStore(Store->getChain(), DL, NewValue, Store->getBasePtr(),
9918                         Store->getMemOperand());
9919 
9920   SDValue VL = getVLOp(VT.getVectorNumElements(), ContainerVT, DL, DAG,
9921                        Subtarget);
9922 
9923   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
9924   SDValue IntID = DAG.getTargetConstant(
9925       IsMaskOp ? Intrinsic::riscv_vsm : Intrinsic::riscv_vse, DL, XLenVT);
9926   return DAG.getMemIntrinsicNode(
9927       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
9928       {Store->getChain(), IntID, NewValue, Store->getBasePtr(), VL},
9929       Store->getMemoryVT(), Store->getMemOperand());
9930 }
9931 
9932 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
9933                                              SelectionDAG &DAG) const {
9934   SDLoc DL(Op);
9935   MVT VT = Op.getSimpleValueType();
9936 
9937   const auto *MemSD = cast<MemSDNode>(Op);
9938   EVT MemVT = MemSD->getMemoryVT();
9939   MachineMemOperand *MMO = MemSD->getMemOperand();
9940   SDValue Chain = MemSD->getChain();
9941   SDValue BasePtr = MemSD->getBasePtr();
9942 
9943   SDValue Mask, PassThru, VL;
9944   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
9945     Mask = VPLoad->getMask();
9946     PassThru = DAG.getUNDEF(VT);
9947     VL = VPLoad->getVectorLength();
9948   } else {
9949     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
9950     Mask = MLoad->getMask();
9951     PassThru = MLoad->getPassThru();
9952   }
9953 
9954   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
9955 
9956   MVT XLenVT = Subtarget.getXLenVT();
9957 
9958   MVT ContainerVT = VT;
9959   if (VT.isFixedLengthVector()) {
9960     ContainerVT = getContainerForFixedLengthVector(VT);
9961     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
9962     if (!IsUnmasked) {
9963       MVT MaskVT = getMaskTypeFor(ContainerVT);
9964       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
9965     }
9966   }
9967 
9968   if (!VL)
9969     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
9970 
9971   unsigned IntID =
9972       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
9973   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
9974   if (IsUnmasked)
9975     Ops.push_back(DAG.getUNDEF(ContainerVT));
9976   else
9977     Ops.push_back(PassThru);
9978   Ops.push_back(BasePtr);
9979   if (!IsUnmasked)
9980     Ops.push_back(Mask);
9981   Ops.push_back(VL);
9982   if (!IsUnmasked)
9983     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
9984 
9985   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
9986 
9987   SDValue Result =
9988       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
9989   Chain = Result.getValue(1);
9990 
9991   if (VT.isFixedLengthVector())
9992     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
9993 
9994   return DAG.getMergeValues({Result, Chain}, DL);
9995 }
9996 
9997 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
9998                                               SelectionDAG &DAG) const {
9999   SDLoc DL(Op);
10000 
10001   const auto *MemSD = cast<MemSDNode>(Op);
10002   EVT MemVT = MemSD->getMemoryVT();
10003   MachineMemOperand *MMO = MemSD->getMemOperand();
10004   SDValue Chain = MemSD->getChain();
10005   SDValue BasePtr = MemSD->getBasePtr();
10006   SDValue Val, Mask, VL;
10007 
10008   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
10009     Val = VPStore->getValue();
10010     Mask = VPStore->getMask();
10011     VL = VPStore->getVectorLength();
10012   } else {
10013     const auto *MStore = cast<MaskedStoreSDNode>(Op);
10014     Val = MStore->getValue();
10015     Mask = MStore->getMask();
10016   }
10017 
10018   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
10019 
10020   MVT VT = Val.getSimpleValueType();
10021   MVT XLenVT = Subtarget.getXLenVT();
10022 
10023   MVT ContainerVT = VT;
10024   if (VT.isFixedLengthVector()) {
10025     ContainerVT = getContainerForFixedLengthVector(VT);
10026 
10027     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
10028     if (!IsUnmasked) {
10029       MVT MaskVT = getMaskTypeFor(ContainerVT);
10030       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
10031     }
10032   }
10033 
10034   if (!VL)
10035     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
10036 
10037   unsigned IntID =
10038       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
10039   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
10040   Ops.push_back(Val);
10041   Ops.push_back(BasePtr);
10042   if (!IsUnmasked)
10043     Ops.push_back(Mask);
10044   Ops.push_back(VL);
10045 
10046   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
10047                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
10048 }
10049 
10050 SDValue
10051 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
10052                                                       SelectionDAG &DAG) const {
10053   MVT InVT = Op.getOperand(0).getSimpleValueType();
10054   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
10055 
10056   MVT VT = Op.getSimpleValueType();
10057 
10058   SDValue Op1 =
10059       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
10060   SDValue Op2 =
10061       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
10062 
10063   SDLoc DL(Op);
10064   auto [Mask, VL] = getDefaultVLOps(VT.getVectorNumElements(), ContainerVT, DL,
10065                                     DAG, Subtarget);
10066   MVT MaskVT = getMaskTypeFor(ContainerVT);
10067 
10068   SDValue Cmp =
10069       DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT,
10070                   {Op1, Op2, Op.getOperand(2), DAG.getUNDEF(MaskVT), Mask, VL});
10071 
10072   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
10073 }
10074 
10075 SDValue RISCVTargetLowering::lowerVectorStrictFSetcc(SDValue Op,
10076                                                      SelectionDAG &DAG) const {
10077   unsigned Opc = Op.getOpcode();
10078   SDLoc DL(Op);
10079   SDValue Chain = Op.getOperand(0);
10080   SDValue Op1 = Op.getOperand(1);
10081   SDValue Op2 = Op.getOperand(2);
10082   SDValue CC = Op.getOperand(3);
10083   ISD::CondCode CCVal = cast<CondCodeSDNode>(CC)->get();
10084   MVT VT = Op.getSimpleValueType();
10085   MVT InVT = Op1.getSimpleValueType();
10086 
10087   // RVV VMFEQ/VMFNE ignores qNan, so we expand strict_fsetccs with OEQ/UNE
10088   // condition code.
10089   if (Opc == ISD::STRICT_FSETCCS) {
10090     // Expand strict_fsetccs(x, oeq) to
10091     // (and strict_fsetccs(x, y, oge), strict_fsetccs(x, y, ole))
10092     SDVTList VTList = Op->getVTList();
10093     if (CCVal == ISD::SETEQ || CCVal == ISD::SETOEQ) {
10094       SDValue OLECCVal = DAG.getCondCode(ISD::SETOLE);
10095       SDValue Tmp1 = DAG.getNode(ISD::STRICT_FSETCCS, DL, VTList, Chain, Op1,
10096                                  Op2, OLECCVal);
10097       SDValue Tmp2 = DAG.getNode(ISD::STRICT_FSETCCS, DL, VTList, Chain, Op2,
10098                                  Op1, OLECCVal);
10099       SDValue OutChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
10100                                      Tmp1.getValue(1), Tmp2.getValue(1));
10101       // Tmp1 and Tmp2 might be the same node.
10102       if (Tmp1 != Tmp2)
10103         Tmp1 = DAG.getNode(ISD::AND, DL, VT, Tmp1, Tmp2);
10104       return DAG.getMergeValues({Tmp1, OutChain}, DL);
10105     }
10106 
10107     // Expand (strict_fsetccs x, y, une) to (not (strict_fsetccs x, y, oeq))
10108     if (CCVal == ISD::SETNE || CCVal == ISD::SETUNE) {
10109       SDValue OEQCCVal = DAG.getCondCode(ISD::SETOEQ);
10110       SDValue OEQ = DAG.getNode(ISD::STRICT_FSETCCS, DL, VTList, Chain, Op1,
10111                                 Op2, OEQCCVal);
10112       SDValue Res = DAG.getNOT(DL, OEQ, VT);
10113       return DAG.getMergeValues({Res, OEQ.getValue(1)}, DL);
10114     }
10115   }
10116 
10117   MVT ContainerInVT = InVT;
10118   if (InVT.isFixedLengthVector()) {
10119     ContainerInVT = getContainerForFixedLengthVector(InVT);
10120     Op1 = convertToScalableVector(ContainerInVT, Op1, DAG, Subtarget);
10121     Op2 = convertToScalableVector(ContainerInVT, Op2, DAG, Subtarget);
10122   }
10123   MVT MaskVT = getMaskTypeFor(ContainerInVT);
10124 
10125   auto [Mask, VL] = getDefaultVLOps(InVT, ContainerInVT, DL, DAG, Subtarget);
10126 
10127   SDValue Res;
10128   if (Opc == ISD::STRICT_FSETCC &&
10129       (CCVal == ISD::SETLT || CCVal == ISD::SETOLT || CCVal == ISD::SETLE ||
10130        CCVal == ISD::SETOLE)) {
10131     // VMFLT/VMFLE/VMFGT/VMFGE raise exception for qNan. Generate a mask to only
10132     // active when both input elements are ordered.
10133     SDValue True = getAllOnesMask(ContainerInVT, VL, DL, DAG);
10134     SDValue OrderMask1 = DAG.getNode(
10135         RISCVISD::STRICT_FSETCC_VL, DL, DAG.getVTList(MaskVT, MVT::Other),
10136         {Chain, Op1, Op1, DAG.getCondCode(ISD::SETOEQ), DAG.getUNDEF(MaskVT),
10137          True, VL});
10138     SDValue OrderMask2 = DAG.getNode(
10139         RISCVISD::STRICT_FSETCC_VL, DL, DAG.getVTList(MaskVT, MVT::Other),
10140         {Chain, Op2, Op2, DAG.getCondCode(ISD::SETOEQ), DAG.getUNDEF(MaskVT),
10141          True, VL});
10142     Mask =
10143         DAG.getNode(RISCVISD::VMAND_VL, DL, MaskVT, OrderMask1, OrderMask2, VL);
10144     // Use Mask as the merge operand to let the result be 0 if either of the
10145     // inputs is unordered.
10146     Res = DAG.getNode(RISCVISD::STRICT_FSETCCS_VL, DL,
10147                       DAG.getVTList(MaskVT, MVT::Other),
10148                       {Chain, Op1, Op2, CC, Mask, Mask, VL});
10149   } else {
10150     unsigned RVVOpc = Opc == ISD::STRICT_FSETCC ? RISCVISD::STRICT_FSETCC_VL
10151                                                 : RISCVISD::STRICT_FSETCCS_VL;
10152     Res = DAG.getNode(RVVOpc, DL, DAG.getVTList(MaskVT, MVT::Other),
10153                       {Chain, Op1, Op2, CC, DAG.getUNDEF(MaskVT), Mask, VL});
10154   }
10155 
10156   if (VT.isFixedLengthVector()) {
10157     SDValue SubVec = convertFromScalableVector(VT, Res, DAG, Subtarget);
10158     return DAG.getMergeValues({SubVec, Res.getValue(1)}, DL);
10159   }
10160   return Res;
10161 }
10162 
10163 // Lower vector ABS to smax(X, sub(0, X)).
10164 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
10165   SDLoc DL(Op);
10166   MVT VT = Op.getSimpleValueType();
10167   SDValue X = Op.getOperand(0);
10168 
10169   assert((Op.getOpcode() == ISD::VP_ABS || VT.isFixedLengthVector()) &&
10170          "Unexpected type for ISD::ABS");
10171 
10172   MVT ContainerVT = VT;
10173   if (VT.isFixedLengthVector()) {
10174     ContainerVT = getContainerForFixedLengthVector(VT);
10175     X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
10176   }
10177 
10178   SDValue Mask, VL;
10179   if (Op->getOpcode() == ISD::VP_ABS) {
10180     Mask = Op->getOperand(1);
10181     if (VT.isFixedLengthVector())
10182       Mask = convertToScalableVector(getMaskTypeFor(ContainerVT), Mask, DAG,
10183                                      Subtarget);
10184     VL = Op->getOperand(2);
10185   } else
10186     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
10187 
10188   SDValue SplatZero = DAG.getNode(
10189       RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
10190       DAG.getConstant(0, DL, Subtarget.getXLenVT()), VL);
10191   SDValue NegX = DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X,
10192                              DAG.getUNDEF(ContainerVT), Mask, VL);
10193   SDValue Max = DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX,
10194                             DAG.getUNDEF(ContainerVT), Mask, VL);
10195 
10196   if (VT.isFixedLengthVector())
10197     Max = convertFromScalableVector(VT, Max, DAG, Subtarget);
10198   return Max;
10199 }
10200 
10201 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
10202     SDValue Op, SelectionDAG &DAG) const {
10203   SDLoc DL(Op);
10204   MVT VT = Op.getSimpleValueType();
10205   SDValue Mag = Op.getOperand(0);
10206   SDValue Sign = Op.getOperand(1);
10207   assert(Mag.getValueType() == Sign.getValueType() &&
10208          "Can only handle COPYSIGN with matching types.");
10209 
10210   MVT ContainerVT = getContainerForFixedLengthVector(VT);
10211   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
10212   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
10213 
10214   auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
10215 
10216   SDValue CopySign = DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag,
10217                                  Sign, DAG.getUNDEF(ContainerVT), Mask, VL);
10218 
10219   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
10220 }
10221 
10222 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
10223     SDValue Op, SelectionDAG &DAG) const {
10224   MVT VT = Op.getSimpleValueType();
10225   MVT ContainerVT = getContainerForFixedLengthVector(VT);
10226 
10227   MVT I1ContainerVT =
10228       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
10229 
10230   SDValue CC =
10231       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
10232   SDValue Op1 =
10233       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
10234   SDValue Op2 =
10235       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
10236 
10237   SDLoc DL(Op);
10238   SDValue VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
10239 
10240   SDValue Select =
10241       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
10242 
10243   return convertFromScalableVector(VT, Select, DAG, Subtarget);
10244 }
10245 
10246 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op,
10247                                                SelectionDAG &DAG) const {
10248   unsigned NewOpc = getRISCVVLOp(Op);
10249   bool HasMergeOp = hasMergeOp(NewOpc);
10250   bool HasMask = hasMaskOp(NewOpc);
10251 
10252   MVT VT = Op.getSimpleValueType();
10253   MVT ContainerVT = getContainerForFixedLengthVector(VT);
10254 
10255   // Create list of operands by converting existing ones to scalable types.
10256   SmallVector<SDValue, 6> Ops;
10257   for (const SDValue &V : Op->op_values()) {
10258     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
10259 
10260     // Pass through non-vector operands.
10261     if (!V.getValueType().isVector()) {
10262       Ops.push_back(V);
10263       continue;
10264     }
10265 
10266     // "cast" fixed length vector to a scalable vector.
10267     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
10268            "Only fixed length vectors are supported!");
10269     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
10270   }
10271 
10272   SDLoc DL(Op);
10273   auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
10274   if (HasMergeOp)
10275     Ops.push_back(DAG.getUNDEF(ContainerVT));
10276   if (HasMask)
10277     Ops.push_back(Mask);
10278   Ops.push_back(VL);
10279 
10280   // StrictFP operations have two result values. Their lowered result should
10281   // have same result count.
10282   if (Op->isStrictFPOpcode()) {
10283     SDValue ScalableRes =
10284         DAG.getNode(NewOpc, DL, DAG.getVTList(ContainerVT, MVT::Other), Ops,
10285                     Op->getFlags());
10286     SDValue SubVec = convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
10287     return DAG.getMergeValues({SubVec, ScalableRes.getValue(1)}, DL);
10288   }
10289 
10290   SDValue ScalableRes =
10291       DAG.getNode(NewOpc, DL, ContainerVT, Ops, Op->getFlags());
10292   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
10293 }
10294 
10295 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
10296 // * Operands of each node are assumed to be in the same order.
10297 // * The EVL operand is promoted from i32 to i64 on RV64.
10298 // * Fixed-length vectors are converted to their scalable-vector container
10299 //   types.
10300 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG) const {
10301   unsigned RISCVISDOpc = getRISCVVLOp(Op);
10302   bool HasMergeOp = hasMergeOp(RISCVISDOpc);
10303 
10304   SDLoc DL(Op);
10305   MVT VT = Op.getSimpleValueType();
10306   SmallVector<SDValue, 4> Ops;
10307 
10308   MVT ContainerVT = VT;
10309   if (VT.isFixedLengthVector())
10310     ContainerVT = getContainerForFixedLengthVector(VT);
10311 
10312   for (const auto &OpIdx : enumerate(Op->ops())) {
10313     SDValue V = OpIdx.value();
10314     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
10315     // Add dummy merge value before the mask.
10316     if (HasMergeOp && *ISD::getVPMaskIdx(Op.getOpcode()) == OpIdx.index())
10317       Ops.push_back(DAG.getUNDEF(ContainerVT));
10318     // Pass through operands which aren't fixed-length vectors.
10319     if (!V.getValueType().isFixedLengthVector()) {
10320       Ops.push_back(V);
10321       continue;
10322     }
10323     // "cast" fixed length vector to a scalable vector.
10324     MVT OpVT = V.getSimpleValueType();
10325     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
10326     assert(useRVVForFixedLengthVectorVT(OpVT) &&
10327            "Only fixed length vectors are supported!");
10328     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
10329   }
10330 
10331   if (!VT.isFixedLengthVector())
10332     return DAG.getNode(RISCVISDOpc, DL, VT, Ops, Op->getFlags());
10333 
10334   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops, Op->getFlags());
10335 
10336   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
10337 }
10338 
10339 SDValue RISCVTargetLowering::lowerVPExtMaskOp(SDValue Op,
10340                                               SelectionDAG &DAG) const {
10341   SDLoc DL(Op);
10342   MVT VT = Op.getSimpleValueType();
10343 
10344   SDValue Src = Op.getOperand(0);
10345   // NOTE: Mask is dropped.
10346   SDValue VL = Op.getOperand(2);
10347 
10348   MVT ContainerVT = VT;
10349   if (VT.isFixedLengthVector()) {
10350     ContainerVT = getContainerForFixedLengthVector(VT);
10351     MVT SrcVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
10352     Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
10353   }
10354 
10355   MVT XLenVT = Subtarget.getXLenVT();
10356   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
10357   SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
10358                                   DAG.getUNDEF(ContainerVT), Zero, VL);
10359 
10360   SDValue SplatValue = DAG.getConstant(
10361       Op.getOpcode() == ISD::VP_ZERO_EXTEND ? 1 : -1, DL, XLenVT);
10362   SDValue Splat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
10363                               DAG.getUNDEF(ContainerVT), SplatValue, VL);
10364 
10365   SDValue Result = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, Src,
10366                                Splat, ZeroSplat, VL);
10367   if (!VT.isFixedLengthVector())
10368     return Result;
10369   return convertFromScalableVector(VT, Result, DAG, Subtarget);
10370 }
10371 
10372 SDValue RISCVTargetLowering::lowerVPSetCCMaskOp(SDValue Op,
10373                                                 SelectionDAG &DAG) const {
10374   SDLoc DL(Op);
10375   MVT VT = Op.getSimpleValueType();
10376 
10377   SDValue Op1 = Op.getOperand(0);
10378   SDValue Op2 = Op.getOperand(1);
10379   ISD::CondCode Condition = cast<CondCodeSDNode>(Op.getOperand(2))->get();
10380   // NOTE: Mask is dropped.
10381   SDValue VL = Op.getOperand(4);
10382 
10383   MVT ContainerVT = VT;
10384   if (VT.isFixedLengthVector()) {
10385     ContainerVT = getContainerForFixedLengthVector(VT);
10386     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
10387     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
10388   }
10389 
10390   SDValue Result;
10391   SDValue AllOneMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
10392 
10393   switch (Condition) {
10394   default:
10395     break;
10396   // X != Y  --> (X^Y)
10397   case ISD::SETNE:
10398     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
10399     break;
10400   // X == Y  --> ~(X^Y)
10401   case ISD::SETEQ: {
10402     SDValue Temp =
10403         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
10404     Result =
10405         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, AllOneMask, VL);
10406     break;
10407   }
10408   // X >s Y   -->  X == 0 & Y == 1  -->  ~X & Y
10409   // X <u Y   -->  X == 0 & Y == 1  -->  ~X & Y
10410   case ISD::SETGT:
10411   case ISD::SETULT: {
10412     SDValue Temp =
10413         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
10414     Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Temp, Op2, VL);
10415     break;
10416   }
10417   // X <s Y   --> X == 1 & Y == 0  -->  ~Y & X
10418   // X >u Y   --> X == 1 & Y == 0  -->  ~Y & X
10419   case ISD::SETLT:
10420   case ISD::SETUGT: {
10421     SDValue Temp =
10422         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
10423     Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Op1, Temp, VL);
10424     break;
10425   }
10426   // X >=s Y  --> X == 0 | Y == 1  -->  ~X | Y
10427   // X <=u Y  --> X == 0 | Y == 1  -->  ~X | Y
10428   case ISD::SETGE:
10429   case ISD::SETULE: {
10430     SDValue Temp =
10431         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
10432     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op2, VL);
10433     break;
10434   }
10435   // X <=s Y  --> X == 1 | Y == 0  -->  ~Y | X
10436   // X >=u Y  --> X == 1 | Y == 0  -->  ~Y | X
10437   case ISD::SETLE:
10438   case ISD::SETUGE: {
10439     SDValue Temp =
10440         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
10441     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op1, VL);
10442     break;
10443   }
10444   }
10445 
10446   if (!VT.isFixedLengthVector())
10447     return Result;
10448   return convertFromScalableVector(VT, Result, DAG, Subtarget);
10449 }
10450 
10451 // Lower Floating-Point/Integer Type-Convert VP SDNodes
10452 SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op,
10453                                                 SelectionDAG &DAG) const {
10454   SDLoc DL(Op);
10455 
10456   SDValue Src = Op.getOperand(0);
10457   SDValue Mask = Op.getOperand(1);
10458   SDValue VL = Op.getOperand(2);
10459   unsigned RISCVISDOpc = getRISCVVLOp(Op);
10460 
10461   MVT DstVT = Op.getSimpleValueType();
10462   MVT SrcVT = Src.getSimpleValueType();
10463   if (DstVT.isFixedLengthVector()) {
10464     DstVT = getContainerForFixedLengthVector(DstVT);
10465     SrcVT = getContainerForFixedLengthVector(SrcVT);
10466     Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
10467     MVT MaskVT = getMaskTypeFor(DstVT);
10468     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
10469   }
10470 
10471   unsigned DstEltSize = DstVT.getScalarSizeInBits();
10472   unsigned SrcEltSize = SrcVT.getScalarSizeInBits();
10473 
10474   SDValue Result;
10475   if (DstEltSize >= SrcEltSize) { // Single-width and widening conversion.
10476     if (SrcVT.isInteger()) {
10477       assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
10478 
10479       unsigned RISCVISDExtOpc = RISCVISDOpc == RISCVISD::SINT_TO_FP_VL
10480                                     ? RISCVISD::VSEXT_VL
10481                                     : RISCVISD::VZEXT_VL;
10482 
10483       // Do we need to do any pre-widening before converting?
10484       if (SrcEltSize == 1) {
10485         MVT IntVT = DstVT.changeVectorElementTypeToInteger();
10486         MVT XLenVT = Subtarget.getXLenVT();
10487         SDValue Zero = DAG.getConstant(0, DL, XLenVT);
10488         SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
10489                                         DAG.getUNDEF(IntVT), Zero, VL);
10490         SDValue One = DAG.getConstant(
10491             RISCVISDExtOpc == RISCVISD::VZEXT_VL ? 1 : -1, DL, XLenVT);
10492         SDValue OneSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
10493                                        DAG.getUNDEF(IntVT), One, VL);
10494         Src = DAG.getNode(RISCVISD::VSELECT_VL, DL, IntVT, Src, OneSplat,
10495                           ZeroSplat, VL);
10496       } else if (DstEltSize > (2 * SrcEltSize)) {
10497         // Widen before converting.
10498         MVT IntVT = MVT::getVectorVT(MVT::getIntegerVT(DstEltSize / 2),
10499                                      DstVT.getVectorElementCount());
10500         Src = DAG.getNode(RISCVISDExtOpc, DL, IntVT, Src, Mask, VL);
10501       }
10502 
10503       Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
10504     } else {
10505       assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
10506              "Wrong input/output vector types");
10507 
10508       // Convert f16 to f32 then convert f32 to i64.
10509       if (DstEltSize > (2 * SrcEltSize)) {
10510         assert(SrcVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
10511         MVT InterimFVT =
10512             MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
10513         Src =
10514             DAG.getNode(RISCVISD::FP_EXTEND_VL, DL, InterimFVT, Src, Mask, VL);
10515       }
10516 
10517       Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
10518     }
10519   } else { // Narrowing + Conversion
10520     if (SrcVT.isInteger()) {
10521       assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
10522       // First do a narrowing convert to an FP type half the size, then round
10523       // the FP type to a small FP type if needed.
10524 
10525       MVT InterimFVT = DstVT;
10526       if (SrcEltSize > (2 * DstEltSize)) {
10527         assert(SrcEltSize == (4 * DstEltSize) && "Unexpected types!");
10528         assert(DstVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
10529         InterimFVT = MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
10530       }
10531 
10532       Result = DAG.getNode(RISCVISDOpc, DL, InterimFVT, Src, Mask, VL);
10533 
10534       if (InterimFVT != DstVT) {
10535         Src = Result;
10536         Result = DAG.getNode(RISCVISD::FP_ROUND_VL, DL, DstVT, Src, Mask, VL);
10537       }
10538     } else {
10539       assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
10540              "Wrong input/output vector types");
10541       // First do a narrowing conversion to an integer half the size, then
10542       // truncate if needed.
10543 
10544       if (DstEltSize == 1) {
10545         // First convert to the same size integer, then convert to mask using
10546         // setcc.
10547         assert(SrcEltSize >= 16 && "Unexpected FP type!");
10548         MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize),
10549                                           DstVT.getVectorElementCount());
10550         Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);
10551 
10552         // Compare the integer result to 0. The integer should be 0 or 1/-1,
10553         // otherwise the conversion was undefined.
10554         MVT XLenVT = Subtarget.getXLenVT();
10555         SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
10556         SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterimIVT,
10557                                 DAG.getUNDEF(InterimIVT), SplatZero, VL);
10558         Result = DAG.getNode(RISCVISD::SETCC_VL, DL, DstVT,
10559                              {Result, SplatZero, DAG.getCondCode(ISD::SETNE),
10560                               DAG.getUNDEF(DstVT), Mask, VL});
10561       } else {
10562         MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
10563                                           DstVT.getVectorElementCount());
10564 
10565         Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);
10566 
10567         while (InterimIVT != DstVT) {
10568           SrcEltSize /= 2;
10569           Src = Result;
10570           InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
10571                                         DstVT.getVectorElementCount());
10572           Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, InterimIVT,
10573                                Src, Mask, VL);
10574         }
10575       }
10576     }
10577   }
10578 
10579   MVT VT = Op.getSimpleValueType();
10580   if (!VT.isFixedLengthVector())
10581     return Result;
10582   return convertFromScalableVector(VT, Result, DAG, Subtarget);
10583 }
10584 
10585 SDValue
10586 RISCVTargetLowering::lowerVPReverseExperimental(SDValue Op,
10587                                                 SelectionDAG &DAG) const {
10588   SDLoc DL(Op);
10589   MVT VT = Op.getSimpleValueType();
10590   MVT XLenVT = Subtarget.getXLenVT();
10591 
10592   SDValue Op1 = Op.getOperand(0);
10593   SDValue Mask = Op.getOperand(1);
10594   SDValue EVL = Op.getOperand(2);
10595 
10596   MVT ContainerVT = VT;
10597   if (VT.isFixedLengthVector()) {
10598     ContainerVT = getContainerForFixedLengthVector(VT);
10599     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
10600     MVT MaskVT = getMaskTypeFor(ContainerVT);
10601     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
10602   }
10603 
10604   MVT GatherVT = ContainerVT;
10605   MVT IndicesVT = ContainerVT.changeVectorElementTypeToInteger();
10606   // Check if we are working with mask vectors
10607   bool IsMaskVector = ContainerVT.getVectorElementType() == MVT::i1;
10608   if (IsMaskVector) {
10609     GatherVT = IndicesVT = ContainerVT.changeVectorElementType(MVT::i8);
10610 
10611     // Expand input operand
10612     SDValue SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IndicesVT,
10613                                    DAG.getUNDEF(IndicesVT),
10614                                    DAG.getConstant(1, DL, XLenVT), EVL);
10615     SDValue SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IndicesVT,
10616                                     DAG.getUNDEF(IndicesVT),
10617                                     DAG.getConstant(0, DL, XLenVT), EVL);
10618     Op1 = DAG.getNode(RISCVISD::VSELECT_VL, DL, IndicesVT, Op1, SplatOne,
10619                       SplatZero, EVL);
10620   }
10621 
10622   unsigned EltSize = GatherVT.getScalarSizeInBits();
10623   unsigned MinSize = GatherVT.getSizeInBits().getKnownMinValue();
10624   unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
10625   unsigned MaxVLMAX =
10626       RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
10627 
10628   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
10629   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
10630   // to use vrgatherei16.vv.
10631   // TODO: It's also possible to use vrgatherei16.vv for other types to
10632   // decrease register width for the index calculation.
10633   // NOTE: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
10634   if (MaxVLMAX > 256 && EltSize == 8) {
10635     // If this is LMUL=8, we have to split before using vrgatherei16.vv.
10636     // Split the vector in half and reverse each half using a full register
10637     // reverse.
10638     // Swap the halves and concatenate them.
10639     // Slide the concatenated result by (VLMax - VL).
10640     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
10641       auto [LoVT, HiVT] = DAG.GetSplitDestVTs(GatherVT);
10642       auto [Lo, Hi] = DAG.SplitVector(Op1, DL);
10643 
10644       SDValue LoRev = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
10645       SDValue HiRev = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
10646 
10647       // Reassemble the low and high pieces reversed.
10648       // NOTE: this Result is unmasked (because we do not need masks for
10649       // shuffles). If in the future this has to change, we can use a SELECT_VL
10650       // between Result and UNDEF using the mask originally passed to VP_REVERSE
10651       SDValue Result =
10652           DAG.getNode(ISD::CONCAT_VECTORS, DL, GatherVT, HiRev, LoRev);
10653 
10654       // Slide off any elements from past EVL that were reversed into the low
10655       // elements.
10656       unsigned MinElts = GatherVT.getVectorMinNumElements();
10657       SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
10658                                   DAG.getConstant(MinElts, DL, XLenVT));
10659       SDValue Diff = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, EVL);
10660 
10661       Result = getVSlidedown(DAG, Subtarget, DL, GatherVT,
10662                              DAG.getUNDEF(GatherVT), Result, Diff, Mask, EVL);
10663 
10664       if (IsMaskVector) {
10665         // Truncate Result back to a mask vector
10666         Result =
10667             DAG.getNode(RISCVISD::SETCC_VL, DL, ContainerVT,
10668                         {Result, DAG.getConstant(0, DL, GatherVT),
10669                          DAG.getCondCode(ISD::SETNE),
10670                          DAG.getUNDEF(getMaskTypeFor(ContainerVT)), Mask, EVL});
10671       }
10672 
10673       if (!VT.isFixedLengthVector())
10674         return Result;
10675       return convertFromScalableVector(VT, Result, DAG, Subtarget);
10676     }
10677 
10678     // Just promote the int type to i16 which will double the LMUL.
10679     IndicesVT = MVT::getVectorVT(MVT::i16, IndicesVT.getVectorElementCount());
10680     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
10681   }
10682 
10683   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IndicesVT, Mask, EVL);
10684   SDValue VecLen =
10685       DAG.getNode(ISD::SUB, DL, XLenVT, EVL, DAG.getConstant(1, DL, XLenVT));
10686   SDValue VecLenSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IndicesVT,
10687                                     DAG.getUNDEF(IndicesVT), VecLen, EVL);
10688   SDValue VRSUB = DAG.getNode(RISCVISD::SUB_VL, DL, IndicesVT, VecLenSplat, VID,
10689                               DAG.getUNDEF(IndicesVT), Mask, EVL);
10690   SDValue Result = DAG.getNode(GatherOpc, DL, GatherVT, Op1, VRSUB,
10691                                DAG.getUNDEF(GatherVT), Mask, EVL);
10692 
10693   if (IsMaskVector) {
10694     // Truncate Result back to a mask vector
10695     Result = DAG.getNode(
10696         RISCVISD::SETCC_VL, DL, ContainerVT,
10697         {Result, DAG.getConstant(0, DL, GatherVT), DAG.getCondCode(ISD::SETNE),
10698          DAG.getUNDEF(getMaskTypeFor(ContainerVT)), Mask, EVL});
10699   }
10700 
10701   if (!VT.isFixedLengthVector())
10702     return Result;
10703   return convertFromScalableVector(VT, Result, DAG, Subtarget);
10704 }
10705 
10706 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op,
10707                                             SelectionDAG &DAG) const {
10708   MVT VT = Op.getSimpleValueType();
10709   if (VT.getVectorElementType() != MVT::i1)
10710     return lowerVPOp(Op, DAG);
10711 
10712   // It is safe to drop mask parameter as masked-off elements are undef.
10713   SDValue Op1 = Op->getOperand(0);
10714   SDValue Op2 = Op->getOperand(1);
10715   SDValue VL = Op->getOperand(3);
10716 
10717   MVT ContainerVT = VT;
10718   const bool IsFixed = VT.isFixedLengthVector();
10719   if (IsFixed) {
10720     ContainerVT = getContainerForFixedLengthVector(VT);
10721     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
10722     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
10723   }
10724 
10725   SDLoc DL(Op);
10726   SDValue Val = DAG.getNode(getRISCVVLOp(Op), DL, ContainerVT, Op1, Op2, VL);
10727   if (!IsFixed)
10728     return Val;
10729   return convertFromScalableVector(VT, Val, DAG, Subtarget);
10730 }
10731 
10732 SDValue RISCVTargetLowering::lowerVPStridedLoad(SDValue Op,
10733                                                 SelectionDAG &DAG) const {
10734   SDLoc DL(Op);
10735   MVT XLenVT = Subtarget.getXLenVT();
10736   MVT VT = Op.getSimpleValueType();
10737   MVT ContainerVT = VT;
10738   if (VT.isFixedLengthVector())
10739     ContainerVT = getContainerForFixedLengthVector(VT);
10740 
10741   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
10742 
10743   auto *VPNode = cast<VPStridedLoadSDNode>(Op);
10744   // Check if the mask is known to be all ones
10745   SDValue Mask = VPNode->getMask();
10746   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
10747 
10748   SDValue IntID = DAG.getTargetConstant(IsUnmasked ? Intrinsic::riscv_vlse
10749                                                    : Intrinsic::riscv_vlse_mask,
10750                                         DL, XLenVT);
10751   SmallVector<SDValue, 8> Ops{VPNode->getChain(), IntID,
10752                               DAG.getUNDEF(ContainerVT), VPNode->getBasePtr(),
10753                               VPNode->getStride()};
10754   if (!IsUnmasked) {
10755     if (VT.isFixedLengthVector()) {
10756       MVT MaskVT = ContainerVT.changeVectorElementType(MVT::i1);
10757       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
10758     }
10759     Ops.push_back(Mask);
10760   }
10761   Ops.push_back(VPNode->getVectorLength());
10762   if (!IsUnmasked) {
10763     SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
10764     Ops.push_back(Policy);
10765   }
10766 
10767   SDValue Result =
10768       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
10769                               VPNode->getMemoryVT(), VPNode->getMemOperand());
10770   SDValue Chain = Result.getValue(1);
10771 
10772   if (VT.isFixedLengthVector())
10773     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
10774 
10775   return DAG.getMergeValues({Result, Chain}, DL);
10776 }
10777 
10778 SDValue RISCVTargetLowering::lowerVPStridedStore(SDValue Op,
10779                                                  SelectionDAG &DAG) const {
10780   SDLoc DL(Op);
10781   MVT XLenVT = Subtarget.getXLenVT();
10782 
10783   auto *VPNode = cast<VPStridedStoreSDNode>(Op);
10784   SDValue StoreVal = VPNode->getValue();
10785   MVT VT = StoreVal.getSimpleValueType();
10786   MVT ContainerVT = VT;
10787   if (VT.isFixedLengthVector()) {
10788     ContainerVT = getContainerForFixedLengthVector(VT);
10789     StoreVal = convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
10790   }
10791 
10792   // Check if the mask is known to be all ones
10793   SDValue Mask = VPNode->getMask();
10794   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
10795 
10796   SDValue IntID = DAG.getTargetConstant(IsUnmasked ? Intrinsic::riscv_vsse
10797                                                    : Intrinsic::riscv_vsse_mask,
10798                                         DL, XLenVT);
10799   SmallVector<SDValue, 8> Ops{VPNode->getChain(), IntID, StoreVal,
10800                               VPNode->getBasePtr(), VPNode->getStride()};
10801   if (!IsUnmasked) {
10802     if (VT.isFixedLengthVector()) {
10803       MVT MaskVT = ContainerVT.changeVectorElementType(MVT::i1);
10804       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
10805     }
10806     Ops.push_back(Mask);
10807   }
10808   Ops.push_back(VPNode->getVectorLength());
10809 
10810   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, VPNode->getVTList(),
10811                                  Ops, VPNode->getMemoryVT(),
10812                                  VPNode->getMemOperand());
10813 }
10814 
10815 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
10816 // matched to a RVV indexed load. The RVV indexed load instructions only
10817 // support the "unsigned unscaled" addressing mode; indices are implicitly
10818 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
10819 // signed or scaled indexing is extended to the XLEN value type and scaled
10820 // accordingly.
10821 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
10822                                                SelectionDAG &DAG) const {
10823   SDLoc DL(Op);
10824   MVT VT = Op.getSimpleValueType();
10825 
10826   const auto *MemSD = cast<MemSDNode>(Op.getNode());
10827   EVT MemVT = MemSD->getMemoryVT();
10828   MachineMemOperand *MMO = MemSD->getMemOperand();
10829   SDValue Chain = MemSD->getChain();
10830   SDValue BasePtr = MemSD->getBasePtr();
10831 
10832   ISD::LoadExtType LoadExtType;
10833   SDValue Index, Mask, PassThru, VL;
10834 
10835   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
10836     Index = VPGN->getIndex();
10837     Mask = VPGN->getMask();
10838     PassThru = DAG.getUNDEF(VT);
10839     VL = VPGN->getVectorLength();
10840     // VP doesn't support extending loads.
10841     LoadExtType = ISD::NON_EXTLOAD;
10842   } else {
10843     // Else it must be a MGATHER.
10844     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
10845     Index = MGN->getIndex();
10846     Mask = MGN->getMask();
10847     PassThru = MGN->getPassThru();
10848     LoadExtType = MGN->getExtensionType();
10849   }
10850 
10851   MVT IndexVT = Index.getSimpleValueType();
10852   MVT XLenVT = Subtarget.getXLenVT();
10853 
10854   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
10855          "Unexpected VTs!");
10856   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
10857   // Targets have to explicitly opt-in for extending vector loads.
10858   assert(LoadExtType == ISD::NON_EXTLOAD &&
10859          "Unexpected extending MGATHER/VP_GATHER");
10860   (void)LoadExtType;
10861 
10862   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
10863   // the selection of the masked intrinsics doesn't do this for us.
10864   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
10865 
10866   MVT ContainerVT = VT;
10867   if (VT.isFixedLengthVector()) {
10868     ContainerVT = getContainerForFixedLengthVector(VT);
10869     IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
10870                                ContainerVT.getVectorElementCount());
10871 
10872     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
10873 
10874     if (!IsUnmasked) {
10875       MVT MaskVT = getMaskTypeFor(ContainerVT);
10876       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
10877       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
10878     }
10879   }
10880 
10881   if (!VL)
10882     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
10883 
10884   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
10885     IndexVT = IndexVT.changeVectorElementType(XLenVT);
10886     Index = DAG.getNode(ISD::TRUNCATE, DL, IndexVT, Index);
10887   }
10888 
10889   unsigned IntID =
10890       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
10891   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
10892   if (IsUnmasked)
10893     Ops.push_back(DAG.getUNDEF(ContainerVT));
10894   else
10895     Ops.push_back(PassThru);
10896   Ops.push_back(BasePtr);
10897   Ops.push_back(Index);
10898   if (!IsUnmasked)
10899     Ops.push_back(Mask);
10900   Ops.push_back(VL);
10901   if (!IsUnmasked)
10902     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
10903 
10904   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
10905   SDValue Result =
10906       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
10907   Chain = Result.getValue(1);
10908 
10909   if (VT.isFixedLengthVector())
10910     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
10911 
10912   return DAG.getMergeValues({Result, Chain}, DL);
10913 }
10914 
10915 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
10916 // matched to a RVV indexed store. The RVV indexed store instructions only
10917 // support the "unsigned unscaled" addressing mode; indices are implicitly
10918 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
10919 // signed or scaled indexing is extended to the XLEN value type and scaled
10920 // accordingly.
10921 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
10922                                                 SelectionDAG &DAG) const {
10923   SDLoc DL(Op);
10924   const auto *MemSD = cast<MemSDNode>(Op.getNode());
10925   EVT MemVT = MemSD->getMemoryVT();
10926   MachineMemOperand *MMO = MemSD->getMemOperand();
10927   SDValue Chain = MemSD->getChain();
10928   SDValue BasePtr = MemSD->getBasePtr();
10929 
10930   bool IsTruncatingStore = false;
10931   SDValue Index, Mask, Val, VL;
10932 
10933   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
10934     Index = VPSN->getIndex();
10935     Mask = VPSN->getMask();
10936     Val = VPSN->getValue();
10937     VL = VPSN->getVectorLength();
10938     // VP doesn't support truncating stores.
10939     IsTruncatingStore = false;
10940   } else {
10941     // Else it must be a MSCATTER.
10942     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
10943     Index = MSN->getIndex();
10944     Mask = MSN->getMask();
10945     Val = MSN->getValue();
10946     IsTruncatingStore = MSN->isTruncatingStore();
10947   }
10948 
10949   MVT VT = Val.getSimpleValueType();
10950   MVT IndexVT = Index.getSimpleValueType();
10951   MVT XLenVT = Subtarget.getXLenVT();
10952 
10953   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
10954          "Unexpected VTs!");
10955   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
10956   // Targets have to explicitly opt-in for extending vector loads and
10957   // truncating vector stores.
10958   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
10959   (void)IsTruncatingStore;
10960 
10961   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
10962   // the selection of the masked intrinsics doesn't do this for us.
10963   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
10964 
10965   MVT ContainerVT = VT;
10966   if (VT.isFixedLengthVector()) {
10967     ContainerVT = getContainerForFixedLengthVector(VT);
10968     IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
10969                                ContainerVT.getVectorElementCount());
10970 
10971     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
10972     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
10973 
10974     if (!IsUnmasked) {
10975       MVT MaskVT = getMaskTypeFor(ContainerVT);
10976       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
10977     }
10978   }
10979 
10980   if (!VL)
10981     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
10982 
10983   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
10984     IndexVT = IndexVT.changeVectorElementType(XLenVT);
10985     Index = DAG.getNode(ISD::TRUNCATE, DL, IndexVT, Index);
10986   }
10987 
10988   unsigned IntID =
10989       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
10990   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
10991   Ops.push_back(Val);
10992   Ops.push_back(BasePtr);
10993   Ops.push_back(Index);
10994   if (!IsUnmasked)
10995     Ops.push_back(Mask);
10996   Ops.push_back(VL);
10997 
10998   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
10999                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
11000 }
11001 
11002 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
11003                                                SelectionDAG &DAG) const {
11004   const MVT XLenVT = Subtarget.getXLenVT();
11005   SDLoc DL(Op);
11006   SDValue Chain = Op->getOperand(0);
11007   SDValue SysRegNo = DAG.getTargetConstant(
11008       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
11009   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
11010   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
11011 
11012   // Encoding used for rounding mode in RISC-V differs from that used in
11013   // FLT_ROUNDS. To convert it the RISC-V rounding mode is used as an index in a
11014   // table, which consists of a sequence of 4-bit fields, each representing
11015   // corresponding FLT_ROUNDS mode.
11016   static const int Table =
11017       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
11018       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
11019       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
11020       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
11021       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
11022 
11023   SDValue Shift =
11024       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
11025   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
11026                                 DAG.getConstant(Table, DL, XLenVT), Shift);
11027   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
11028                                DAG.getConstant(7, DL, XLenVT));
11029 
11030   return DAG.getMergeValues({Masked, Chain}, DL);
11031 }
11032 
11033 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
11034                                                SelectionDAG &DAG) const {
11035   const MVT XLenVT = Subtarget.getXLenVT();
11036   SDLoc DL(Op);
11037   SDValue Chain = Op->getOperand(0);
11038   SDValue RMValue = Op->getOperand(1);
11039   SDValue SysRegNo = DAG.getTargetConstant(
11040       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
11041 
11042   // Encoding used for rounding mode in RISC-V differs from that used in
11043   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
11044   // a table, which consists of a sequence of 4-bit fields, each representing
11045   // corresponding RISC-V mode.
11046   static const unsigned Table =
11047       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
11048       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
11049       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
11050       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
11051       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
11052 
11053   RMValue = DAG.getNode(ISD::ZERO_EXTEND, DL, XLenVT, RMValue);
11054 
11055   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
11056                               DAG.getConstant(2, DL, XLenVT));
11057   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
11058                                 DAG.getConstant(Table, DL, XLenVT), Shift);
11059   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
11060                         DAG.getConstant(0x7, DL, XLenVT));
11061   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
11062                      RMValue);
11063 }
11064 
11065 SDValue RISCVTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
11066                                                SelectionDAG &DAG) const {
11067   MachineFunction &MF = DAG.getMachineFunction();
11068 
11069   bool isRISCV64 = Subtarget.is64Bit();
11070   EVT PtrVT = getPointerTy(DAG.getDataLayout());
11071 
11072   int FI = MF.getFrameInfo().CreateFixedObject(isRISCV64 ? 8 : 4, 0, false);
11073   return DAG.getFrameIndex(FI, PtrVT);
11074 }
11075 
11076 // Returns the opcode of the target-specific SDNode that implements the 32-bit
11077 // form of the given Opcode.
11078 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
11079   switch (Opcode) {
11080   default:
11081     llvm_unreachable("Unexpected opcode");
11082   case ISD::SHL:
11083     return RISCVISD::SLLW;
11084   case ISD::SRA:
11085     return RISCVISD::SRAW;
11086   case ISD::SRL:
11087     return RISCVISD::SRLW;
11088   case ISD::SDIV:
11089     return RISCVISD::DIVW;
11090   case ISD::UDIV:
11091     return RISCVISD::DIVUW;
11092   case ISD::UREM:
11093     return RISCVISD::REMUW;
11094   case ISD::ROTL:
11095     return RISCVISD::ROLW;
11096   case ISD::ROTR:
11097     return RISCVISD::RORW;
11098   }
11099 }
11100 
11101 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
11102 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
11103 // otherwise be promoted to i64, making it difficult to select the
11104 // SLLW/DIVUW/.../*W later one because the fact the operation was originally of
11105 // type i8/i16/i32 is lost.
11106 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
11107                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
11108   SDLoc DL(N);
11109   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
11110   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
11111   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
11112   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
11113   // ReplaceNodeResults requires we maintain the same type for the return value.
11114   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
11115 }
11116 
11117 // Converts the given 32-bit operation to a i64 operation with signed extension
11118 // semantic to reduce the signed extension instructions.
11119 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
11120   SDLoc DL(N);
11121   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
11122   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
11123   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
11124   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
11125                                DAG.getValueType(MVT::i32));
11126   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
11127 }
11128 
11129 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
11130                                              SmallVectorImpl<SDValue> &Results,
11131                                              SelectionDAG &DAG) const {
11132   SDLoc DL(N);
11133   switch (N->getOpcode()) {
11134   default:
11135     llvm_unreachable("Don't know how to custom type legalize this operation!");
11136   case ISD::STRICT_FP_TO_SINT:
11137   case ISD::STRICT_FP_TO_UINT:
11138   case ISD::FP_TO_SINT:
11139   case ISD::FP_TO_UINT: {
11140     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
11141            "Unexpected custom legalisation");
11142     bool IsStrict = N->isStrictFPOpcode();
11143     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
11144                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
11145     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
11146     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
11147         TargetLowering::TypeSoftenFloat) {
11148       if (!isTypeLegal(Op0.getValueType()))
11149         return;
11150       if (IsStrict) {
11151         SDValue Chain = N->getOperand(0);
11152         // In absense of Zfh, promote f16 to f32, then convert.
11153         if (Op0.getValueType() == MVT::f16 &&
11154             !Subtarget.hasStdExtZfhOrZhinx()) {
11155           Op0 = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other},
11156                             {Chain, Op0});
11157           Chain = Op0.getValue(1);
11158         }
11159         unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
11160                                 : RISCVISD::STRICT_FCVT_WU_RV64;
11161         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
11162         SDValue Res = DAG.getNode(
11163             Opc, DL, VTs, Chain, Op0,
11164             DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
11165         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
11166         Results.push_back(Res.getValue(1));
11167         return;
11168       }
11169       // For bf16, or f16 in absense of Zfh, promote [b]f16 to f32 and then
11170       // convert.
11171       if ((Op0.getValueType() == MVT::f16 &&
11172            !Subtarget.hasStdExtZfhOrZhinx()) ||
11173           Op0.getValueType() == MVT::bf16)
11174         Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op0);
11175 
11176       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
11177       SDValue Res =
11178           DAG.getNode(Opc, DL, MVT::i64, Op0,
11179                       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
11180       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
11181       return;
11182     }
11183     // If the FP type needs to be softened, emit a library call using the 'si'
11184     // version. If we left it to default legalization we'd end up with 'di'. If
11185     // the FP type doesn't need to be softened just let generic type
11186     // legalization promote the result type.
11187     RTLIB::Libcall LC;
11188     if (IsSigned)
11189       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
11190     else
11191       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
11192     MakeLibCallOptions CallOptions;
11193     EVT OpVT = Op0.getValueType();
11194     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
11195     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
11196     SDValue Result;
11197     std::tie(Result, Chain) =
11198         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
11199     Results.push_back(Result);
11200     if (IsStrict)
11201       Results.push_back(Chain);
11202     break;
11203   }
11204   case ISD::LROUND: {
11205     SDValue Op0 = N->getOperand(0);
11206     EVT Op0VT = Op0.getValueType();
11207     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
11208         TargetLowering::TypeSoftenFloat) {
11209       if (!isTypeLegal(Op0VT))
11210         return;
11211 
11212       // In absense of Zfh, promote f16 to f32, then convert.
11213       if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx())
11214         Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op0);
11215 
11216       SDValue Res =
11217           DAG.getNode(RISCVISD::FCVT_W_RV64, DL, MVT::i64, Op0,
11218                       DAG.getTargetConstant(RISCVFPRndMode::RMM, DL, MVT::i64));
11219       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
11220       return;
11221     }
11222     // If the FP type needs to be softened, emit a library call to lround. We'll
11223     // need to truncate the result. We assume any value that doesn't fit in i32
11224     // is allowed to return an unspecified value.
11225     RTLIB::Libcall LC =
11226         Op0.getValueType() == MVT::f64 ? RTLIB::LROUND_F64 : RTLIB::LROUND_F32;
11227     MakeLibCallOptions CallOptions;
11228     EVT OpVT = Op0.getValueType();
11229     CallOptions.setTypeListBeforeSoften(OpVT, MVT::i64, true);
11230     SDValue Result = makeLibCall(DAG, LC, MVT::i64, Op0, CallOptions, DL).first;
11231     Result = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Result);
11232     Results.push_back(Result);
11233     break;
11234   }
11235   case ISD::READCYCLECOUNTER: {
11236     assert(!Subtarget.is64Bit() &&
11237            "READCYCLECOUNTER only has custom type legalization on riscv32");
11238 
11239     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
11240     SDValue RCW =
11241         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
11242 
11243     Results.push_back(
11244         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
11245     Results.push_back(RCW.getValue(2));
11246     break;
11247   }
11248   case ISD::LOAD: {
11249     if (!ISD::isNON_EXTLoad(N))
11250       return;
11251 
11252     // Use a SEXTLOAD instead of the default EXTLOAD. Similar to the
11253     // sext_inreg we emit for ADD/SUB/MUL/SLLI.
11254     LoadSDNode *Ld = cast<LoadSDNode>(N);
11255 
11256     SDLoc dl(N);
11257     SDValue Res = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Ld->getChain(),
11258                                  Ld->getBasePtr(), Ld->getMemoryVT(),
11259                                  Ld->getMemOperand());
11260     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Res));
11261     Results.push_back(Res.getValue(1));
11262     return;
11263   }
11264   case ISD::MUL: {
11265     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
11266     unsigned XLen = Subtarget.getXLen();
11267     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
11268     if (Size > XLen) {
11269       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
11270       SDValue LHS = N->getOperand(0);
11271       SDValue RHS = N->getOperand(1);
11272       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
11273 
11274       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
11275       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
11276       // We need exactly one side to be unsigned.
11277       if (LHSIsU == RHSIsU)
11278         return;
11279 
11280       auto MakeMULPair = [&](SDValue S, SDValue U) {
11281         MVT XLenVT = Subtarget.getXLenVT();
11282         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
11283         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
11284         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
11285         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
11286         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
11287       };
11288 
11289       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
11290       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
11291 
11292       // The other operand should be signed, but still prefer MULH when
11293       // possible.
11294       if (RHSIsU && LHSIsS && !RHSIsS)
11295         Results.push_back(MakeMULPair(LHS, RHS));
11296       else if (LHSIsU && RHSIsS && !LHSIsS)
11297         Results.push_back(MakeMULPair(RHS, LHS));
11298 
11299       return;
11300     }
11301     [[fallthrough]];
11302   }
11303   case ISD::ADD:
11304   case ISD::SUB:
11305     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
11306            "Unexpected custom legalisation");
11307     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
11308     break;
11309   case ISD::SHL:
11310   case ISD::SRA:
11311   case ISD::SRL:
11312     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
11313            "Unexpected custom legalisation");
11314     if (N->getOperand(1).getOpcode() != ISD::Constant) {
11315       // If we can use a BSET instruction, allow default promotion to apply.
11316       if (N->getOpcode() == ISD::SHL && Subtarget.hasStdExtZbs() &&
11317           isOneConstant(N->getOperand(0)))
11318         break;
11319       Results.push_back(customLegalizeToWOp(N, DAG));
11320       break;
11321     }
11322 
11323     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
11324     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
11325     // shift amount.
11326     if (N->getOpcode() == ISD::SHL) {
11327       SDLoc DL(N);
11328       SDValue NewOp0 =
11329           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
11330       SDValue NewOp1 =
11331           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
11332       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
11333       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
11334                                    DAG.getValueType(MVT::i32));
11335       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
11336     }
11337 
11338     break;
11339   case ISD::ROTL:
11340   case ISD::ROTR:
11341     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
11342            "Unexpected custom legalisation");
11343     assert((Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb() ||
11344             Subtarget.hasVendorXTHeadBb()) &&
11345            "Unexpected custom legalization");
11346     if (!isa<ConstantSDNode>(N->getOperand(1)) &&
11347         !(Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()))
11348       return;
11349     Results.push_back(customLegalizeToWOp(N, DAG));
11350     break;
11351   case ISD::CTTZ:
11352   case ISD::CTTZ_ZERO_UNDEF:
11353   case ISD::CTLZ:
11354   case ISD::CTLZ_ZERO_UNDEF: {
11355     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
11356            "Unexpected custom legalisation");
11357 
11358     SDValue NewOp0 =
11359         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
11360     bool IsCTZ =
11361         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
11362     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
11363     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
11364     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
11365     return;
11366   }
11367   case ISD::SDIV:
11368   case ISD::UDIV:
11369   case ISD::UREM: {
11370     MVT VT = N->getSimpleValueType(0);
11371     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
11372            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
11373            "Unexpected custom legalisation");
11374     // Don't promote division/remainder by constant since we should expand those
11375     // to multiply by magic constant.
11376     AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
11377     if (N->getOperand(1).getOpcode() == ISD::Constant &&
11378         !isIntDivCheap(N->getValueType(0), Attr))
11379       return;
11380 
11381     // If the input is i32, use ANY_EXTEND since the W instructions don't read
11382     // the upper 32 bits. For other types we need to sign or zero extend
11383     // based on the opcode.
11384     unsigned ExtOpc = ISD::ANY_EXTEND;
11385     if (VT != MVT::i32)
11386       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
11387                                            : ISD::ZERO_EXTEND;
11388 
11389     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
11390     break;
11391   }
11392   case ISD::SADDO: {
11393     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
11394            "Unexpected custom legalisation");
11395 
11396     // If the RHS is a constant, we can simplify ConditionRHS below. Otherwise
11397     // use the default legalization.
11398     if (!isa<ConstantSDNode>(N->getOperand(1)))
11399       return;
11400 
11401     SDValue LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
11402     SDValue RHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
11403     SDValue Res = DAG.getNode(ISD::ADD, DL, MVT::i64, LHS, RHS);
11404     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
11405                       DAG.getValueType(MVT::i32));
11406 
11407     SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
11408 
11409     // For an addition, the result should be less than one of the operands (LHS)
11410     // if and only if the other operand (RHS) is negative, otherwise there will
11411     // be overflow.
11412     // For a subtraction, the result should be less than one of the operands
11413     // (LHS) if and only if the other operand (RHS) is (non-zero) positive,
11414     // otherwise there will be overflow.
11415     EVT OType = N->getValueType(1);
11416     SDValue ResultLowerThanLHS = DAG.getSetCC(DL, OType, Res, LHS, ISD::SETLT);
11417     SDValue ConditionRHS = DAG.getSetCC(DL, OType, RHS, Zero, ISD::SETLT);
11418 
11419     SDValue Overflow =
11420         DAG.getNode(ISD::XOR, DL, OType, ConditionRHS, ResultLowerThanLHS);
11421     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
11422     Results.push_back(Overflow);
11423     return;
11424   }
11425   case ISD::UADDO:
11426   case ISD::USUBO: {
11427     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
11428            "Unexpected custom legalisation");
11429     bool IsAdd = N->getOpcode() == ISD::UADDO;
11430     // Create an ADDW or SUBW.
11431     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
11432     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
11433     SDValue Res =
11434         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
11435     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
11436                       DAG.getValueType(MVT::i32));
11437 
11438     SDValue Overflow;
11439     if (IsAdd && isOneConstant(RHS)) {
11440       // Special case uaddo X, 1 overflowed if the addition result is 0.
11441       // The general case (X + C) < C is not necessarily beneficial. Although we
11442       // reduce the live range of X, we may introduce the materialization of
11443       // constant C, especially when the setcc result is used by branch. We have
11444       // no compare with constant and branch instructions.
11445       Overflow = DAG.getSetCC(DL, N->getValueType(1), Res,
11446                               DAG.getConstant(0, DL, MVT::i64), ISD::SETEQ);
11447     } else if (IsAdd && isAllOnesConstant(RHS)) {
11448       // Special case uaddo X, -1 overflowed if X != 0.
11449       Overflow = DAG.getSetCC(DL, N->getValueType(1), N->getOperand(0),
11450                               DAG.getConstant(0, DL, MVT::i32), ISD::SETNE);
11451     } else {
11452       // Sign extend the LHS and perform an unsigned compare with the ADDW
11453       // result. Since the inputs are sign extended from i32, this is equivalent
11454       // to comparing the lower 32 bits.
11455       LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
11456       Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
11457                               IsAdd ? ISD::SETULT : ISD::SETUGT);
11458     }
11459 
11460     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
11461     Results.push_back(Overflow);
11462     return;
11463   }
11464   case ISD::UADDSAT:
11465   case ISD::USUBSAT: {
11466     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
11467            "Unexpected custom legalisation");
11468     if (Subtarget.hasStdExtZbb()) {
11469       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
11470       // sign extend allows overflow of the lower 32 bits to be detected on
11471       // the promoted size.
11472       SDValue LHS =
11473           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
11474       SDValue RHS =
11475           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
11476       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
11477       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
11478       return;
11479     }
11480 
11481     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
11482     // promotion for UADDO/USUBO.
11483     Results.push_back(expandAddSubSat(N, DAG));
11484     return;
11485   }
11486   case ISD::ABS: {
11487     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
11488            "Unexpected custom legalisation");
11489 
11490     if (Subtarget.hasStdExtZbb()) {
11491       // Emit a special ABSW node that will be expanded to NEGW+MAX at isel.
11492       // This allows us to remember that the result is sign extended. Expanding
11493       // to NEGW+MAX here requires a Freeze which breaks ComputeNumSignBits.
11494       SDValue Src = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64,
11495                                 N->getOperand(0));
11496       SDValue Abs = DAG.getNode(RISCVISD::ABSW, DL, MVT::i64, Src);
11497       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Abs));
11498       return;
11499     }
11500 
11501     // Expand abs to Y = (sraiw X, 31); subw(xor(X, Y), Y)
11502     SDValue Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
11503 
11504     // Freeze the source so we can increase it's use count.
11505     Src = DAG.getFreeze(Src);
11506 
11507     // Copy sign bit to all bits using the sraiw pattern.
11508     SDValue SignFill = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Src,
11509                                    DAG.getValueType(MVT::i32));
11510     SignFill = DAG.getNode(ISD::SRA, DL, MVT::i64, SignFill,
11511                            DAG.getConstant(31, DL, MVT::i64));
11512 
11513     SDValue NewRes = DAG.getNode(ISD::XOR, DL, MVT::i64, Src, SignFill);
11514     NewRes = DAG.getNode(ISD::SUB, DL, MVT::i64, NewRes, SignFill);
11515 
11516     // NOTE: The result is only required to be anyextended, but sext is
11517     // consistent with type legalization of sub.
11518     NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewRes,
11519                          DAG.getValueType(MVT::i32));
11520     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
11521     return;
11522   }
11523   case ISD::BITCAST: {
11524     EVT VT = N->getValueType(0);
11525     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
11526     SDValue Op0 = N->getOperand(0);
11527     EVT Op0VT = Op0.getValueType();
11528     MVT XLenVT = Subtarget.getXLenVT();
11529     if (VT == MVT::i16 && Op0VT == MVT::f16 &&
11530         Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin()) {
11531       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
11532       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
11533     } else if (VT == MVT::i16 && Op0VT == MVT::bf16 &&
11534         Subtarget.hasStdExtZfbfmin()) {
11535       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
11536       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
11537     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
11538                Subtarget.hasStdExtFOrZfinx()) {
11539       SDValue FPConv =
11540           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
11541       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
11542     } else if (VT == MVT::i64 && Op0VT == MVT::f64 && XLenVT == MVT::i32 &&
11543                Subtarget.hasStdExtZfa()) {
11544       SDValue NewReg = DAG.getNode(RISCVISD::SplitF64, DL,
11545                                    DAG.getVTList(MVT::i32, MVT::i32), Op0);
11546       SDValue RetReg = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64,
11547                                    NewReg.getValue(0), NewReg.getValue(1));
11548       Results.push_back(RetReg);
11549     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
11550                isTypeLegal(Op0VT)) {
11551       // Custom-legalize bitcasts from fixed-length vector types to illegal
11552       // scalar types in order to improve codegen. Bitcast the vector to a
11553       // one-element vector type whose element type is the same as the result
11554       // type, and extract the first element.
11555       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
11556       if (isTypeLegal(BVT)) {
11557         SDValue BVec = DAG.getBitcast(BVT, Op0);
11558         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
11559                                       DAG.getConstant(0, DL, XLenVT)));
11560       }
11561     }
11562     break;
11563   }
11564   case RISCVISD::BREV8: {
11565     MVT VT = N->getSimpleValueType(0);
11566     MVT XLenVT = Subtarget.getXLenVT();
11567     assert((VT == MVT::i16 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
11568            "Unexpected custom legalisation");
11569     assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
11570     SDValue NewOp = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
11571     SDValue NewRes = DAG.getNode(N->getOpcode(), DL, XLenVT, NewOp);
11572     // ReplaceNodeResults requires we maintain the same type for the return
11573     // value.
11574     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NewRes));
11575     break;
11576   }
11577   case ISD::EXTRACT_VECTOR_ELT: {
11578     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
11579     // type is illegal (currently only vXi64 RV32).
11580     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
11581     // transferred to the destination register. We issue two of these from the
11582     // upper- and lower- halves of the SEW-bit vector element, slid down to the
11583     // first element.
11584     SDValue Vec = N->getOperand(0);
11585     SDValue Idx = N->getOperand(1);
11586 
11587     // The vector type hasn't been legalized yet so we can't issue target
11588     // specific nodes if it needs legalization.
11589     // FIXME: We would manually legalize if it's important.
11590     if (!isTypeLegal(Vec.getValueType()))
11591       return;
11592 
11593     MVT VecVT = Vec.getSimpleValueType();
11594 
11595     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
11596            VecVT.getVectorElementType() == MVT::i64 &&
11597            "Unexpected EXTRACT_VECTOR_ELT legalization");
11598 
11599     // If this is a fixed vector, we need to convert it to a scalable vector.
11600     MVT ContainerVT = VecVT;
11601     if (VecVT.isFixedLengthVector()) {
11602       ContainerVT = getContainerForFixedLengthVector(VecVT);
11603       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
11604     }
11605 
11606     MVT XLenVT = Subtarget.getXLenVT();
11607 
11608     // Use a VL of 1 to avoid processing more elements than we need.
11609     auto [Mask, VL] = getDefaultVLOps(1, ContainerVT, DL, DAG, Subtarget);
11610 
11611     // Unless the index is known to be 0, we must slide the vector down to get
11612     // the desired element into index 0.
11613     if (!isNullConstant(Idx)) {
11614       Vec = getVSlidedown(DAG, Subtarget, DL, ContainerVT,
11615                           DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
11616     }
11617 
11618     // Extract the lower XLEN bits of the correct vector element.
11619     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
11620 
11621     // To extract the upper XLEN bits of the vector element, shift the first
11622     // element right by 32 bits and re-extract the lower XLEN bits.
11623     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
11624                                      DAG.getUNDEF(ContainerVT),
11625                                      DAG.getConstant(32, DL, XLenVT), VL);
11626     SDValue LShr32 =
11627         DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec, ThirtyTwoV,
11628                     DAG.getUNDEF(ContainerVT), Mask, VL);
11629 
11630     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
11631 
11632     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
11633     break;
11634   }
11635   case ISD::INTRINSIC_WO_CHAIN: {
11636     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
11637     switch (IntNo) {
11638     default:
11639       llvm_unreachable(
11640           "Don't know how to custom type legalize this intrinsic!");
11641     case Intrinsic::experimental_get_vector_length: {
11642       SDValue Res = lowerGetVectorLength(N, DAG, Subtarget);
11643       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
11644       return;
11645     }
11646     case Intrinsic::riscv_orc_b:
11647     case Intrinsic::riscv_brev8:
11648     case Intrinsic::riscv_sha256sig0:
11649     case Intrinsic::riscv_sha256sig1:
11650     case Intrinsic::riscv_sha256sum0:
11651     case Intrinsic::riscv_sha256sum1:
11652     case Intrinsic::riscv_sm3p0:
11653     case Intrinsic::riscv_sm3p1: {
11654       if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
11655         return;
11656       unsigned Opc;
11657       switch (IntNo) {
11658       case Intrinsic::riscv_orc_b:      Opc = RISCVISD::ORC_B;      break;
11659       case Intrinsic::riscv_brev8:      Opc = RISCVISD::BREV8;      break;
11660       case Intrinsic::riscv_sha256sig0: Opc = RISCVISD::SHA256SIG0; break;
11661       case Intrinsic::riscv_sha256sig1: Opc = RISCVISD::SHA256SIG1; break;
11662       case Intrinsic::riscv_sha256sum0: Opc = RISCVISD::SHA256SUM0; break;
11663       case Intrinsic::riscv_sha256sum1: Opc = RISCVISD::SHA256SUM1; break;
11664       case Intrinsic::riscv_sm3p0:      Opc = RISCVISD::SM3P0;      break;
11665       case Intrinsic::riscv_sm3p1:      Opc = RISCVISD::SM3P1;      break;
11666       }
11667 
11668       SDValue NewOp =
11669           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
11670       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp);
11671       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
11672       return;
11673     }
11674     case Intrinsic::riscv_sm4ks:
11675     case Intrinsic::riscv_sm4ed: {
11676       unsigned Opc =
11677           IntNo == Intrinsic::riscv_sm4ks ? RISCVISD::SM4KS : RISCVISD::SM4ED;
11678       SDValue NewOp0 =
11679           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
11680       SDValue NewOp1 =
11681           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
11682       SDValue Res =
11683           DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, N->getOperand(3));
11684       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
11685       return;
11686     }
11687     case Intrinsic::riscv_clmul: {
11688       if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
11689         return;
11690 
11691       SDValue NewOp0 =
11692           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
11693       SDValue NewOp1 =
11694           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
11695       SDValue Res = DAG.getNode(RISCVISD::CLMUL, DL, MVT::i64, NewOp0, NewOp1);
11696       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
11697       return;
11698     }
11699     case Intrinsic::riscv_clmulh:
11700     case Intrinsic::riscv_clmulr: {
11701       if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
11702         return;
11703 
11704       // Extend inputs to XLen, and shift by 32. This will add 64 trailing zeros
11705       // to the full 128-bit clmul result of multiplying two xlen values.
11706       // Perform clmulr or clmulh on the shifted values. Finally, extract the
11707       // upper 32 bits.
11708       //
11709       // The alternative is to mask the inputs to 32 bits and use clmul, but
11710       // that requires two shifts to mask each input without zext.w.
11711       // FIXME: If the inputs are known zero extended or could be freely
11712       // zero extended, the mask form would be better.
11713       SDValue NewOp0 =
11714           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
11715       SDValue NewOp1 =
11716           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
11717       NewOp0 = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0,
11718                            DAG.getConstant(32, DL, MVT::i64));
11719       NewOp1 = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp1,
11720                            DAG.getConstant(32, DL, MVT::i64));
11721       unsigned Opc = IntNo == Intrinsic::riscv_clmulh ? RISCVISD::CLMULH
11722                                                       : RISCVISD::CLMULR;
11723       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1);
11724       Res = DAG.getNode(ISD::SRL, DL, MVT::i64, Res,
11725                         DAG.getConstant(32, DL, MVT::i64));
11726       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
11727       return;
11728     }
11729     case Intrinsic::riscv_vmv_x_s: {
11730       EVT VT = N->getValueType(0);
11731       MVT XLenVT = Subtarget.getXLenVT();
11732       if (VT.bitsLT(XLenVT)) {
11733         // Simple case just extract using vmv.x.s and truncate.
11734         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
11735                                       Subtarget.getXLenVT(), N->getOperand(1));
11736         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
11737         return;
11738       }
11739 
11740       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
11741              "Unexpected custom legalization");
11742 
11743       // We need to do the move in two steps.
11744       SDValue Vec = N->getOperand(1);
11745       MVT VecVT = Vec.getSimpleValueType();
11746 
11747       // First extract the lower XLEN bits of the element.
11748       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
11749 
11750       // To extract the upper XLEN bits of the vector element, shift the first
11751       // element right by 32 bits and re-extract the lower XLEN bits.
11752       auto [Mask, VL] = getDefaultVLOps(1, VecVT, DL, DAG, Subtarget);
11753 
11754       SDValue ThirtyTwoV =
11755           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
11756                       DAG.getConstant(32, DL, XLenVT), VL);
11757       SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV,
11758                                    DAG.getUNDEF(VecVT), Mask, VL);
11759       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
11760 
11761       Results.push_back(
11762           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
11763       break;
11764     }
11765     }
11766     break;
11767   }
11768   case ISD::VECREDUCE_ADD:
11769   case ISD::VECREDUCE_AND:
11770   case ISD::VECREDUCE_OR:
11771   case ISD::VECREDUCE_XOR:
11772   case ISD::VECREDUCE_SMAX:
11773   case ISD::VECREDUCE_UMAX:
11774   case ISD::VECREDUCE_SMIN:
11775   case ISD::VECREDUCE_UMIN:
11776     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
11777       Results.push_back(V);
11778     break;
11779   case ISD::VP_REDUCE_ADD:
11780   case ISD::VP_REDUCE_AND:
11781   case ISD::VP_REDUCE_OR:
11782   case ISD::VP_REDUCE_XOR:
11783   case ISD::VP_REDUCE_SMAX:
11784   case ISD::VP_REDUCE_UMAX:
11785   case ISD::VP_REDUCE_SMIN:
11786   case ISD::VP_REDUCE_UMIN:
11787     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
11788       Results.push_back(V);
11789     break;
11790   case ISD::GET_ROUNDING: {
11791     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
11792     SDValue Res = DAG.getNode(ISD::GET_ROUNDING, DL, VTs, N->getOperand(0));
11793     Results.push_back(Res.getValue(0));
11794     Results.push_back(Res.getValue(1));
11795     break;
11796   }
11797   }
11798 }
11799 
11800 /// Given a binary operator, return the *associative* generic ISD::VECREDUCE_OP
11801 /// which corresponds to it.
11802 static unsigned getVecReduceOpcode(unsigned Opc) {
11803   switch (Opc) {
11804   default:
11805     llvm_unreachable("Unhandled binary to transfrom reduction");
11806   case ISD::ADD:
11807     return ISD::VECREDUCE_ADD;
11808   case ISD::UMAX:
11809     return ISD::VECREDUCE_UMAX;
11810   case ISD::SMAX:
11811     return ISD::VECREDUCE_SMAX;
11812   case ISD::UMIN:
11813     return ISD::VECREDUCE_UMIN;
11814   case ISD::SMIN:
11815     return ISD::VECREDUCE_SMIN;
11816   case ISD::AND:
11817     return ISD::VECREDUCE_AND;
11818   case ISD::OR:
11819     return ISD::VECREDUCE_OR;
11820   case ISD::XOR:
11821     return ISD::VECREDUCE_XOR;
11822   case ISD::FADD:
11823     // Note: This is the associative form of the generic reduction opcode.
11824     return ISD::VECREDUCE_FADD;
11825   }
11826 }
11827 
11828 /// Perform two related transforms whose purpose is to incrementally recognize
11829 /// an explode_vector followed by scalar reduction as a vector reduction node.
11830 /// This exists to recover from a deficiency in SLP which can't handle
11831 /// forests with multiple roots sharing common nodes.  In some cases, one
11832 /// of the trees will be vectorized, and the other will remain (unprofitably)
11833 /// scalarized.
11834 static SDValue
11835 combineBinOpOfExtractToReduceTree(SDNode *N, SelectionDAG &DAG,
11836                                   const RISCVSubtarget &Subtarget) {
11837 
11838   // This transforms need to run before all integer types have been legalized
11839   // to i64 (so that the vector element type matches the add type), and while
11840   // it's safe to introduce odd sized vector types.
11841   if (DAG.NewNodesMustHaveLegalTypes)
11842     return SDValue();
11843 
11844   // Without V, this transform isn't useful.  We could form the (illegal)
11845   // operations and let them be scalarized again, but there's really no point.
11846   if (!Subtarget.hasVInstructions())
11847     return SDValue();
11848 
11849   const SDLoc DL(N);
11850   const EVT VT = N->getValueType(0);
11851   const unsigned Opc = N->getOpcode();
11852 
11853   // For FADD, we only handle the case with reassociation allowed.  We
11854   // could handle strict reduction order, but at the moment, there's no
11855   // known reason to, and the complexity isn't worth it.
11856   // TODO: Handle fminnum and fmaxnum here
11857   if (!VT.isInteger() &&
11858       (Opc != ISD::FADD || !N->getFlags().hasAllowReassociation()))
11859     return SDValue();
11860 
11861   const unsigned ReduceOpc = getVecReduceOpcode(Opc);
11862   assert(Opc == ISD::getVecReduceBaseOpcode(ReduceOpc) &&
11863          "Inconsistent mappings");
11864   SDValue LHS = N->getOperand(0);
11865   SDValue RHS = N->getOperand(1);
11866 
11867   if (!LHS.hasOneUse() || !RHS.hasOneUse())
11868     return SDValue();
11869 
11870   if (RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11871     std::swap(LHS, RHS);
11872 
11873   if (RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
11874       !isa<ConstantSDNode>(RHS.getOperand(1)))
11875     return SDValue();
11876 
11877   uint64_t RHSIdx = cast<ConstantSDNode>(RHS.getOperand(1))->getLimitedValue();
11878   SDValue SrcVec = RHS.getOperand(0);
11879   EVT SrcVecVT = SrcVec.getValueType();
11880   assert(SrcVecVT.getVectorElementType() == VT);
11881   if (SrcVecVT.isScalableVector())
11882     return SDValue();
11883 
11884   if (SrcVecVT.getScalarSizeInBits() > Subtarget.getELen())
11885     return SDValue();
11886 
11887   // match binop (extract_vector_elt V, 0), (extract_vector_elt V, 1) to
11888   // reduce_op (extract_subvector [2 x VT] from V).  This will form the
11889   // root of our reduction tree. TODO: We could extend this to any two
11890   // adjacent aligned constant indices if desired.
11891   if (LHS.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
11892       LHS.getOperand(0) == SrcVec && isa<ConstantSDNode>(LHS.getOperand(1))) {
11893     uint64_t LHSIdx =
11894       cast<ConstantSDNode>(LHS.getOperand(1))->getLimitedValue();
11895     if (0 == std::min(LHSIdx, RHSIdx) && 1 == std::max(LHSIdx, RHSIdx)) {
11896       EVT ReduceVT = EVT::getVectorVT(*DAG.getContext(), VT, 2);
11897       SDValue Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ReduceVT, SrcVec,
11898                                 DAG.getVectorIdxConstant(0, DL));
11899       return DAG.getNode(ReduceOpc, DL, VT, Vec, N->getFlags());
11900     }
11901   }
11902 
11903   // Match (binop (reduce (extract_subvector V, 0),
11904   //                      (extract_vector_elt V, sizeof(SubVec))))
11905   // into a reduction of one more element from the original vector V.
11906   if (LHS.getOpcode() != ReduceOpc)
11907     return SDValue();
11908 
11909   SDValue ReduceVec = LHS.getOperand(0);
11910   if (ReduceVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
11911       ReduceVec.hasOneUse() && ReduceVec.getOperand(0) == RHS.getOperand(0) &&
11912       isNullConstant(ReduceVec.getOperand(1)) &&
11913       ReduceVec.getValueType().getVectorNumElements() == RHSIdx) {
11914     // For illegal types (e.g. 3xi32), most will be combined again into a
11915     // wider (hopefully legal) type.  If this is a terminal state, we are
11916     // relying on type legalization here to produce something reasonable
11917     // and this lowering quality could probably be improved. (TODO)
11918     EVT ReduceVT = EVT::getVectorVT(*DAG.getContext(), VT, RHSIdx + 1);
11919     SDValue Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ReduceVT, SrcVec,
11920                               DAG.getVectorIdxConstant(0, DL));
11921     auto Flags = ReduceVec->getFlags();
11922     Flags.intersectWith(N->getFlags());
11923     return DAG.getNode(ReduceOpc, DL, VT, Vec, Flags);
11924   }
11925 
11926   return SDValue();
11927 }
11928 
11929 
11930 // Try to fold (<bop> x, (reduction.<bop> vec, start))
11931 static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG,
11932                                     const RISCVSubtarget &Subtarget) {
11933   auto BinOpToRVVReduce = [](unsigned Opc) {
11934     switch (Opc) {
11935     default:
11936       llvm_unreachable("Unhandled binary to transfrom reduction");
11937     case ISD::ADD:
11938       return RISCVISD::VECREDUCE_ADD_VL;
11939     case ISD::UMAX:
11940       return RISCVISD::VECREDUCE_UMAX_VL;
11941     case ISD::SMAX:
11942       return RISCVISD::VECREDUCE_SMAX_VL;
11943     case ISD::UMIN:
11944       return RISCVISD::VECREDUCE_UMIN_VL;
11945     case ISD::SMIN:
11946       return RISCVISD::VECREDUCE_SMIN_VL;
11947     case ISD::AND:
11948       return RISCVISD::VECREDUCE_AND_VL;
11949     case ISD::OR:
11950       return RISCVISD::VECREDUCE_OR_VL;
11951     case ISD::XOR:
11952       return RISCVISD::VECREDUCE_XOR_VL;
11953     case ISD::FADD:
11954       return RISCVISD::VECREDUCE_FADD_VL;
11955     case ISD::FMAXNUM:
11956       return RISCVISD::VECREDUCE_FMAX_VL;
11957     case ISD::FMINNUM:
11958       return RISCVISD::VECREDUCE_FMIN_VL;
11959     }
11960   };
11961 
11962   auto IsReduction = [&BinOpToRVVReduce](SDValue V, unsigned Opc) {
11963     return V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
11964            isNullConstant(V.getOperand(1)) &&
11965            V.getOperand(0).getOpcode() == BinOpToRVVReduce(Opc);
11966   };
11967 
11968   unsigned Opc = N->getOpcode();
11969   unsigned ReduceIdx;
11970   if (IsReduction(N->getOperand(0), Opc))
11971     ReduceIdx = 0;
11972   else if (IsReduction(N->getOperand(1), Opc))
11973     ReduceIdx = 1;
11974   else
11975     return SDValue();
11976 
11977   // Skip if FADD disallows reassociation but the combiner needs.
11978   if (Opc == ISD::FADD && !N->getFlags().hasAllowReassociation())
11979     return SDValue();
11980 
11981   SDValue Extract = N->getOperand(ReduceIdx);
11982   SDValue Reduce = Extract.getOperand(0);
11983   if (!Extract.hasOneUse() || !Reduce.hasOneUse())
11984     return SDValue();
11985 
11986   SDValue ScalarV = Reduce.getOperand(2);
11987   EVT ScalarVT = ScalarV.getValueType();
11988   if (ScalarV.getOpcode() == ISD::INSERT_SUBVECTOR &&
11989       ScalarV.getOperand(0)->isUndef() &&
11990       isNullConstant(ScalarV.getOperand(2)))
11991     ScalarV = ScalarV.getOperand(1);
11992 
11993   // Make sure that ScalarV is a splat with VL=1.
11994   if (ScalarV.getOpcode() != RISCVISD::VFMV_S_F_VL &&
11995       ScalarV.getOpcode() != RISCVISD::VMV_S_X_VL &&
11996       ScalarV.getOpcode() != RISCVISD::VMV_V_X_VL)
11997     return SDValue();
11998 
11999   if (!isNonZeroAVL(ScalarV.getOperand(2)))
12000     return SDValue();
12001 
12002   // Check the scalar of ScalarV is neutral element
12003   // TODO: Deal with value other than neutral element.
12004   if (!isNeutralConstant(N->getOpcode(), N->getFlags(), ScalarV.getOperand(1),
12005                          0))
12006     return SDValue();
12007 
12008   // If the AVL is zero, operand 0 will be returned. So it's not safe to fold.
12009   // FIXME: We might be able to improve this if operand 0 is undef.
12010   if (!isNonZeroAVL(Reduce.getOperand(5)))
12011     return SDValue();
12012 
12013   SDValue NewStart = N->getOperand(1 - ReduceIdx);
12014 
12015   SDLoc DL(N);
12016   SDValue NewScalarV =
12017       lowerScalarInsert(NewStart, ScalarV.getOperand(2),
12018                         ScalarV.getSimpleValueType(), DL, DAG, Subtarget);
12019 
12020   // If we looked through an INSERT_SUBVECTOR we need to restore it.
12021   if (ScalarVT != ScalarV.getValueType())
12022     NewScalarV =
12023         DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ScalarVT, DAG.getUNDEF(ScalarVT),
12024                     NewScalarV, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
12025 
12026   SDValue Ops[] = {Reduce.getOperand(0), Reduce.getOperand(1),
12027                    NewScalarV,           Reduce.getOperand(3),
12028                    Reduce.getOperand(4), Reduce.getOperand(5)};
12029   SDValue NewReduce =
12030       DAG.getNode(Reduce.getOpcode(), DL, Reduce.getValueType(), Ops);
12031   return DAG.getNode(Extract.getOpcode(), DL, Extract.getValueType(), NewReduce,
12032                      Extract.getOperand(1));
12033 }
12034 
12035 // Optimize (add (shl x, c0), (shl y, c1)) ->
12036 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
12037 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
12038                                   const RISCVSubtarget &Subtarget) {
12039   // Perform this optimization only in the zba extension.
12040   if (!Subtarget.hasStdExtZba())
12041     return SDValue();
12042 
12043   // Skip for vector types and larger types.
12044   EVT VT = N->getValueType(0);
12045   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
12046     return SDValue();
12047 
12048   // The two operand nodes must be SHL and have no other use.
12049   SDValue N0 = N->getOperand(0);
12050   SDValue N1 = N->getOperand(1);
12051   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
12052       !N0->hasOneUse() || !N1->hasOneUse())
12053     return SDValue();
12054 
12055   // Check c0 and c1.
12056   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
12057   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
12058   if (!N0C || !N1C)
12059     return SDValue();
12060   int64_t C0 = N0C->getSExtValue();
12061   int64_t C1 = N1C->getSExtValue();
12062   if (C0 <= 0 || C1 <= 0)
12063     return SDValue();
12064 
12065   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
12066   int64_t Bits = std::min(C0, C1);
12067   int64_t Diff = std::abs(C0 - C1);
12068   if (Diff != 1 && Diff != 2 && Diff != 3)
12069     return SDValue();
12070 
12071   // Build nodes.
12072   SDLoc DL(N);
12073   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
12074   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
12075   SDValue NA0 =
12076       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
12077   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
12078   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
12079 }
12080 
12081 // Combine a constant select operand into its use:
12082 //
12083 // (and (select cond, -1, c), x)
12084 //   -> (select cond, x, (and x, c))  [AllOnes=1]
12085 // (or  (select cond, 0, c), x)
12086 //   -> (select cond, x, (or x, c))  [AllOnes=0]
12087 // (xor (select cond, 0, c), x)
12088 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
12089 // (add (select cond, 0, c), x)
12090 //   -> (select cond, x, (add x, c))  [AllOnes=0]
12091 // (sub x, (select cond, 0, c))
12092 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
12093 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
12094                                    SelectionDAG &DAG, bool AllOnes,
12095                                    const RISCVSubtarget &Subtarget) {
12096   EVT VT = N->getValueType(0);
12097 
12098   // Skip vectors.
12099   if (VT.isVector())
12100     return SDValue();
12101 
12102   if (!Subtarget.hasShortForwardBranchOpt()) {
12103     // (select cond, x, (and x, c)) has custom lowering with Zicond.
12104     if ((!Subtarget.hasStdExtZicond() &&
12105          !Subtarget.hasVendorXVentanaCondOps()) ||
12106         N->getOpcode() != ISD::AND)
12107       return SDValue();
12108 
12109     // Maybe harmful when condition code has multiple use.
12110     if (Slct.getOpcode() == ISD::SELECT && !Slct.getOperand(0).hasOneUse())
12111       return SDValue();
12112 
12113     // Maybe harmful when VT is wider than XLen.
12114     if (VT.getSizeInBits() > Subtarget.getXLen())
12115       return SDValue();
12116   }
12117 
12118   if ((Slct.getOpcode() != ISD::SELECT &&
12119        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
12120       !Slct.hasOneUse())
12121     return SDValue();
12122 
12123   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
12124     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
12125   };
12126 
12127   bool SwapSelectOps;
12128   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
12129   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
12130   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
12131   SDValue NonConstantVal;
12132   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
12133     SwapSelectOps = false;
12134     NonConstantVal = FalseVal;
12135   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
12136     SwapSelectOps = true;
12137     NonConstantVal = TrueVal;
12138   } else
12139     return SDValue();
12140 
12141   // Slct is now know to be the desired identity constant when CC is true.
12142   TrueVal = OtherOp;
12143   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
12144   // Unless SwapSelectOps says the condition should be false.
12145   if (SwapSelectOps)
12146     std::swap(TrueVal, FalseVal);
12147 
12148   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
12149     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
12150                        {Slct.getOperand(0), Slct.getOperand(1),
12151                         Slct.getOperand(2), TrueVal, FalseVal});
12152 
12153   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
12154                      {Slct.getOperand(0), TrueVal, FalseVal});
12155 }
12156 
12157 // Attempt combineSelectAndUse on each operand of a commutative operator N.
12158 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
12159                                               bool AllOnes,
12160                                               const RISCVSubtarget &Subtarget) {
12161   SDValue N0 = N->getOperand(0);
12162   SDValue N1 = N->getOperand(1);
12163   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes, Subtarget))
12164     return Result;
12165   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes, Subtarget))
12166     return Result;
12167   return SDValue();
12168 }
12169 
12170 // Transform (add (mul x, c0), c1) ->
12171 //           (add (mul (add x, c1/c0), c0), c1%c0).
12172 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
12173 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
12174 // to an infinite loop in DAGCombine if transformed.
12175 // Or transform (add (mul x, c0), c1) ->
12176 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
12177 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
12178 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
12179 // lead to an infinite loop in DAGCombine if transformed.
12180 // Or transform (add (mul x, c0), c1) ->
12181 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
12182 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
12183 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
12184 // lead to an infinite loop in DAGCombine if transformed.
12185 // Or transform (add (mul x, c0), c1) ->
12186 //              (mul (add x, c1/c0), c0).
12187 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
12188 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
12189                                      const RISCVSubtarget &Subtarget) {
12190   // Skip for vector types and larger types.
12191   EVT VT = N->getValueType(0);
12192   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
12193     return SDValue();
12194   // The first operand node must be a MUL and has no other use.
12195   SDValue N0 = N->getOperand(0);
12196   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
12197     return SDValue();
12198   // Check if c0 and c1 match above conditions.
12199   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
12200   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
12201   if (!N0C || !N1C)
12202     return SDValue();
12203   // If N0C has multiple uses it's possible one of the cases in
12204   // DAGCombiner::isMulAddWithConstProfitable will be true, which would result
12205   // in an infinite loop.
12206   if (!N0C->hasOneUse())
12207     return SDValue();
12208   int64_t C0 = N0C->getSExtValue();
12209   int64_t C1 = N1C->getSExtValue();
12210   int64_t CA, CB;
12211   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
12212     return SDValue();
12213   // Search for proper CA (non-zero) and CB that both are simm12.
12214   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
12215       !isInt<12>(C0 * (C1 / C0))) {
12216     CA = C1 / C0;
12217     CB = C1 % C0;
12218   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
12219              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
12220     CA = C1 / C0 + 1;
12221     CB = C1 % C0 - C0;
12222   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
12223              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
12224     CA = C1 / C0 - 1;
12225     CB = C1 % C0 + C0;
12226   } else
12227     return SDValue();
12228   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
12229   SDLoc DL(N);
12230   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
12231                              DAG.getConstant(CA, DL, VT));
12232   SDValue New1 =
12233       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
12234   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
12235 }
12236 
12237 // Try to turn (add (xor bool, 1) -1) into (neg bool).
12238 static SDValue combineAddOfBooleanXor(SDNode *N, SelectionDAG &DAG) {
12239   SDValue N0 = N->getOperand(0);
12240   SDValue N1 = N->getOperand(1);
12241   EVT VT = N->getValueType(0);
12242   SDLoc DL(N);
12243 
12244   // RHS should be -1.
12245   if (!isAllOnesConstant(N1))
12246     return SDValue();
12247 
12248   // Look for (xor X, 1).
12249   if (N0.getOpcode() != ISD::XOR || !isOneConstant(N0.getOperand(1)))
12250     return SDValue();
12251 
12252   // First xor input should be 0 or 1.
12253   APInt Mask = APInt::getBitsSetFrom(VT.getSizeInBits(), 1);
12254   if (!DAG.MaskedValueIsZero(N0.getOperand(0), Mask))
12255     return SDValue();
12256 
12257   // Emit a negate of the setcc.
12258   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
12259                      N0.getOperand(0));
12260 }
12261 
12262 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
12263                                  const RISCVSubtarget &Subtarget) {
12264   if (SDValue V = combineAddOfBooleanXor(N, DAG))
12265     return V;
12266   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
12267     return V;
12268   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
12269     return V;
12270   if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
12271     return V;
12272   if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget))
12273     return V;
12274 
12275   // fold (add (select lhs, rhs, cc, 0, y), x) ->
12276   //      (select lhs, rhs, cc, x, (add x, y))
12277   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
12278 }
12279 
12280 // Try to turn a sub boolean RHS and constant LHS into an addi.
12281 static SDValue combineSubOfBoolean(SDNode *N, SelectionDAG &DAG) {
12282   SDValue N0 = N->getOperand(0);
12283   SDValue N1 = N->getOperand(1);
12284   EVT VT = N->getValueType(0);
12285   SDLoc DL(N);
12286 
12287   // Require a constant LHS.
12288   auto *N0C = dyn_cast<ConstantSDNode>(N0);
12289   if (!N0C)
12290     return SDValue();
12291 
12292   // All our optimizations involve subtracting 1 from the immediate and forming
12293   // an ADDI. Make sure the new immediate is valid for an ADDI.
12294   APInt ImmValMinus1 = N0C->getAPIntValue() - 1;
12295   if (!ImmValMinus1.isSignedIntN(12))
12296     return SDValue();
12297 
12298   SDValue NewLHS;
12299   if (N1.getOpcode() == ISD::SETCC && N1.hasOneUse()) {
12300     // (sub constant, (setcc x, y, eq/neq)) ->
12301     // (add (setcc x, y, neq/eq), constant - 1)
12302     ISD::CondCode CCVal = cast<CondCodeSDNode>(N1.getOperand(2))->get();
12303     EVT SetCCOpVT = N1.getOperand(0).getValueType();
12304     if (!isIntEqualitySetCC(CCVal) || !SetCCOpVT.isInteger())
12305       return SDValue();
12306     CCVal = ISD::getSetCCInverse(CCVal, SetCCOpVT);
12307     NewLHS =
12308         DAG.getSetCC(SDLoc(N1), VT, N1.getOperand(0), N1.getOperand(1), CCVal);
12309   } else if (N1.getOpcode() == ISD::XOR && isOneConstant(N1.getOperand(1)) &&
12310              N1.getOperand(0).getOpcode() == ISD::SETCC) {
12311     // (sub C, (xor (setcc), 1)) -> (add (setcc), C-1).
12312     // Since setcc returns a bool the xor is equivalent to 1-setcc.
12313     NewLHS = N1.getOperand(0);
12314   } else
12315     return SDValue();
12316 
12317   SDValue NewRHS = DAG.getConstant(ImmValMinus1, DL, VT);
12318   return DAG.getNode(ISD::ADD, DL, VT, NewLHS, NewRHS);
12319 }
12320 
12321 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
12322                                  const RISCVSubtarget &Subtarget) {
12323   if (SDValue V = combineSubOfBoolean(N, DAG))
12324     return V;
12325 
12326   SDValue N0 = N->getOperand(0);
12327   SDValue N1 = N->getOperand(1);
12328   // fold (sub 0, (setcc x, 0, setlt)) -> (sra x, xlen - 1)
12329   if (isNullConstant(N0) && N1.getOpcode() == ISD::SETCC && N1.hasOneUse() &&
12330       isNullConstant(N1.getOperand(1))) {
12331     ISD::CondCode CCVal = cast<CondCodeSDNode>(N1.getOperand(2))->get();
12332     if (CCVal == ISD::SETLT) {
12333       EVT VT = N->getValueType(0);
12334       SDLoc DL(N);
12335       unsigned ShAmt = N0.getValueSizeInBits() - 1;
12336       return DAG.getNode(ISD::SRA, DL, VT, N1.getOperand(0),
12337                          DAG.getConstant(ShAmt, DL, VT));
12338     }
12339   }
12340 
12341   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
12342   //      (select lhs, rhs, cc, x, (sub x, y))
12343   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false, Subtarget);
12344 }
12345 
12346 // Apply DeMorgan's law to (and/or (xor X, 1), (xor Y, 1)) if X and Y are 0/1.
12347 // Legalizing setcc can introduce xors like this. Doing this transform reduces
12348 // the number of xors and may allow the xor to fold into a branch condition.
12349 static SDValue combineDeMorganOfBoolean(SDNode *N, SelectionDAG &DAG) {
12350   SDValue N0 = N->getOperand(0);
12351   SDValue N1 = N->getOperand(1);
12352   bool IsAnd = N->getOpcode() == ISD::AND;
12353 
12354   if (N0.getOpcode() != ISD::XOR || N1.getOpcode() != ISD::XOR)
12355     return SDValue();
12356 
12357   if (!N0.hasOneUse() || !N1.hasOneUse())
12358     return SDValue();
12359 
12360   SDValue N01 = N0.getOperand(1);
12361   SDValue N11 = N1.getOperand(1);
12362 
12363   // For AND, SimplifyDemandedBits may have turned one of the (xor X, 1) into
12364   // (xor X, -1) based on the upper bits of the other operand being 0. If the
12365   // operation is And, allow one of the Xors to use -1.
12366   if (isOneConstant(N01)) {
12367     if (!isOneConstant(N11) && !(IsAnd && isAllOnesConstant(N11)))
12368       return SDValue();
12369   } else if (isOneConstant(N11)) {
12370     // N01 and N11 being 1 was already handled. Handle N11==1 and N01==-1.
12371     if (!(IsAnd && isAllOnesConstant(N01)))
12372       return SDValue();
12373   } else
12374     return SDValue();
12375 
12376   EVT VT = N->getValueType(0);
12377 
12378   SDValue N00 = N0.getOperand(0);
12379   SDValue N10 = N1.getOperand(0);
12380 
12381   // The LHS of the xors needs to be 0/1.
12382   APInt Mask = APInt::getBitsSetFrom(VT.getSizeInBits(), 1);
12383   if (!DAG.MaskedValueIsZero(N00, Mask) || !DAG.MaskedValueIsZero(N10, Mask))
12384     return SDValue();
12385 
12386   // Invert the opcode and insert a new xor.
12387   SDLoc DL(N);
12388   unsigned Opc = IsAnd ? ISD::OR : ISD::AND;
12389   SDValue Logic = DAG.getNode(Opc, DL, VT, N00, N10);
12390   return DAG.getNode(ISD::XOR, DL, VT, Logic, DAG.getConstant(1, DL, VT));
12391 }
12392 
12393 static SDValue performTRUNCATECombine(SDNode *N, SelectionDAG &DAG,
12394                                       const RISCVSubtarget &Subtarget) {
12395   SDValue N0 = N->getOperand(0);
12396   EVT VT = N->getValueType(0);
12397 
12398   // Pre-promote (i1 (truncate (srl X, Y))) on RV64 with Zbs without zero
12399   // extending X. This is safe since we only need the LSB after the shift and
12400   // shift amounts larger than 31 would produce poison. If we wait until
12401   // type legalization, we'll create RISCVISD::SRLW and we can't recover it
12402   // to use a BEXT instruction.
12403   if (!RV64LegalI32 && Subtarget.is64Bit() && Subtarget.hasStdExtZbs() && VT == MVT::i1 &&
12404       N0.getValueType() == MVT::i32 && N0.getOpcode() == ISD::SRL &&
12405       !isa<ConstantSDNode>(N0.getOperand(1)) && N0.hasOneUse()) {
12406     SDLoc DL(N0);
12407     SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N0.getOperand(0));
12408     SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N0.getOperand(1));
12409     SDValue Srl = DAG.getNode(ISD::SRL, DL, MVT::i64, Op0, Op1);
12410     return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Srl);
12411   }
12412 
12413   return SDValue();
12414 }
12415 
12416 // Combines two comparison operation and logic operation to one selection
12417 // operation(min, max) and logic operation. Returns new constructed Node if
12418 // conditions for optimization are satisfied.
12419 static SDValue performANDCombine(SDNode *N,
12420                                  TargetLowering::DAGCombinerInfo &DCI,
12421                                  const RISCVSubtarget &Subtarget) {
12422   SelectionDAG &DAG = DCI.DAG;
12423 
12424   SDValue N0 = N->getOperand(0);
12425   // Pre-promote (i32 (and (srl X, Y), 1)) on RV64 with Zbs without zero
12426   // extending X. This is safe since we only need the LSB after the shift and
12427   // shift amounts larger than 31 would produce poison. If we wait until
12428   // type legalization, we'll create RISCVISD::SRLW and we can't recover it
12429   // to use a BEXT instruction.
12430   if (!RV64LegalI32 && Subtarget.is64Bit() && Subtarget.hasStdExtZbs() &&
12431       N->getValueType(0) == MVT::i32 && isOneConstant(N->getOperand(1)) &&
12432       N0.getOpcode() == ISD::SRL && !isa<ConstantSDNode>(N0.getOperand(1)) &&
12433       N0.hasOneUse()) {
12434     SDLoc DL(N);
12435     SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N0.getOperand(0));
12436     SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N0.getOperand(1));
12437     SDValue Srl = DAG.getNode(ISD::SRL, DL, MVT::i64, Op0, Op1);
12438     SDValue And = DAG.getNode(ISD::AND, DL, MVT::i64, Srl,
12439                               DAG.getConstant(1, DL, MVT::i64));
12440     return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, And);
12441   }
12442 
12443   if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
12444     return V;
12445   if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget))
12446     return V;
12447 
12448   if (DCI.isAfterLegalizeDAG())
12449     if (SDValue V = combineDeMorganOfBoolean(N, DAG))
12450       return V;
12451 
12452   // fold (and (select lhs, rhs, cc, -1, y), x) ->
12453   //      (select lhs, rhs, cc, x, (and x, y))
12454   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true, Subtarget);
12455 }
12456 
12457 // Try to pull an xor with 1 through a select idiom that uses czero_eqz/nez.
12458 // FIXME: Generalize to other binary operators with same operand.
12459 static SDValue combineOrOfCZERO(SDNode *N, SDValue N0, SDValue N1,
12460                                 SelectionDAG &DAG) {
12461   assert(N->getOpcode() == ISD::OR && "Unexpected opcode");
12462 
12463   if (N0.getOpcode() != RISCVISD::CZERO_EQZ ||
12464       N1.getOpcode() != RISCVISD::CZERO_NEZ ||
12465       !N0.hasOneUse() || !N1.hasOneUse())
12466     return SDValue();
12467 
12468   // Should have the same condition.
12469   SDValue Cond = N0.getOperand(1);
12470   if (Cond != N1.getOperand(1))
12471     return SDValue();
12472 
12473   SDValue TrueV = N0.getOperand(0);
12474   SDValue FalseV = N1.getOperand(0);
12475 
12476   if (TrueV.getOpcode() != ISD::XOR || FalseV.getOpcode() != ISD::XOR ||
12477       TrueV.getOperand(1) != FalseV.getOperand(1) ||
12478       !isOneConstant(TrueV.getOperand(1)) ||
12479       !TrueV.hasOneUse() || !FalseV.hasOneUse())
12480     return SDValue();
12481 
12482   EVT VT = N->getValueType(0);
12483   SDLoc DL(N);
12484 
12485   SDValue NewN0 = DAG.getNode(RISCVISD::CZERO_EQZ, DL, VT, TrueV.getOperand(0),
12486                               Cond);
12487   SDValue NewN1 = DAG.getNode(RISCVISD::CZERO_NEZ, DL, VT, FalseV.getOperand(0),
12488                               Cond);
12489   SDValue NewOr = DAG.getNode(ISD::OR, DL, VT, NewN0, NewN1);
12490   return DAG.getNode(ISD::XOR, DL, VT, NewOr, TrueV.getOperand(1));
12491 }
12492 
12493 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
12494                                 const RISCVSubtarget &Subtarget) {
12495   SelectionDAG &DAG = DCI.DAG;
12496 
12497   if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
12498     return V;
12499   if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget))
12500     return V;
12501 
12502   if (DCI.isAfterLegalizeDAG())
12503     if (SDValue V = combineDeMorganOfBoolean(N, DAG))
12504       return V;
12505 
12506   // Look for Or of CZERO_EQZ/NEZ with same condition which is the select idiom.
12507   // We may be able to pull a common operation out of the true and false value.
12508   SDValue N0 = N->getOperand(0);
12509   SDValue N1 = N->getOperand(1);
12510   if (SDValue V = combineOrOfCZERO(N, N0, N1, DAG))
12511     return V;
12512   if (SDValue V = combineOrOfCZERO(N, N1, N0, DAG))
12513     return V;
12514 
12515   // fold (or (select cond, 0, y), x) ->
12516   //      (select cond, x, (or x, y))
12517   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
12518 }
12519 
12520 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
12521                                  const RISCVSubtarget &Subtarget) {
12522   SDValue N0 = N->getOperand(0);
12523   SDValue N1 = N->getOperand(1);
12524 
12525   // Pre-promote (i32 (xor (shl -1, X), ~0)) on RV64 with Zbs so we can use
12526   // (ADDI (BSET X0, X), -1). If we wait until/ type legalization, we'll create
12527   // RISCVISD:::SLLW and we can't recover it to use a BSET instruction.
12528   if (!RV64LegalI32 && Subtarget.is64Bit() && Subtarget.hasStdExtZbs() &&
12529       N->getValueType(0) == MVT::i32 && isAllOnesConstant(N1) &&
12530       N0.getOpcode() == ISD::SHL && isAllOnesConstant(N0.getOperand(0)) &&
12531       !isa<ConstantSDNode>(N0.getOperand(1)) && N0.hasOneUse()) {
12532     SDLoc DL(N);
12533     SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N0.getOperand(0));
12534     SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N0.getOperand(1));
12535     SDValue Shl = DAG.getNode(ISD::SHL, DL, MVT::i64, Op0, Op1);
12536     SDValue And = DAG.getNOT(DL, Shl, MVT::i64);
12537     return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, And);
12538   }
12539 
12540   // fold (xor (sllw 1, x), -1) -> (rolw ~1, x)
12541   // NOTE: Assumes ROL being legal means ROLW is legal.
12542   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12543   if (N0.getOpcode() == RISCVISD::SLLW &&
12544       isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0)) &&
12545       TLI.isOperationLegal(ISD::ROTL, MVT::i64)) {
12546     SDLoc DL(N);
12547     return DAG.getNode(RISCVISD::ROLW, DL, MVT::i64,
12548                        DAG.getConstant(~1, DL, MVT::i64), N0.getOperand(1));
12549   }
12550 
12551   // Fold (xor (setcc constant, y, setlt), 1) -> (setcc y, constant + 1, setlt)
12552   if (N0.getOpcode() == ISD::SETCC && isOneConstant(N1) && N0.hasOneUse()) {
12553     auto *ConstN00 = dyn_cast<ConstantSDNode>(N0.getOperand(0));
12554     ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
12555     if (ConstN00 && CC == ISD::SETLT) {
12556       EVT VT = N0.getValueType();
12557       SDLoc DL(N0);
12558       const APInt &Imm = ConstN00->getAPIntValue();
12559       if ((Imm + 1).isSignedIntN(12))
12560         return DAG.getSetCC(DL, VT, N0.getOperand(1),
12561                             DAG.getConstant(Imm + 1, DL, VT), CC);
12562     }
12563   }
12564 
12565   if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
12566     return V;
12567   if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget))
12568     return V;
12569 
12570   // fold (xor (select cond, 0, y), x) ->
12571   //      (select cond, x, (xor x, y))
12572   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
12573 }
12574 
12575 static SDValue performMULCombine(SDNode *N, SelectionDAG &DAG) {
12576   EVT VT = N->getValueType(0);
12577   if (!VT.isVector())
12578     return SDValue();
12579 
12580   SDLoc DL(N);
12581   SDValue N0 = N->getOperand(0);
12582   SDValue N1 = N->getOperand(1);
12583   SDValue MulOper;
12584   unsigned AddSubOpc;
12585 
12586   // vmadd: (mul (add x, 1), y) -> (add (mul x, y), y)
12587   //        (mul x, add (y, 1)) -> (add x, (mul x, y))
12588   // vnmsub: (mul (sub 1, x), y) -> (sub y, (mul x, y))
12589   //         (mul x, (sub 1, y)) -> (sub x, (mul x, y))
12590   auto IsAddSubWith1 = [&](SDValue V) -> bool {
12591     AddSubOpc = V->getOpcode();
12592     if ((AddSubOpc == ISD::ADD || AddSubOpc == ISD::SUB) && V->hasOneUse()) {
12593       SDValue Opnd = V->getOperand(1);
12594       MulOper = V->getOperand(0);
12595       if (AddSubOpc == ISD::SUB)
12596         std::swap(Opnd, MulOper);
12597       if (isOneOrOneSplat(Opnd))
12598         return true;
12599     }
12600     return false;
12601   };
12602 
12603   if (IsAddSubWith1(N0)) {
12604     SDValue MulVal = DAG.getNode(ISD::MUL, DL, VT, N1, MulOper);
12605     return DAG.getNode(AddSubOpc, DL, VT, N1, MulVal);
12606   }
12607 
12608   if (IsAddSubWith1(N1)) {
12609     SDValue MulVal = DAG.getNode(ISD::MUL, DL, VT, N0, MulOper);
12610     return DAG.getNode(AddSubOpc, DL, VT, N0, MulVal);
12611   }
12612 
12613   return SDValue();
12614 }
12615 
12616 /// According to the property that indexed load/store instructions zero-extend
12617 /// their indices, try to narrow the type of index operand.
12618 static bool narrowIndex(SDValue &N, ISD::MemIndexType IndexType, SelectionDAG &DAG) {
12619   if (isIndexTypeSigned(IndexType))
12620     return false;
12621 
12622   if (!N->hasOneUse())
12623     return false;
12624 
12625   EVT VT = N.getValueType();
12626   SDLoc DL(N);
12627 
12628   // In general, what we're doing here is seeing if we can sink a truncate to
12629   // a smaller element type into the expression tree building our index.
12630   // TODO: We can generalize this and handle a bunch more cases if useful.
12631 
12632   // Narrow a buildvector to the narrowest element type.  This requires less
12633   // work and less register pressure at high LMUL, and creates smaller constants
12634   // which may be cheaper to materialize.
12635   if (ISD::isBuildVectorOfConstantSDNodes(N.getNode())) {
12636     KnownBits Known = DAG.computeKnownBits(N);
12637     unsigned ActiveBits = std::max(8u, Known.countMaxActiveBits());
12638     LLVMContext &C = *DAG.getContext();
12639     EVT ResultVT = EVT::getIntegerVT(C, ActiveBits).getRoundIntegerType(C);
12640     if (ResultVT.bitsLT(VT.getVectorElementType())) {
12641       N = DAG.getNode(ISD::TRUNCATE, DL,
12642                       VT.changeVectorElementType(ResultVT), N);
12643       return true;
12644     }
12645   }
12646 
12647   // Handle the pattern (shl (zext x to ty), C) and bits(x) + C < bits(ty).
12648   if (N.getOpcode() != ISD::SHL)
12649     return false;
12650 
12651   SDValue N0 = N.getOperand(0);
12652   if (N0.getOpcode() != ISD::ZERO_EXTEND &&
12653       N0.getOpcode() != RISCVISD::VZEXT_VL)
12654     return false;;
12655   if (!N0->hasOneUse())
12656     return false;;
12657 
12658   APInt ShAmt;
12659   SDValue N1 = N.getOperand(1);
12660   if (!ISD::isConstantSplatVector(N1.getNode(), ShAmt))
12661     return false;;
12662 
12663   SDValue Src = N0.getOperand(0);
12664   EVT SrcVT = Src.getValueType();
12665   unsigned SrcElen = SrcVT.getScalarSizeInBits();
12666   unsigned ShAmtV = ShAmt.getZExtValue();
12667   unsigned NewElen = PowerOf2Ceil(SrcElen + ShAmtV);
12668   NewElen = std::max(NewElen, 8U);
12669 
12670   // Skip if NewElen is not narrower than the original extended type.
12671   if (NewElen >= N0.getValueType().getScalarSizeInBits())
12672     return false;
12673 
12674   EVT NewEltVT = EVT::getIntegerVT(*DAG.getContext(), NewElen);
12675   EVT NewVT = SrcVT.changeVectorElementType(NewEltVT);
12676 
12677   SDValue NewExt = DAG.getNode(N0->getOpcode(), DL, NewVT, N0->ops());
12678   SDValue NewShAmtVec = DAG.getConstant(ShAmtV, DL, NewVT);
12679   N = DAG.getNode(ISD::SHL, DL, NewVT, NewExt, NewShAmtVec);
12680   return true;
12681 }
12682 
12683 // Replace (seteq (i64 (and X, 0xffffffff)), C1) with
12684 // (seteq (i64 (sext_inreg (X, i32)), C1')) where C1' is C1 sign extended from
12685 // bit 31. Same for setne. C1' may be cheaper to materialize and the sext_inreg
12686 // can become a sext.w instead of a shift pair.
12687 static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG,
12688                                    const RISCVSubtarget &Subtarget) {
12689   SDValue N0 = N->getOperand(0);
12690   SDValue N1 = N->getOperand(1);
12691   EVT VT = N->getValueType(0);
12692   EVT OpVT = N0.getValueType();
12693 
12694   if (OpVT != MVT::i64 || !Subtarget.is64Bit())
12695     return SDValue();
12696 
12697   // RHS needs to be a constant.
12698   auto *N1C = dyn_cast<ConstantSDNode>(N1);
12699   if (!N1C)
12700     return SDValue();
12701 
12702   // LHS needs to be (and X, 0xffffffff).
12703   if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
12704       !isa<ConstantSDNode>(N0.getOperand(1)) ||
12705       N0.getConstantOperandVal(1) != UINT64_C(0xffffffff))
12706     return SDValue();
12707 
12708   // Looking for an equality compare.
12709   ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
12710   if (!isIntEqualitySetCC(Cond))
12711     return SDValue();
12712 
12713   // Don't do this if the sign bit is provably zero, it will be turned back into
12714   // an AND.
12715   APInt SignMask = APInt::getOneBitSet(64, 31);
12716   if (DAG.MaskedValueIsZero(N0.getOperand(0), SignMask))
12717     return SDValue();
12718 
12719   const APInt &C1 = N1C->getAPIntValue();
12720 
12721   SDLoc dl(N);
12722   // If the constant is larger than 2^32 - 1 it is impossible for both sides
12723   // to be equal.
12724   if (C1.getActiveBits() > 32)
12725     return DAG.getBoolConstant(Cond == ISD::SETNE, dl, VT, OpVT);
12726 
12727   SDValue SExtOp = DAG.getNode(ISD::SIGN_EXTEND_INREG, N, OpVT,
12728                                N0.getOperand(0), DAG.getValueType(MVT::i32));
12729   return DAG.getSetCC(dl, VT, SExtOp, DAG.getConstant(C1.trunc(32).sext(64),
12730                                                       dl, OpVT), Cond);
12731 }
12732 
12733 static SDValue
12734 performSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
12735                                 const RISCVSubtarget &Subtarget) {
12736   SDValue Src = N->getOperand(0);
12737   EVT VT = N->getValueType(0);
12738 
12739   // Fold (sext_inreg (fmv_x_anyexth X), i16) -> (fmv_x_signexth X)
12740   if (Src.getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
12741       cast<VTSDNode>(N->getOperand(1))->getVT().bitsGE(MVT::i16))
12742     return DAG.getNode(RISCVISD::FMV_X_SIGNEXTH, SDLoc(N), VT,
12743                        Src.getOperand(0));
12744 
12745   return SDValue();
12746 }
12747 
12748 namespace {
12749 // Forward declaration of the structure holding the necessary information to
12750 // apply a combine.
12751 struct CombineResult;
12752 
12753 /// Helper class for folding sign/zero extensions.
12754 /// In particular, this class is used for the following combines:
12755 /// add_vl -> vwadd(u) | vwadd(u)_w
12756 /// sub_vl -> vwsub(u) | vwsub(u)_w
12757 /// mul_vl -> vwmul(u) | vwmul_su
12758 ///
12759 /// An object of this class represents an operand of the operation we want to
12760 /// combine.
12761 /// E.g., when trying to combine `mul_vl a, b`, we will have one instance of
12762 /// NodeExtensionHelper for `a` and one for `b`.
12763 ///
12764 /// This class abstracts away how the extension is materialized and
12765 /// how its Mask, VL, number of users affect the combines.
12766 ///
12767 /// In particular:
12768 /// - VWADD_W is conceptually == add(op0, sext(op1))
12769 /// - VWADDU_W == add(op0, zext(op1))
12770 /// - VWSUB_W == sub(op0, sext(op1))
12771 /// - VWSUBU_W == sub(op0, zext(op1))
12772 ///
12773 /// And VMV_V_X_VL, depending on the value, is conceptually equivalent to
12774 /// zext|sext(smaller_value).
12775 struct NodeExtensionHelper {
12776   /// Records if this operand is like being zero extended.
12777   bool SupportsZExt;
12778   /// Records if this operand is like being sign extended.
12779   /// Note: SupportsZExt and SupportsSExt are not mutually exclusive. For
12780   /// instance, a splat constant (e.g., 3), would support being both sign and
12781   /// zero extended.
12782   bool SupportsSExt;
12783   /// This boolean captures whether we care if this operand would still be
12784   /// around after the folding happens.
12785   bool EnforceOneUse;
12786   /// Records if this operand's mask needs to match the mask of the operation
12787   /// that it will fold into.
12788   bool CheckMask;
12789   /// Value of the Mask for this operand.
12790   /// It may be SDValue().
12791   SDValue Mask;
12792   /// Value of the vector length operand.
12793   /// It may be SDValue().
12794   SDValue VL;
12795   /// Original value that this NodeExtensionHelper represents.
12796   SDValue OrigOperand;
12797 
12798   /// Get the value feeding the extension or the value itself.
12799   /// E.g., for zext(a), this would return a.
12800   SDValue getSource() const {
12801     switch (OrigOperand.getOpcode()) {
12802     case RISCVISD::VSEXT_VL:
12803     case RISCVISD::VZEXT_VL:
12804       return OrigOperand.getOperand(0);
12805     default:
12806       return OrigOperand;
12807     }
12808   }
12809 
12810   /// Check if this instance represents a splat.
12811   bool isSplat() const {
12812     return OrigOperand.getOpcode() == RISCVISD::VMV_V_X_VL;
12813   }
12814 
12815   /// Get or create a value that can feed \p Root with the given extension \p
12816   /// SExt. If \p SExt is std::nullopt, this returns the source of this operand.
12817   /// \see ::getSource().
12818   SDValue getOrCreateExtendedOp(const SDNode *Root, SelectionDAG &DAG,
12819                                 std::optional<bool> SExt) const {
12820     if (!SExt.has_value())
12821       return OrigOperand;
12822 
12823     MVT NarrowVT = getNarrowType(Root);
12824 
12825     SDValue Source = getSource();
12826     if (Source.getValueType() == NarrowVT)
12827       return Source;
12828 
12829     unsigned ExtOpc = *SExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
12830 
12831     // If we need an extension, we should be changing the type.
12832     SDLoc DL(Root);
12833     auto [Mask, VL] = getMaskAndVL(Root);
12834     switch (OrigOperand.getOpcode()) {
12835     case RISCVISD::VSEXT_VL:
12836     case RISCVISD::VZEXT_VL:
12837       return DAG.getNode(ExtOpc, DL, NarrowVT, Source, Mask, VL);
12838     case RISCVISD::VMV_V_X_VL:
12839       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
12840                          DAG.getUNDEF(NarrowVT), Source.getOperand(1), VL);
12841     default:
12842       // Other opcodes can only come from the original LHS of VW(ADD|SUB)_W_VL
12843       // and that operand should already have the right NarrowVT so no
12844       // extension should be required at this point.
12845       llvm_unreachable("Unsupported opcode");
12846     }
12847   }
12848 
12849   /// Helper function to get the narrow type for \p Root.
12850   /// The narrow type is the type of \p Root where we divided the size of each
12851   /// element by 2. E.g., if Root's type <2xi16> -> narrow type <2xi8>.
12852   /// \pre The size of the type of the elements of Root must be a multiple of 2
12853   /// and be greater than 16.
12854   static MVT getNarrowType(const SDNode *Root) {
12855     MVT VT = Root->getSimpleValueType(0);
12856 
12857     // Determine the narrow size.
12858     unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
12859     assert(NarrowSize >= 8 && "Trying to extend something we can't represent");
12860     MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
12861                                     VT.getVectorElementCount());
12862     return NarrowVT;
12863   }
12864 
12865   /// Return the opcode required to materialize the folding of the sign
12866   /// extensions (\p IsSExt == true) or zero extensions (IsSExt == false) for
12867   /// both operands for \p Opcode.
12868   /// Put differently, get the opcode to materialize:
12869   /// - ISExt == true: \p Opcode(sext(a), sext(b)) -> newOpcode(a, b)
12870   /// - ISExt == false: \p Opcode(zext(a), zext(b)) -> newOpcode(a, b)
12871   /// \pre \p Opcode represents a supported root (\see ::isSupportedRoot()).
12872   static unsigned getSameExtensionOpcode(unsigned Opcode, bool IsSExt) {
12873     switch (Opcode) {
12874     case RISCVISD::ADD_VL:
12875     case RISCVISD::VWADD_W_VL:
12876     case RISCVISD::VWADDU_W_VL:
12877       return IsSExt ? RISCVISD::VWADD_VL : RISCVISD::VWADDU_VL;
12878     case RISCVISD::MUL_VL:
12879       return IsSExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
12880     case RISCVISD::SUB_VL:
12881     case RISCVISD::VWSUB_W_VL:
12882     case RISCVISD::VWSUBU_W_VL:
12883       return IsSExt ? RISCVISD::VWSUB_VL : RISCVISD::VWSUBU_VL;
12884     default:
12885       llvm_unreachable("Unexpected opcode");
12886     }
12887   }
12888 
12889   /// Get the opcode to materialize \p Opcode(sext(a), zext(b)) ->
12890   /// newOpcode(a, b).
12891   static unsigned getSUOpcode(unsigned Opcode) {
12892     assert(Opcode == RISCVISD::MUL_VL && "SU is only supported for MUL");
12893     return RISCVISD::VWMULSU_VL;
12894   }
12895 
12896   /// Get the opcode to materialize \p Opcode(a, s|zext(b)) ->
12897   /// newOpcode(a, b).
12898   static unsigned getWOpcode(unsigned Opcode, bool IsSExt) {
12899     switch (Opcode) {
12900     case RISCVISD::ADD_VL:
12901       return IsSExt ? RISCVISD::VWADD_W_VL : RISCVISD::VWADDU_W_VL;
12902     case RISCVISD::SUB_VL:
12903       return IsSExt ? RISCVISD::VWSUB_W_VL : RISCVISD::VWSUBU_W_VL;
12904     default:
12905       llvm_unreachable("Unexpected opcode");
12906     }
12907   }
12908 
12909   using CombineToTry = std::function<std::optional<CombineResult>(
12910       SDNode * /*Root*/, const NodeExtensionHelper & /*LHS*/,
12911       const NodeExtensionHelper & /*RHS*/)>;
12912 
12913   /// Check if this node needs to be fully folded or extended for all users.
12914   bool needToPromoteOtherUsers() const { return EnforceOneUse; }
12915 
12916   /// Helper method to set the various fields of this struct based on the
12917   /// type of \p Root.
12918   void fillUpExtensionSupport(SDNode *Root, SelectionDAG &DAG) {
12919     SupportsZExt = false;
12920     SupportsSExt = false;
12921     EnforceOneUse = true;
12922     CheckMask = true;
12923     switch (OrigOperand.getOpcode()) {
12924     case RISCVISD::VZEXT_VL:
12925       SupportsZExt = true;
12926       Mask = OrigOperand.getOperand(1);
12927       VL = OrigOperand.getOperand(2);
12928       break;
12929     case RISCVISD::VSEXT_VL:
12930       SupportsSExt = true;
12931       Mask = OrigOperand.getOperand(1);
12932       VL = OrigOperand.getOperand(2);
12933       break;
12934     case RISCVISD::VMV_V_X_VL: {
12935       // Historically, we didn't care about splat values not disappearing during
12936       // combines.
12937       EnforceOneUse = false;
12938       CheckMask = false;
12939       VL = OrigOperand.getOperand(2);
12940 
12941       // The operand is a splat of a scalar.
12942 
12943       // The pasthru must be undef for tail agnostic.
12944       if (!OrigOperand.getOperand(0).isUndef())
12945         break;
12946 
12947       // Get the scalar value.
12948       SDValue Op = OrigOperand.getOperand(1);
12949 
12950       // See if we have enough sign bits or zero bits in the scalar to use a
12951       // widening opcode by splatting to smaller element size.
12952       MVT VT = Root->getSimpleValueType(0);
12953       unsigned EltBits = VT.getScalarSizeInBits();
12954       unsigned ScalarBits = Op.getValueSizeInBits();
12955       // Make sure we're getting all element bits from the scalar register.
12956       // FIXME: Support implicit sign extension of vmv.v.x?
12957       if (ScalarBits < EltBits)
12958         break;
12959 
12960       unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
12961       // If the narrow type cannot be expressed with a legal VMV,
12962       // this is not a valid candidate.
12963       if (NarrowSize < 8)
12964         break;
12965 
12966       if (DAG.ComputeMaxSignificantBits(Op) <= NarrowSize)
12967         SupportsSExt = true;
12968       if (DAG.MaskedValueIsZero(Op,
12969                                 APInt::getBitsSetFrom(ScalarBits, NarrowSize)))
12970         SupportsZExt = true;
12971       break;
12972     }
12973     default:
12974       break;
12975     }
12976   }
12977 
12978   /// Check if \p Root supports any extension folding combines.
12979   static bool isSupportedRoot(const SDNode *Root) {
12980     switch (Root->getOpcode()) {
12981     case RISCVISD::ADD_VL:
12982     case RISCVISD::MUL_VL:
12983     case RISCVISD::VWADD_W_VL:
12984     case RISCVISD::VWADDU_W_VL:
12985     case RISCVISD::SUB_VL:
12986     case RISCVISD::VWSUB_W_VL:
12987     case RISCVISD::VWSUBU_W_VL:
12988       return true;
12989     default:
12990       return false;
12991     }
12992   }
12993 
12994   /// Build a NodeExtensionHelper for \p Root.getOperand(\p OperandIdx).
12995   NodeExtensionHelper(SDNode *Root, unsigned OperandIdx, SelectionDAG &DAG) {
12996     assert(isSupportedRoot(Root) && "Trying to build an helper with an "
12997                                     "unsupported root");
12998     assert(OperandIdx < 2 && "Requesting something else than LHS or RHS");
12999     OrigOperand = Root->getOperand(OperandIdx);
13000 
13001     unsigned Opc = Root->getOpcode();
13002     switch (Opc) {
13003     // We consider VW<ADD|SUB>(U)_W(LHS, RHS) as if they were
13004     // <ADD|SUB>(LHS, S|ZEXT(RHS))
13005     case RISCVISD::VWADD_W_VL:
13006     case RISCVISD::VWADDU_W_VL:
13007     case RISCVISD::VWSUB_W_VL:
13008     case RISCVISD::VWSUBU_W_VL:
13009       if (OperandIdx == 1) {
13010         SupportsZExt =
13011             Opc == RISCVISD::VWADDU_W_VL || Opc == RISCVISD::VWSUBU_W_VL;
13012         SupportsSExt = !SupportsZExt;
13013         std::tie(Mask, VL) = getMaskAndVL(Root);
13014         CheckMask = true;
13015         // There's no existing extension here, so we don't have to worry about
13016         // making sure it gets removed.
13017         EnforceOneUse = false;
13018         break;
13019       }
13020       [[fallthrough]];
13021     default:
13022       fillUpExtensionSupport(Root, DAG);
13023       break;
13024     }
13025   }
13026 
13027   /// Check if this operand is compatible with the given vector length \p VL.
13028   bool isVLCompatible(SDValue VL) const {
13029     return this->VL != SDValue() && this->VL == VL;
13030   }
13031 
13032   /// Check if this operand is compatible with the given \p Mask.
13033   bool isMaskCompatible(SDValue Mask) const {
13034     return !CheckMask || (this->Mask != SDValue() && this->Mask == Mask);
13035   }
13036 
13037   /// Helper function to get the Mask and VL from \p Root.
13038   static std::pair<SDValue, SDValue> getMaskAndVL(const SDNode *Root) {
13039     assert(isSupportedRoot(Root) && "Unexpected root");
13040     return std::make_pair(Root->getOperand(3), Root->getOperand(4));
13041   }
13042 
13043   /// Check if the Mask and VL of this operand are compatible with \p Root.
13044   bool areVLAndMaskCompatible(const SDNode *Root) const {
13045     auto [Mask, VL] = getMaskAndVL(Root);
13046     return isMaskCompatible(Mask) && isVLCompatible(VL);
13047   }
13048 
13049   /// Helper function to check if \p N is commutative with respect to the
13050   /// foldings that are supported by this class.
13051   static bool isCommutative(const SDNode *N) {
13052     switch (N->getOpcode()) {
13053     case RISCVISD::ADD_VL:
13054     case RISCVISD::MUL_VL:
13055     case RISCVISD::VWADD_W_VL:
13056     case RISCVISD::VWADDU_W_VL:
13057       return true;
13058     case RISCVISD::SUB_VL:
13059     case RISCVISD::VWSUB_W_VL:
13060     case RISCVISD::VWSUBU_W_VL:
13061       return false;
13062     default:
13063       llvm_unreachable("Unexpected opcode");
13064     }
13065   }
13066 
13067   /// Get a list of combine to try for folding extensions in \p Root.
13068   /// Note that each returned CombineToTry function doesn't actually modify
13069   /// anything. Instead they produce an optional CombineResult that if not None,
13070   /// need to be materialized for the combine to be applied.
13071   /// \see CombineResult::materialize.
13072   /// If the related CombineToTry function returns std::nullopt, that means the
13073   /// combine didn't match.
13074   static SmallVector<CombineToTry> getSupportedFoldings(const SDNode *Root);
13075 };
13076 
13077 /// Helper structure that holds all the necessary information to materialize a
13078 /// combine that does some extension folding.
13079 struct CombineResult {
13080   /// Opcode to be generated when materializing the combine.
13081   unsigned TargetOpcode;
13082   // No value means no extension is needed. If extension is needed, the value
13083   // indicates if it needs to be sign extended.
13084   std::optional<bool> SExtLHS;
13085   std::optional<bool> SExtRHS;
13086   /// Root of the combine.
13087   SDNode *Root;
13088   /// LHS of the TargetOpcode.
13089   NodeExtensionHelper LHS;
13090   /// RHS of the TargetOpcode.
13091   NodeExtensionHelper RHS;
13092 
13093   CombineResult(unsigned TargetOpcode, SDNode *Root,
13094                 const NodeExtensionHelper &LHS, std::optional<bool> SExtLHS,
13095                 const NodeExtensionHelper &RHS, std::optional<bool> SExtRHS)
13096       : TargetOpcode(TargetOpcode), SExtLHS(SExtLHS), SExtRHS(SExtRHS),
13097         Root(Root), LHS(LHS), RHS(RHS) {}
13098 
13099   /// Return a value that uses TargetOpcode and that can be used to replace
13100   /// Root.
13101   /// The actual replacement is *not* done in that method.
13102   SDValue materialize(SelectionDAG &DAG) const {
13103     SDValue Mask, VL, Merge;
13104     std::tie(Mask, VL) = NodeExtensionHelper::getMaskAndVL(Root);
13105     Merge = Root->getOperand(2);
13106     return DAG.getNode(TargetOpcode, SDLoc(Root), Root->getValueType(0),
13107                        LHS.getOrCreateExtendedOp(Root, DAG, SExtLHS),
13108                        RHS.getOrCreateExtendedOp(Root, DAG, SExtRHS), Merge,
13109                        Mask, VL);
13110   }
13111 };
13112 
13113 /// Check if \p Root follows a pattern Root(ext(LHS), ext(RHS))
13114 /// where `ext` is the same for both LHS and RHS (i.e., both are sext or both
13115 /// are zext) and LHS and RHS can be folded into Root.
13116 /// AllowSExt and AllozZExt define which form `ext` can take in this pattern.
13117 ///
13118 /// \note If the pattern can match with both zext and sext, the returned
13119 /// CombineResult will feature the zext result.
13120 ///
13121 /// \returns std::nullopt if the pattern doesn't match or a CombineResult that
13122 /// can be used to apply the pattern.
13123 static std::optional<CombineResult>
13124 canFoldToVWWithSameExtensionImpl(SDNode *Root, const NodeExtensionHelper &LHS,
13125                                  const NodeExtensionHelper &RHS, bool AllowSExt,
13126                                  bool AllowZExt) {
13127   assert((AllowSExt || AllowZExt) && "Forgot to set what you want?");
13128   if (!LHS.areVLAndMaskCompatible(Root) || !RHS.areVLAndMaskCompatible(Root))
13129     return std::nullopt;
13130   if (AllowZExt && LHS.SupportsZExt && RHS.SupportsZExt)
13131     return CombineResult(NodeExtensionHelper::getSameExtensionOpcode(
13132                              Root->getOpcode(), /*IsSExt=*/false),
13133                          Root, LHS, /*SExtLHS=*/false, RHS,
13134                          /*SExtRHS=*/false);
13135   if (AllowSExt && LHS.SupportsSExt && RHS.SupportsSExt)
13136     return CombineResult(NodeExtensionHelper::getSameExtensionOpcode(
13137                              Root->getOpcode(), /*IsSExt=*/true),
13138                          Root, LHS, /*SExtLHS=*/true, RHS,
13139                          /*SExtRHS=*/true);
13140   return std::nullopt;
13141 }
13142 
13143 /// Check if \p Root follows a pattern Root(ext(LHS), ext(RHS))
13144 /// where `ext` is the same for both LHS and RHS (i.e., both are sext or both
13145 /// are zext) and LHS and RHS can be folded into Root.
13146 ///
13147 /// \returns std::nullopt if the pattern doesn't match or a CombineResult that
13148 /// can be used to apply the pattern.
13149 static std::optional<CombineResult>
13150 canFoldToVWWithSameExtension(SDNode *Root, const NodeExtensionHelper &LHS,
13151                              const NodeExtensionHelper &RHS) {
13152   return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, /*AllowSExt=*/true,
13153                                           /*AllowZExt=*/true);
13154 }
13155 
13156 /// Check if \p Root follows a pattern Root(LHS, ext(RHS))
13157 ///
13158 /// \returns std::nullopt if the pattern doesn't match or a CombineResult that
13159 /// can be used to apply the pattern.
13160 static std::optional<CombineResult>
13161 canFoldToVW_W(SDNode *Root, const NodeExtensionHelper &LHS,
13162               const NodeExtensionHelper &RHS) {
13163   if (!RHS.areVLAndMaskCompatible(Root))
13164     return std::nullopt;
13165 
13166   // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar
13167   // sext/zext?
13168   // Control this behavior behind an option (AllowSplatInVW_W) for testing
13169   // purposes.
13170   if (RHS.SupportsZExt && (!RHS.isSplat() || AllowSplatInVW_W))
13171     return CombineResult(
13172         NodeExtensionHelper::getWOpcode(Root->getOpcode(), /*IsSExt=*/false),
13173         Root, LHS, /*SExtLHS=*/std::nullopt, RHS, /*SExtRHS=*/false);
13174   if (RHS.SupportsSExt && (!RHS.isSplat() || AllowSplatInVW_W))
13175     return CombineResult(
13176         NodeExtensionHelper::getWOpcode(Root->getOpcode(), /*IsSExt=*/true),
13177         Root, LHS, /*SExtLHS=*/std::nullopt, RHS, /*SExtRHS=*/true);
13178   return std::nullopt;
13179 }
13180 
13181 /// Check if \p Root follows a pattern Root(sext(LHS), sext(RHS))
13182 ///
13183 /// \returns std::nullopt if the pattern doesn't match or a CombineResult that
13184 /// can be used to apply the pattern.
13185 static std::optional<CombineResult>
13186 canFoldToVWWithSEXT(SDNode *Root, const NodeExtensionHelper &LHS,
13187                     const NodeExtensionHelper &RHS) {
13188   return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, /*AllowSExt=*/true,
13189                                           /*AllowZExt=*/false);
13190 }
13191 
13192 /// Check if \p Root follows a pattern Root(zext(LHS), zext(RHS))
13193 ///
13194 /// \returns std::nullopt if the pattern doesn't match or a CombineResult that
13195 /// can be used to apply the pattern.
13196 static std::optional<CombineResult>
13197 canFoldToVWWithZEXT(SDNode *Root, const NodeExtensionHelper &LHS,
13198                     const NodeExtensionHelper &RHS) {
13199   return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, /*AllowSExt=*/false,
13200                                           /*AllowZExt=*/true);
13201 }
13202 
13203 /// Check if \p Root follows a pattern Root(sext(LHS), zext(RHS))
13204 ///
13205 /// \returns std::nullopt if the pattern doesn't match or a CombineResult that
13206 /// can be used to apply the pattern.
13207 static std::optional<CombineResult>
13208 canFoldToVW_SU(SDNode *Root, const NodeExtensionHelper &LHS,
13209                const NodeExtensionHelper &RHS) {
13210   if (!LHS.SupportsSExt || !RHS.SupportsZExt)
13211     return std::nullopt;
13212   if (!LHS.areVLAndMaskCompatible(Root) || !RHS.areVLAndMaskCompatible(Root))
13213     return std::nullopt;
13214   return CombineResult(NodeExtensionHelper::getSUOpcode(Root->getOpcode()),
13215                        Root, LHS, /*SExtLHS=*/true, RHS, /*SExtRHS=*/false);
13216 }
13217 
13218 SmallVector<NodeExtensionHelper::CombineToTry>
13219 NodeExtensionHelper::getSupportedFoldings(const SDNode *Root) {
13220   SmallVector<CombineToTry> Strategies;
13221   switch (Root->getOpcode()) {
13222   case RISCVISD::ADD_VL:
13223   case RISCVISD::SUB_VL:
13224     // add|sub -> vwadd(u)|vwsub(u)
13225     Strategies.push_back(canFoldToVWWithSameExtension);
13226     // add|sub -> vwadd(u)_w|vwsub(u)_w
13227     Strategies.push_back(canFoldToVW_W);
13228     break;
13229   case RISCVISD::MUL_VL:
13230     // mul -> vwmul(u)
13231     Strategies.push_back(canFoldToVWWithSameExtension);
13232     // mul -> vwmulsu
13233     Strategies.push_back(canFoldToVW_SU);
13234     break;
13235   case RISCVISD::VWADD_W_VL:
13236   case RISCVISD::VWSUB_W_VL:
13237     // vwadd_w|vwsub_w -> vwadd|vwsub
13238     Strategies.push_back(canFoldToVWWithSEXT);
13239     break;
13240   case RISCVISD::VWADDU_W_VL:
13241   case RISCVISD::VWSUBU_W_VL:
13242     // vwaddu_w|vwsubu_w -> vwaddu|vwsubu
13243     Strategies.push_back(canFoldToVWWithZEXT);
13244     break;
13245   default:
13246     llvm_unreachable("Unexpected opcode");
13247   }
13248   return Strategies;
13249 }
13250 } // End anonymous namespace.
13251 
13252 /// Combine a binary operation to its equivalent VW or VW_W form.
13253 /// The supported combines are:
13254 /// add_vl -> vwadd(u) | vwadd(u)_w
13255 /// sub_vl -> vwsub(u) | vwsub(u)_w
13256 /// mul_vl -> vwmul(u) | vwmul_su
13257 /// vwadd_w(u) -> vwadd(u)
13258 /// vwub_w(u) -> vwadd(u)
13259 static SDValue
13260 combineBinOp_VLToVWBinOp_VL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
13261   SelectionDAG &DAG = DCI.DAG;
13262 
13263   assert(NodeExtensionHelper::isSupportedRoot(N) &&
13264          "Shouldn't have called this method");
13265   SmallVector<SDNode *> Worklist;
13266   SmallSet<SDNode *, 8> Inserted;
13267   Worklist.push_back(N);
13268   Inserted.insert(N);
13269   SmallVector<CombineResult> CombinesToApply;
13270 
13271   while (!Worklist.empty()) {
13272     SDNode *Root = Worklist.pop_back_val();
13273     if (!NodeExtensionHelper::isSupportedRoot(Root))
13274       return SDValue();
13275 
13276     NodeExtensionHelper LHS(N, 0, DAG);
13277     NodeExtensionHelper RHS(N, 1, DAG);
13278     auto AppendUsersIfNeeded = [&Worklist,
13279                                 &Inserted](const NodeExtensionHelper &Op) {
13280       if (Op.needToPromoteOtherUsers()) {
13281         for (SDNode *TheUse : Op.OrigOperand->uses()) {
13282           if (Inserted.insert(TheUse).second)
13283             Worklist.push_back(TheUse);
13284         }
13285       }
13286     };
13287 
13288     // Control the compile time by limiting the number of node we look at in
13289     // total.
13290     if (Inserted.size() > ExtensionMaxWebSize)
13291       return SDValue();
13292 
13293     SmallVector<NodeExtensionHelper::CombineToTry> FoldingStrategies =
13294         NodeExtensionHelper::getSupportedFoldings(N);
13295 
13296     assert(!FoldingStrategies.empty() && "Nothing to be folded");
13297     bool Matched = false;
13298     for (int Attempt = 0;
13299          (Attempt != 1 + NodeExtensionHelper::isCommutative(N)) && !Matched;
13300          ++Attempt) {
13301 
13302       for (NodeExtensionHelper::CombineToTry FoldingStrategy :
13303            FoldingStrategies) {
13304         std::optional<CombineResult> Res = FoldingStrategy(N, LHS, RHS);
13305         if (Res) {
13306           Matched = true;
13307           CombinesToApply.push_back(*Res);
13308           // All the inputs that are extended need to be folded, otherwise
13309           // we would be leaving the old input (since it is may still be used),
13310           // and the new one.
13311           if (Res->SExtLHS.has_value())
13312             AppendUsersIfNeeded(LHS);
13313           if (Res->SExtRHS.has_value())
13314             AppendUsersIfNeeded(RHS);
13315           break;
13316         }
13317       }
13318       std::swap(LHS, RHS);
13319     }
13320     // Right now we do an all or nothing approach.
13321     if (!Matched)
13322       return SDValue();
13323   }
13324   // Store the value for the replacement of the input node separately.
13325   SDValue InputRootReplacement;
13326   // We do the RAUW after we materialize all the combines, because some replaced
13327   // nodes may be feeding some of the yet-to-be-replaced nodes. Put differently,
13328   // some of these nodes may appear in the NodeExtensionHelpers of some of the
13329   // yet-to-be-visited CombinesToApply roots.
13330   SmallVector<std::pair<SDValue, SDValue>> ValuesToReplace;
13331   ValuesToReplace.reserve(CombinesToApply.size());
13332   for (CombineResult Res : CombinesToApply) {
13333     SDValue NewValue = Res.materialize(DAG);
13334     if (!InputRootReplacement) {
13335       assert(Res.Root == N &&
13336              "First element is expected to be the current node");
13337       InputRootReplacement = NewValue;
13338     } else {
13339       ValuesToReplace.emplace_back(SDValue(Res.Root, 0), NewValue);
13340     }
13341   }
13342   for (std::pair<SDValue, SDValue> OldNewValues : ValuesToReplace) {
13343     DAG.ReplaceAllUsesOfValueWith(OldNewValues.first, OldNewValues.second);
13344     DCI.AddToWorklist(OldNewValues.second.getNode());
13345   }
13346   return InputRootReplacement;
13347 }
13348 
13349 // Helper function for performMemPairCombine.
13350 // Try to combine the memory loads/stores LSNode1 and LSNode2
13351 // into a single memory pair operation.
13352 static SDValue tryMemPairCombine(SelectionDAG &DAG, LSBaseSDNode *LSNode1,
13353                                  LSBaseSDNode *LSNode2, SDValue BasePtr,
13354                                  uint64_t Imm) {
13355   SmallPtrSet<const SDNode *, 32> Visited;
13356   SmallVector<const SDNode *, 8> Worklist = {LSNode1, LSNode2};
13357 
13358   if (SDNode::hasPredecessorHelper(LSNode1, Visited, Worklist) ||
13359       SDNode::hasPredecessorHelper(LSNode2, Visited, Worklist))
13360     return SDValue();
13361 
13362   MachineFunction &MF = DAG.getMachineFunction();
13363   const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
13364 
13365   // The new operation has twice the width.
13366   MVT XLenVT = Subtarget.getXLenVT();
13367   EVT MemVT = LSNode1->getMemoryVT();
13368   EVT NewMemVT = (MemVT == MVT::i32) ? MVT::i64 : MVT::i128;
13369   MachineMemOperand *MMO = LSNode1->getMemOperand();
13370   MachineMemOperand *NewMMO = MF.getMachineMemOperand(
13371       MMO, MMO->getPointerInfo(), MemVT == MVT::i32 ? 8 : 16);
13372 
13373   if (LSNode1->getOpcode() == ISD::LOAD) {
13374     auto Ext = cast<LoadSDNode>(LSNode1)->getExtensionType();
13375     unsigned Opcode;
13376     if (MemVT == MVT::i32)
13377       Opcode = (Ext == ISD::ZEXTLOAD) ? RISCVISD::TH_LWUD : RISCVISD::TH_LWD;
13378     else
13379       Opcode = RISCVISD::TH_LDD;
13380 
13381     SDValue Res = DAG.getMemIntrinsicNode(
13382         Opcode, SDLoc(LSNode1), DAG.getVTList({XLenVT, XLenVT, MVT::Other}),
13383         {LSNode1->getChain(), BasePtr,
13384          DAG.getConstant(Imm, SDLoc(LSNode1), XLenVT)},
13385         NewMemVT, NewMMO);
13386 
13387     SDValue Node1 =
13388         DAG.getMergeValues({Res.getValue(0), Res.getValue(2)}, SDLoc(LSNode1));
13389     SDValue Node2 =
13390         DAG.getMergeValues({Res.getValue(1), Res.getValue(2)}, SDLoc(LSNode2));
13391 
13392     DAG.ReplaceAllUsesWith(LSNode2, Node2.getNode());
13393     return Node1;
13394   } else {
13395     unsigned Opcode = (MemVT == MVT::i32) ? RISCVISD::TH_SWD : RISCVISD::TH_SDD;
13396 
13397     SDValue Res = DAG.getMemIntrinsicNode(
13398         Opcode, SDLoc(LSNode1), DAG.getVTList(MVT::Other),
13399         {LSNode1->getChain(), LSNode1->getOperand(1), LSNode2->getOperand(1),
13400          BasePtr, DAG.getConstant(Imm, SDLoc(LSNode1), XLenVT)},
13401         NewMemVT, NewMMO);
13402 
13403     DAG.ReplaceAllUsesWith(LSNode2, Res.getNode());
13404     return Res;
13405   }
13406 }
13407 
13408 // Try to combine two adjacent loads/stores to a single pair instruction from
13409 // the XTHeadMemPair vendor extension.
13410 static SDValue performMemPairCombine(SDNode *N,
13411                                      TargetLowering::DAGCombinerInfo &DCI) {
13412   SelectionDAG &DAG = DCI.DAG;
13413   MachineFunction &MF = DAG.getMachineFunction();
13414   const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
13415 
13416   // Target does not support load/store pair.
13417   if (!Subtarget.hasVendorXTHeadMemPair())
13418     return SDValue();
13419 
13420   LSBaseSDNode *LSNode1 = cast<LSBaseSDNode>(N);
13421   EVT MemVT = LSNode1->getMemoryVT();
13422   unsigned OpNum = LSNode1->getOpcode() == ISD::LOAD ? 1 : 2;
13423 
13424   // No volatile, indexed or atomic loads/stores.
13425   if (!LSNode1->isSimple() || LSNode1->isIndexed())
13426     return SDValue();
13427 
13428   // Function to get a base + constant representation from a memory value.
13429   auto ExtractBaseAndOffset = [](SDValue Ptr) -> std::pair<SDValue, uint64_t> {
13430     if (Ptr->getOpcode() == ISD::ADD)
13431       if (auto *C1 = dyn_cast<ConstantSDNode>(Ptr->getOperand(1)))
13432         return {Ptr->getOperand(0), C1->getZExtValue()};
13433     return {Ptr, 0};
13434   };
13435 
13436   auto [Base1, Offset1] = ExtractBaseAndOffset(LSNode1->getOperand(OpNum));
13437 
13438   SDValue Chain = N->getOperand(0);
13439   for (SDNode::use_iterator UI = Chain->use_begin(), UE = Chain->use_end();
13440        UI != UE; ++UI) {
13441     SDUse &Use = UI.getUse();
13442     if (Use.getUser() != N && Use.getResNo() == 0 &&
13443         Use.getUser()->getOpcode() == N->getOpcode()) {
13444       LSBaseSDNode *LSNode2 = cast<LSBaseSDNode>(Use.getUser());
13445 
13446       // No volatile, indexed or atomic loads/stores.
13447       if (!LSNode2->isSimple() || LSNode2->isIndexed())
13448         continue;
13449 
13450       // Check if LSNode1 and LSNode2 have the same type and extension.
13451       if (LSNode1->getOpcode() == ISD::LOAD)
13452         if (cast<LoadSDNode>(LSNode2)->getExtensionType() !=
13453             cast<LoadSDNode>(LSNode1)->getExtensionType())
13454           continue;
13455 
13456       if (LSNode1->getMemoryVT() != LSNode2->getMemoryVT())
13457         continue;
13458 
13459       auto [Base2, Offset2] = ExtractBaseAndOffset(LSNode2->getOperand(OpNum));
13460 
13461       // Check if the base pointer is the same for both instruction.
13462       if (Base1 != Base2)
13463         continue;
13464 
13465       // Check if the offsets match the XTHeadMemPair encoding contraints.
13466       bool Valid = false;
13467       if (MemVT == MVT::i32) {
13468         // Check for adjacent i32 values and a 2-bit index.
13469         if ((Offset1 + 4 == Offset2) && isShiftedUInt<2, 3>(Offset1))
13470           Valid = true;
13471       } else if (MemVT == MVT::i64) {
13472         // Check for adjacent i64 values and a 2-bit index.
13473         if ((Offset1 + 8 == Offset2) && isShiftedUInt<2, 4>(Offset1))
13474           Valid = true;
13475       }
13476 
13477       if (!Valid)
13478         continue;
13479 
13480       // Try to combine.
13481       if (SDValue Res =
13482               tryMemPairCombine(DAG, LSNode1, LSNode2, Base1, Offset1))
13483         return Res;
13484     }
13485   }
13486 
13487   return SDValue();
13488 }
13489 
13490 // Fold
13491 //   (fp_to_int (froundeven X)) -> fcvt X, rne
13492 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
13493 //   (fp_to_int (ffloor X))     -> fcvt X, rdn
13494 //   (fp_to_int (fceil X))      -> fcvt X, rup
13495 //   (fp_to_int (fround X))     -> fcvt X, rmm
13496 static SDValue performFP_TO_INTCombine(SDNode *N,
13497                                        TargetLowering::DAGCombinerInfo &DCI,
13498                                        const RISCVSubtarget &Subtarget) {
13499   SelectionDAG &DAG = DCI.DAG;
13500   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13501   MVT XLenVT = Subtarget.getXLenVT();
13502 
13503   SDValue Src = N->getOperand(0);
13504 
13505   // Don't do this for strict-fp Src.
13506   if (Src->isStrictFPOpcode() || Src->isTargetStrictFPOpcode())
13507     return SDValue();
13508 
13509   // Ensure the FP type is legal.
13510   if (!TLI.isTypeLegal(Src.getValueType()))
13511     return SDValue();
13512 
13513   // Don't do this for f16 with Zfhmin and not Zfh.
13514   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
13515     return SDValue();
13516 
13517   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src.getOpcode());
13518   // If the result is invalid, we didn't find a foldable instruction.
13519   // If the result is dynamic, then we found an frint which we don't yet
13520   // support. It will cause 7 to be written to the FRM CSR for vector.
13521   // FIXME: We could support this by using VFCVT_X_F_VL/VFCVT_XU_F_VL below.
13522   if (FRM == RISCVFPRndMode::Invalid || FRM == RISCVFPRndMode::DYN)
13523     return SDValue();
13524 
13525   SDLoc DL(N);
13526   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
13527   EVT VT = N->getValueType(0);
13528 
13529   if (VT.isVector() && TLI.isTypeLegal(VT)) {
13530     MVT SrcVT = Src.getSimpleValueType();
13531     MVT SrcContainerVT = SrcVT;
13532     MVT ContainerVT = VT.getSimpleVT();
13533     SDValue XVal = Src.getOperand(0);
13534 
13535     // For widening and narrowing conversions we just combine it into a
13536     // VFCVT_..._VL node, as there are no specific VFWCVT/VFNCVT VL nodes. They
13537     // end up getting lowered to their appropriate pseudo instructions based on
13538     // their operand types
13539     if (VT.getScalarSizeInBits() > SrcVT.getScalarSizeInBits() * 2 ||
13540         VT.getScalarSizeInBits() * 2 < SrcVT.getScalarSizeInBits())
13541       return SDValue();
13542 
13543     // Make fixed-length vectors scalable first
13544     if (SrcVT.isFixedLengthVector()) {
13545       SrcContainerVT = getContainerForFixedLengthVector(DAG, SrcVT, Subtarget);
13546       XVal = convertToScalableVector(SrcContainerVT, XVal, DAG, Subtarget);
13547       ContainerVT =
13548           getContainerForFixedLengthVector(DAG, ContainerVT, Subtarget);
13549     }
13550 
13551     auto [Mask, VL] =
13552         getDefaultVLOps(SrcVT, SrcContainerVT, DL, DAG, Subtarget);
13553 
13554     SDValue FpToInt;
13555     if (FRM == RISCVFPRndMode::RTZ) {
13556       // Use the dedicated trunc static rounding mode if we're truncating so we
13557       // don't need to generate calls to fsrmi/fsrm
13558       unsigned Opc =
13559           IsSigned ? RISCVISD::VFCVT_RTZ_X_F_VL : RISCVISD::VFCVT_RTZ_XU_F_VL;
13560       FpToInt = DAG.getNode(Opc, DL, ContainerVT, XVal, Mask, VL);
13561     } else {
13562       unsigned Opc =
13563           IsSigned ? RISCVISD::VFCVT_RM_X_F_VL : RISCVISD::VFCVT_RM_XU_F_VL;
13564       FpToInt = DAG.getNode(Opc, DL, ContainerVT, XVal, Mask,
13565                             DAG.getTargetConstant(FRM, DL, XLenVT), VL);
13566     }
13567 
13568     // If converted from fixed-length to scalable, convert back
13569     if (VT.isFixedLengthVector())
13570       FpToInt = convertFromScalableVector(VT, FpToInt, DAG, Subtarget);
13571 
13572     return FpToInt;
13573   }
13574 
13575   // Only handle XLen or i32 types. Other types narrower than XLen will
13576   // eventually be legalized to XLenVT.
13577   if (VT != MVT::i32 && VT != XLenVT)
13578     return SDValue();
13579 
13580   unsigned Opc;
13581   if (VT == XLenVT)
13582     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
13583   else
13584     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
13585 
13586   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
13587                                 DAG.getTargetConstant(FRM, DL, XLenVT));
13588   return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
13589 }
13590 
13591 // Fold
13592 //   (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
13593 //   (fp_to_int_sat (ftrunc X))     -> (select X == nan, 0, (fcvt X, rtz))
13594 //   (fp_to_int_sat (ffloor X))     -> (select X == nan, 0, (fcvt X, rdn))
13595 //   (fp_to_int_sat (fceil X))      -> (select X == nan, 0, (fcvt X, rup))
13596 //   (fp_to_int_sat (fround X))     -> (select X == nan, 0, (fcvt X, rmm))
13597 static SDValue performFP_TO_INT_SATCombine(SDNode *N,
13598                                        TargetLowering::DAGCombinerInfo &DCI,
13599                                        const RISCVSubtarget &Subtarget) {
13600   SelectionDAG &DAG = DCI.DAG;
13601   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13602   MVT XLenVT = Subtarget.getXLenVT();
13603 
13604   // Only handle XLen types. Other types narrower than XLen will eventually be
13605   // legalized to XLenVT.
13606   EVT DstVT = N->getValueType(0);
13607   if (DstVT != XLenVT)
13608     return SDValue();
13609 
13610   SDValue Src = N->getOperand(0);
13611 
13612   // Don't do this for strict-fp Src.
13613   if (Src->isStrictFPOpcode() || Src->isTargetStrictFPOpcode())
13614     return SDValue();
13615 
13616   // Ensure the FP type is also legal.
13617   if (!TLI.isTypeLegal(Src.getValueType()))
13618     return SDValue();
13619 
13620   // Don't do this for f16 with Zfhmin and not Zfh.
13621   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
13622     return SDValue();
13623 
13624   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
13625 
13626   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src.getOpcode());
13627   if (FRM == RISCVFPRndMode::Invalid)
13628     return SDValue();
13629 
13630   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
13631 
13632   unsigned Opc;
13633   if (SatVT == DstVT)
13634     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
13635   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
13636     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
13637   else
13638     return SDValue();
13639   // FIXME: Support other SatVTs by clamping before or after the conversion.
13640 
13641   Src = Src.getOperand(0);
13642 
13643   SDLoc DL(N);
13644   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
13645                                 DAG.getTargetConstant(FRM, DL, XLenVT));
13646 
13647   // fcvt.wu.* sign extends bit 31 on RV64. FP_TO_UINT_SAT expects to zero
13648   // extend.
13649   if (Opc == RISCVISD::FCVT_WU_RV64)
13650     FpToInt = DAG.getZeroExtendInReg(FpToInt, DL, MVT::i32);
13651 
13652   // RISC-V FP-to-int conversions saturate to the destination register size, but
13653   // don't produce 0 for nan.
13654   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
13655   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
13656 }
13657 
13658 // Combine (bitreverse (bswap X)) to the BREV8 GREVI encoding if the type is
13659 // smaller than XLenVT.
13660 static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG,
13661                                         const RISCVSubtarget &Subtarget) {
13662   assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
13663 
13664   SDValue Src = N->getOperand(0);
13665   if (Src.getOpcode() != ISD::BSWAP)
13666     return SDValue();
13667 
13668   EVT VT = N->getValueType(0);
13669   if (!VT.isScalarInteger() || VT.getSizeInBits() >= Subtarget.getXLen() ||
13670       !llvm::has_single_bit<uint32_t>(VT.getSizeInBits()))
13671     return SDValue();
13672 
13673   SDLoc DL(N);
13674   return DAG.getNode(RISCVISD::BREV8, DL, VT, Src.getOperand(0));
13675 }
13676 
13677 // Convert from one FMA opcode to another based on whether we are negating the
13678 // multiply result and/or the accumulator.
13679 // NOTE: Only supports RVV operations with VL.
13680 static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc) {
13681   // Negating the multiply result changes ADD<->SUB and toggles 'N'.
13682   if (NegMul) {
13683     // clang-format off
13684     switch (Opcode) {
13685     default: llvm_unreachable("Unexpected opcode");
13686     case RISCVISD::VFMADD_VL:  Opcode = RISCVISD::VFNMSUB_VL; break;
13687     case RISCVISD::VFNMSUB_VL: Opcode = RISCVISD::VFMADD_VL;  break;
13688     case RISCVISD::VFNMADD_VL: Opcode = RISCVISD::VFMSUB_VL;  break;
13689     case RISCVISD::VFMSUB_VL:  Opcode = RISCVISD::VFNMADD_VL; break;
13690     case RISCVISD::STRICT_VFMADD_VL:  Opcode = RISCVISD::STRICT_VFNMSUB_VL; break;
13691     case RISCVISD::STRICT_VFNMSUB_VL: Opcode = RISCVISD::STRICT_VFMADD_VL;  break;
13692     case RISCVISD::STRICT_VFNMADD_VL: Opcode = RISCVISD::STRICT_VFMSUB_VL;  break;
13693     case RISCVISD::STRICT_VFMSUB_VL:  Opcode = RISCVISD::STRICT_VFNMADD_VL; break;
13694     }
13695     // clang-format on
13696   }
13697 
13698   // Negating the accumulator changes ADD<->SUB.
13699   if (NegAcc) {
13700     // clang-format off
13701     switch (Opcode) {
13702     default: llvm_unreachable("Unexpected opcode");
13703     case RISCVISD::VFMADD_VL:  Opcode = RISCVISD::VFMSUB_VL;  break;
13704     case RISCVISD::VFMSUB_VL:  Opcode = RISCVISD::VFMADD_VL;  break;
13705     case RISCVISD::VFNMADD_VL: Opcode = RISCVISD::VFNMSUB_VL; break;
13706     case RISCVISD::VFNMSUB_VL: Opcode = RISCVISD::VFNMADD_VL; break;
13707     case RISCVISD::STRICT_VFMADD_VL:  Opcode = RISCVISD::STRICT_VFMSUB_VL;  break;
13708     case RISCVISD::STRICT_VFMSUB_VL:  Opcode = RISCVISD::STRICT_VFMADD_VL;  break;
13709     case RISCVISD::STRICT_VFNMADD_VL: Opcode = RISCVISD::STRICT_VFNMSUB_VL; break;
13710     case RISCVISD::STRICT_VFNMSUB_VL: Opcode = RISCVISD::STRICT_VFNMADD_VL; break;
13711     }
13712     // clang-format on
13713   }
13714 
13715   return Opcode;
13716 }
13717 
13718 static SDValue combineVFMADD_VLWithVFNEG_VL(SDNode *N, SelectionDAG &DAG) {
13719   // Fold FNEG_VL into FMA opcodes.
13720   // The first operand of strict-fp is chain.
13721   unsigned Offset = N->isTargetStrictFPOpcode();
13722   SDValue A = N->getOperand(0 + Offset);
13723   SDValue B = N->getOperand(1 + Offset);
13724   SDValue C = N->getOperand(2 + Offset);
13725   SDValue Mask = N->getOperand(3 + Offset);
13726   SDValue VL = N->getOperand(4 + Offset);
13727 
13728   auto invertIfNegative = [&Mask, &VL](SDValue &V) {
13729     if (V.getOpcode() == RISCVISD::FNEG_VL && V.getOperand(1) == Mask &&
13730         V.getOperand(2) == VL) {
13731       // Return the negated input.
13732       V = V.getOperand(0);
13733       return true;
13734     }
13735 
13736     return false;
13737   };
13738 
13739   bool NegA = invertIfNegative(A);
13740   bool NegB = invertIfNegative(B);
13741   bool NegC = invertIfNegative(C);
13742 
13743   // If no operands are negated, we're done.
13744   if (!NegA && !NegB && !NegC)
13745     return SDValue();
13746 
13747   unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC);
13748   if (N->isTargetStrictFPOpcode())
13749     return DAG.getNode(NewOpcode, SDLoc(N), N->getVTList(),
13750                        {N->getOperand(0), A, B, C, Mask, VL});
13751   return DAG.getNode(NewOpcode, SDLoc(N), N->getValueType(0), A, B, C, Mask,
13752                      VL);
13753 }
13754 
13755 static SDValue performVFMADD_VLCombine(SDNode *N, SelectionDAG &DAG,
13756                                        const RISCVSubtarget &Subtarget) {
13757   if (SDValue V = combineVFMADD_VLWithVFNEG_VL(N, DAG))
13758     return V;
13759 
13760   if (N->getValueType(0).isScalableVector() &&
13761       N->getValueType(0).getVectorElementType() == MVT::f32 &&
13762       (Subtarget.hasVInstructionsF16Minimal() &&
13763        !Subtarget.hasVInstructionsF16())) {
13764     return SDValue();
13765   }
13766 
13767   // FIXME: Ignore strict opcodes for now.
13768   if (N->isTargetStrictFPOpcode())
13769     return SDValue();
13770 
13771   // Try to form widening FMA.
13772   SDValue Op0 = N->getOperand(0);
13773   SDValue Op1 = N->getOperand(1);
13774   SDValue Mask = N->getOperand(3);
13775   SDValue VL = N->getOperand(4);
13776 
13777   if (Op0.getOpcode() != RISCVISD::FP_EXTEND_VL ||
13778       Op1.getOpcode() != RISCVISD::FP_EXTEND_VL)
13779     return SDValue();
13780 
13781   // TODO: Refactor to handle more complex cases similar to
13782   // combineBinOp_VLToVWBinOp_VL.
13783   if ((!Op0.hasOneUse() || !Op1.hasOneUse()) &&
13784       (Op0 != Op1 || !Op0->hasNUsesOfValue(2, 0)))
13785     return SDValue();
13786 
13787   // Check the mask and VL are the same.
13788   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL ||
13789       Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
13790     return SDValue();
13791 
13792   unsigned NewOpc;
13793   switch (N->getOpcode()) {
13794   default:
13795     llvm_unreachable("Unexpected opcode");
13796   case RISCVISD::VFMADD_VL:
13797     NewOpc = RISCVISD::VFWMADD_VL;
13798     break;
13799   case RISCVISD::VFNMSUB_VL:
13800     NewOpc = RISCVISD::VFWNMSUB_VL;
13801     break;
13802   case RISCVISD::VFNMADD_VL:
13803     NewOpc = RISCVISD::VFWNMADD_VL;
13804     break;
13805   case RISCVISD::VFMSUB_VL:
13806     NewOpc = RISCVISD::VFWMSUB_VL;
13807     break;
13808   }
13809 
13810   Op0 = Op0.getOperand(0);
13811   Op1 = Op1.getOperand(0);
13812 
13813   return DAG.getNode(NewOpc, SDLoc(N), N->getValueType(0), Op0, Op1,
13814                      N->getOperand(2), Mask, VL);
13815 }
13816 
13817 static SDValue performVFMUL_VLCombine(SDNode *N, SelectionDAG &DAG,
13818                                       const RISCVSubtarget &Subtarget) {
13819   if (N->getValueType(0).isScalableVector() &&
13820       N->getValueType(0).getVectorElementType() == MVT::f32 &&
13821       (Subtarget.hasVInstructionsF16Minimal() &&
13822        !Subtarget.hasVInstructionsF16())) {
13823     return SDValue();
13824   }
13825 
13826   // FIXME: Ignore strict opcodes for now.
13827   assert(!N->isTargetStrictFPOpcode() && "Unexpected opcode");
13828 
13829   // Try to form widening multiply.
13830   SDValue Op0 = N->getOperand(0);
13831   SDValue Op1 = N->getOperand(1);
13832   SDValue Merge = N->getOperand(2);
13833   SDValue Mask = N->getOperand(3);
13834   SDValue VL = N->getOperand(4);
13835 
13836   if (Op0.getOpcode() != RISCVISD::FP_EXTEND_VL ||
13837       Op1.getOpcode() != RISCVISD::FP_EXTEND_VL)
13838     return SDValue();
13839 
13840   // TODO: Refactor to handle more complex cases similar to
13841   // combineBinOp_VLToVWBinOp_VL.
13842   if ((!Op0.hasOneUse() || !Op1.hasOneUse()) &&
13843       (Op0 != Op1 || !Op0->hasNUsesOfValue(2, 0)))
13844     return SDValue();
13845 
13846   // Check the mask and VL are the same.
13847   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL ||
13848       Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
13849     return SDValue();
13850 
13851   Op0 = Op0.getOperand(0);
13852   Op1 = Op1.getOperand(0);
13853 
13854   return DAG.getNode(RISCVISD::VFWMUL_VL, SDLoc(N), N->getValueType(0), Op0,
13855                      Op1, Merge, Mask, VL);
13856 }
13857 
13858 static SDValue performFADDSUB_VLCombine(SDNode *N, SelectionDAG &DAG,
13859                                         const RISCVSubtarget &Subtarget) {
13860   if (N->getValueType(0).isScalableVector() &&
13861       N->getValueType(0).getVectorElementType() == MVT::f32 &&
13862       (Subtarget.hasVInstructionsF16Minimal() &&
13863        !Subtarget.hasVInstructionsF16())) {
13864     return SDValue();
13865   }
13866 
13867   SDValue Op0 = N->getOperand(0);
13868   SDValue Op1 = N->getOperand(1);
13869   SDValue Merge = N->getOperand(2);
13870   SDValue Mask = N->getOperand(3);
13871   SDValue VL = N->getOperand(4);
13872 
13873   bool IsAdd = N->getOpcode() == RISCVISD::FADD_VL;
13874 
13875   // Look for foldable FP_EXTENDS.
13876   bool Op0IsExtend =
13877       Op0.getOpcode() == RISCVISD::FP_EXTEND_VL &&
13878       (Op0.hasOneUse() || (Op0 == Op1 && Op0->hasNUsesOfValue(2, 0)));
13879   bool Op1IsExtend =
13880       (Op0 == Op1 && Op0IsExtend) ||
13881       (Op1.getOpcode() == RISCVISD::FP_EXTEND_VL && Op1.hasOneUse());
13882 
13883   // Check the mask and VL.
13884   if (Op0IsExtend && (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL))
13885     Op0IsExtend = false;
13886   if (Op1IsExtend && (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL))
13887     Op1IsExtend = false;
13888 
13889   // Canonicalize.
13890   if (!Op1IsExtend) {
13891     // Sub requires at least operand 1 to be an extend.
13892     if (!IsAdd)
13893       return SDValue();
13894 
13895     // Add is commutable, if the other operand is foldable, swap them.
13896     if (!Op0IsExtend)
13897       return SDValue();
13898 
13899     std::swap(Op0, Op1);
13900     std::swap(Op0IsExtend, Op1IsExtend);
13901   }
13902 
13903   // Op1 is a foldable extend. Op0 might be foldable.
13904   Op1 = Op1.getOperand(0);
13905   if (Op0IsExtend)
13906     Op0 = Op0.getOperand(0);
13907 
13908   unsigned Opc;
13909   if (IsAdd)
13910     Opc = Op0IsExtend ? RISCVISD::VFWADD_VL : RISCVISD::VFWADD_W_VL;
13911   else
13912     Opc = Op0IsExtend ? RISCVISD::VFWSUB_VL : RISCVISD::VFWSUB_W_VL;
13913 
13914   return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Op0, Op1, Merge, Mask,
13915                      VL);
13916 }
13917 
13918 static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
13919                                  const RISCVSubtarget &Subtarget) {
13920   assert(N->getOpcode() == ISD::SRA && "Unexpected opcode");
13921 
13922   if (N->getValueType(0) != MVT::i64 || !Subtarget.is64Bit())
13923     return SDValue();
13924 
13925   if (!isa<ConstantSDNode>(N->getOperand(1)))
13926     return SDValue();
13927   uint64_t ShAmt = N->getConstantOperandVal(1);
13928   if (ShAmt > 32)
13929     return SDValue();
13930 
13931   SDValue N0 = N->getOperand(0);
13932 
13933   // Combine (sra (sext_inreg (shl X, C1), i32), C2) ->
13934   // (sra (shl X, C1+32), C2+32) so it gets selected as SLLI+SRAI instead of
13935   // SLLIW+SRAIW. SLLI+SRAI have compressed forms.
13936   if (ShAmt < 32 &&
13937       N0.getOpcode() == ISD::SIGN_EXTEND_INREG && N0.hasOneUse() &&
13938       cast<VTSDNode>(N0.getOperand(1))->getVT() == MVT::i32 &&
13939       N0.getOperand(0).getOpcode() == ISD::SHL && N0.getOperand(0).hasOneUse() &&
13940       isa<ConstantSDNode>(N0.getOperand(0).getOperand(1))) {
13941     uint64_t LShAmt = N0.getOperand(0).getConstantOperandVal(1);
13942     if (LShAmt < 32) {
13943       SDLoc ShlDL(N0.getOperand(0));
13944       SDValue Shl = DAG.getNode(ISD::SHL, ShlDL, MVT::i64,
13945                                 N0.getOperand(0).getOperand(0),
13946                                 DAG.getConstant(LShAmt + 32, ShlDL, MVT::i64));
13947       SDLoc DL(N);
13948       return DAG.getNode(ISD::SRA, DL, MVT::i64, Shl,
13949                          DAG.getConstant(ShAmt + 32, DL, MVT::i64));
13950     }
13951   }
13952 
13953   // Combine (sra (shl X, 32), 32 - C) -> (shl (sext_inreg X, i32), C)
13954   // FIXME: Should this be a generic combine? There's a similar combine on X86.
13955   //
13956   // Also try these folds where an add or sub is in the middle.
13957   // (sra (add (shl X, 32), C1), 32 - C) -> (shl (sext_inreg (add X, C1), C)
13958   // (sra (sub C1, (shl X, 32)), 32 - C) -> (shl (sext_inreg (sub C1, X), C)
13959   SDValue Shl;
13960   ConstantSDNode *AddC = nullptr;
13961 
13962   // We might have an ADD or SUB between the SRA and SHL.
13963   bool IsAdd = N0.getOpcode() == ISD::ADD;
13964   if ((IsAdd || N0.getOpcode() == ISD::SUB)) {
13965     // Other operand needs to be a constant we can modify.
13966     AddC = dyn_cast<ConstantSDNode>(N0.getOperand(IsAdd ? 1 : 0));
13967     if (!AddC)
13968       return SDValue();
13969 
13970     // AddC needs to have at least 32 trailing zeros.
13971     if (AddC->getAPIntValue().countr_zero() < 32)
13972       return SDValue();
13973 
13974     // All users should be a shift by constant less than or equal to 32. This
13975     // ensures we'll do this optimization for each of them to produce an
13976     // add/sub+sext_inreg they can all share.
13977     for (SDNode *U : N0->uses()) {
13978       if (U->getOpcode() != ISD::SRA ||
13979           !isa<ConstantSDNode>(U->getOperand(1)) ||
13980           cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() > 32)
13981         return SDValue();
13982     }
13983 
13984     Shl = N0.getOperand(IsAdd ? 0 : 1);
13985   } else {
13986     // Not an ADD or SUB.
13987     Shl = N0;
13988   }
13989 
13990   // Look for a shift left by 32.
13991   if (Shl.getOpcode() != ISD::SHL || !isa<ConstantSDNode>(Shl.getOperand(1)) ||
13992       Shl.getConstantOperandVal(1) != 32)
13993     return SDValue();
13994 
13995   // We if we didn't look through an add/sub, then the shl should have one use.
13996   // If we did look through an add/sub, the sext_inreg we create is free so
13997   // we're only creating 2 new instructions. It's enough to only remove the
13998   // original sra+add/sub.
13999   if (!AddC && !Shl.hasOneUse())
14000     return SDValue();
14001 
14002   SDLoc DL(N);
14003   SDValue In = Shl.getOperand(0);
14004 
14005   // If we looked through an ADD or SUB, we need to rebuild it with the shifted
14006   // constant.
14007   if (AddC) {
14008     SDValue ShiftedAddC =
14009         DAG.getConstant(AddC->getAPIntValue().lshr(32), DL, MVT::i64);
14010     if (IsAdd)
14011       In = DAG.getNode(ISD::ADD, DL, MVT::i64, In, ShiftedAddC);
14012     else
14013       In = DAG.getNode(ISD::SUB, DL, MVT::i64, ShiftedAddC, In);
14014   }
14015 
14016   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, In,
14017                              DAG.getValueType(MVT::i32));
14018   if (ShAmt == 32)
14019     return SExt;
14020 
14021   return DAG.getNode(
14022       ISD::SHL, DL, MVT::i64, SExt,
14023       DAG.getConstant(32 - ShAmt, DL, MVT::i64));
14024 }
14025 
14026 // Invert (and/or (set cc X, Y), (xor Z, 1)) to (or/and (set !cc X, Y)), Z) if
14027 // the result is used as the conditon of a br_cc or select_cc we can invert,
14028 // inverting the setcc is free, and Z is 0/1. Caller will invert the
14029 // br_cc/select_cc.
14030 static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) {
14031   bool IsAnd = Cond.getOpcode() == ISD::AND;
14032   if (!IsAnd && Cond.getOpcode() != ISD::OR)
14033     return SDValue();
14034 
14035   if (!Cond.hasOneUse())
14036     return SDValue();
14037 
14038   SDValue Setcc = Cond.getOperand(0);
14039   SDValue Xor = Cond.getOperand(1);
14040   // Canonicalize setcc to LHS.
14041   if (Setcc.getOpcode() != ISD::SETCC)
14042     std::swap(Setcc, Xor);
14043   // LHS should be a setcc and RHS should be an xor.
14044   if (Setcc.getOpcode() != ISD::SETCC || !Setcc.hasOneUse() ||
14045       Xor.getOpcode() != ISD::XOR || !Xor.hasOneUse())
14046     return SDValue();
14047 
14048   // If the condition is an And, SimplifyDemandedBits may have changed
14049   // (xor Z, 1) to (not Z).
14050   SDValue Xor1 = Xor.getOperand(1);
14051   if (!isOneConstant(Xor1) && !(IsAnd && isAllOnesConstant(Xor1)))
14052     return SDValue();
14053 
14054   EVT VT = Cond.getValueType();
14055   SDValue Xor0 = Xor.getOperand(0);
14056 
14057   // The LHS of the xor needs to be 0/1.
14058   APInt Mask = APInt::getBitsSetFrom(VT.getSizeInBits(), 1);
14059   if (!DAG.MaskedValueIsZero(Xor0, Mask))
14060     return SDValue();
14061 
14062   // We can only invert integer setccs.
14063   EVT SetCCOpVT = Setcc.getOperand(0).getValueType();
14064   if (!SetCCOpVT.isScalarInteger())
14065     return SDValue();
14066 
14067   ISD::CondCode CCVal = cast<CondCodeSDNode>(Setcc.getOperand(2))->get();
14068   if (ISD::isIntEqualitySetCC(CCVal)) {
14069     CCVal = ISD::getSetCCInverse(CCVal, SetCCOpVT);
14070     Setcc = DAG.getSetCC(SDLoc(Setcc), VT, Setcc.getOperand(0),
14071                          Setcc.getOperand(1), CCVal);
14072   } else if (CCVal == ISD::SETLT && isNullConstant(Setcc.getOperand(0))) {
14073     // Invert (setlt 0, X) by converting to (setlt X, 1).
14074     Setcc = DAG.getSetCC(SDLoc(Setcc), VT, Setcc.getOperand(1),
14075                          DAG.getConstant(1, SDLoc(Setcc), VT), CCVal);
14076   } else if (CCVal == ISD::SETLT && isOneConstant(Setcc.getOperand(1))) {
14077     // (setlt X, 1) by converting to (setlt 0, X).
14078     Setcc = DAG.getSetCC(SDLoc(Setcc), VT,
14079                          DAG.getConstant(0, SDLoc(Setcc), VT),
14080                          Setcc.getOperand(0), CCVal);
14081   } else
14082     return SDValue();
14083 
14084   unsigned Opc = IsAnd ? ISD::OR : ISD::AND;
14085   return DAG.getNode(Opc, SDLoc(Cond), VT, Setcc, Xor.getOperand(0));
14086 }
14087 
14088 // Perform common combines for BR_CC and SELECT_CC condtions.
14089 static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL,
14090                        SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {
14091   ISD::CondCode CCVal = cast<CondCodeSDNode>(CC)->get();
14092 
14093   // As far as arithmetic right shift always saves the sign,
14094   // shift can be omitted.
14095   // Fold setlt (sra X, N), 0 -> setlt X, 0 and
14096   // setge (sra X, N), 0 -> setge X, 0
14097   if (isNullConstant(RHS) && (CCVal == ISD::SETGE || CCVal == ISD::SETLT) &&
14098       LHS.getOpcode() == ISD::SRA) {
14099     LHS = LHS.getOperand(0);
14100     return true;
14101   }
14102 
14103   if (!ISD::isIntEqualitySetCC(CCVal))
14104     return false;
14105 
14106   // Fold ((setlt X, Y), 0, ne) -> (X, Y, lt)
14107   // Sometimes the setcc is introduced after br_cc/select_cc has been formed.
14108   if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
14109       LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
14110     // If we're looking for eq 0 instead of ne 0, we need to invert the
14111     // condition.
14112     bool Invert = CCVal == ISD::SETEQ;
14113     CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
14114     if (Invert)
14115       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
14116 
14117     RHS = LHS.getOperand(1);
14118     LHS = LHS.getOperand(0);
14119     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
14120 
14121     CC = DAG.getCondCode(CCVal);
14122     return true;
14123   }
14124 
14125   // Fold ((xor X, Y), 0, eq/ne) -> (X, Y, eq/ne)
14126   if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS)) {
14127     RHS = LHS.getOperand(1);
14128     LHS = LHS.getOperand(0);
14129     return true;
14130   }
14131 
14132   // Fold ((srl (and X, 1<<C), C), 0, eq/ne) -> ((shl X, XLen-1-C), 0, ge/lt)
14133   if (isNullConstant(RHS) && LHS.getOpcode() == ISD::SRL && LHS.hasOneUse() &&
14134       LHS.getOperand(1).getOpcode() == ISD::Constant) {
14135     SDValue LHS0 = LHS.getOperand(0);
14136     if (LHS0.getOpcode() == ISD::AND &&
14137         LHS0.getOperand(1).getOpcode() == ISD::Constant) {
14138       uint64_t Mask = LHS0.getConstantOperandVal(1);
14139       uint64_t ShAmt = LHS.getConstantOperandVal(1);
14140       if (isPowerOf2_64(Mask) && Log2_64(Mask) == ShAmt) {
14141         CCVal = CCVal == ISD::SETEQ ? ISD::SETGE : ISD::SETLT;
14142         CC = DAG.getCondCode(CCVal);
14143 
14144         ShAmt = LHS.getValueSizeInBits() - 1 - ShAmt;
14145         LHS = LHS0.getOperand(0);
14146         if (ShAmt != 0)
14147           LHS =
14148               DAG.getNode(ISD::SHL, DL, LHS.getValueType(), LHS0.getOperand(0),
14149                           DAG.getConstant(ShAmt, DL, LHS.getValueType()));
14150         return true;
14151       }
14152     }
14153   }
14154 
14155   // (X, 1, setne) -> // (X, 0, seteq) if we can prove X is 0/1.
14156   // This can occur when legalizing some floating point comparisons.
14157   APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
14158   if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
14159     CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
14160     CC = DAG.getCondCode(CCVal);
14161     RHS = DAG.getConstant(0, DL, LHS.getValueType());
14162     return true;
14163   }
14164 
14165   if (isNullConstant(RHS)) {
14166     if (SDValue NewCond = tryDemorganOfBooleanCondition(LHS, DAG)) {
14167       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
14168       CC = DAG.getCondCode(CCVal);
14169       LHS = NewCond;
14170       return true;
14171     }
14172   }
14173 
14174   return false;
14175 }
14176 
14177 // Fold
14178 // (select C, (add Y, X), Y) -> (add Y, (select C, X, 0)).
14179 // (select C, (sub Y, X), Y) -> (sub Y, (select C, X, 0)).
14180 // (select C, (or Y, X), Y)  -> (or Y, (select C, X, 0)).
14181 // (select C, (xor Y, X), Y) -> (xor Y, (select C, X, 0)).
14182 static SDValue tryFoldSelectIntoOp(SDNode *N, SelectionDAG &DAG,
14183                                    SDValue TrueVal, SDValue FalseVal,
14184                                    bool Swapped) {
14185   bool Commutative = true;
14186   unsigned Opc = TrueVal.getOpcode();
14187   switch (Opc) {
14188   default:
14189     return SDValue();
14190   case ISD::SHL:
14191   case ISD::SRA:
14192   case ISD::SRL:
14193   case ISD::SUB:
14194     Commutative = false;
14195     break;
14196   case ISD::ADD:
14197   case ISD::OR:
14198   case ISD::XOR:
14199     break;
14200   }
14201 
14202   if (!TrueVal.hasOneUse() || isa<ConstantSDNode>(FalseVal))
14203     return SDValue();
14204 
14205   unsigned OpToFold;
14206   if (FalseVal == TrueVal.getOperand(0))
14207     OpToFold = 0;
14208   else if (Commutative && FalseVal == TrueVal.getOperand(1))
14209     OpToFold = 1;
14210   else
14211     return SDValue();
14212 
14213   EVT VT = N->getValueType(0);
14214   SDLoc DL(N);
14215   SDValue OtherOp = TrueVal.getOperand(1 - OpToFold);
14216   EVT OtherOpVT = OtherOp->getValueType(0);
14217   SDValue IdentityOperand =
14218       DAG.getNeutralElement(Opc, DL, OtherOpVT, N->getFlags());
14219   if (!Commutative)
14220     IdentityOperand = DAG.getConstant(0, DL, OtherOpVT);
14221   assert(IdentityOperand && "No identity operand!");
14222 
14223   if (Swapped)
14224     std::swap(OtherOp, IdentityOperand);
14225   SDValue NewSel =
14226       DAG.getSelect(DL, OtherOpVT, N->getOperand(0), OtherOp, IdentityOperand);
14227   return DAG.getNode(TrueVal.getOpcode(), DL, VT, FalseVal, NewSel);
14228 }
14229 
14230 // This tries to get rid of `select` and `icmp` that are being used to handle
14231 // `Targets` that do not support `cttz(0)`/`ctlz(0)`.
14232 static SDValue foldSelectOfCTTZOrCTLZ(SDNode *N, SelectionDAG &DAG) {
14233   SDValue Cond = N->getOperand(0);
14234 
14235   // This represents either CTTZ or CTLZ instruction.
14236   SDValue CountZeroes;
14237 
14238   SDValue ValOnZero;
14239 
14240   if (Cond.getOpcode() != ISD::SETCC)
14241     return SDValue();
14242 
14243   if (!isNullConstant(Cond->getOperand(1)))
14244     return SDValue();
14245 
14246   ISD::CondCode CCVal = cast<CondCodeSDNode>(Cond->getOperand(2))->get();
14247   if (CCVal == ISD::CondCode::SETEQ) {
14248     CountZeroes = N->getOperand(2);
14249     ValOnZero = N->getOperand(1);
14250   } else if (CCVal == ISD::CondCode::SETNE) {
14251     CountZeroes = N->getOperand(1);
14252     ValOnZero = N->getOperand(2);
14253   } else {
14254     return SDValue();
14255   }
14256 
14257   if (CountZeroes.getOpcode() == ISD::TRUNCATE ||
14258       CountZeroes.getOpcode() == ISD::ZERO_EXTEND)
14259     CountZeroes = CountZeroes.getOperand(0);
14260 
14261   if (CountZeroes.getOpcode() != ISD::CTTZ &&
14262       CountZeroes.getOpcode() != ISD::CTTZ_ZERO_UNDEF &&
14263       CountZeroes.getOpcode() != ISD::CTLZ &&
14264       CountZeroes.getOpcode() != ISD::CTLZ_ZERO_UNDEF)
14265     return SDValue();
14266 
14267   if (!isNullConstant(ValOnZero))
14268     return SDValue();
14269 
14270   SDValue CountZeroesArgument = CountZeroes->getOperand(0);
14271   if (Cond->getOperand(0) != CountZeroesArgument)
14272     return SDValue();
14273 
14274   if (CountZeroes.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
14275     CountZeroes = DAG.getNode(ISD::CTTZ, SDLoc(CountZeroes),
14276                               CountZeroes.getValueType(), CountZeroesArgument);
14277   } else if (CountZeroes.getOpcode() == ISD::CTLZ_ZERO_UNDEF) {
14278     CountZeroes = DAG.getNode(ISD::CTLZ, SDLoc(CountZeroes),
14279                               CountZeroes.getValueType(), CountZeroesArgument);
14280   }
14281 
14282   unsigned BitWidth = CountZeroes.getValueSizeInBits();
14283   SDValue BitWidthMinusOne =
14284       DAG.getConstant(BitWidth - 1, SDLoc(N), CountZeroes.getValueType());
14285 
14286   auto AndNode = DAG.getNode(ISD::AND, SDLoc(N), CountZeroes.getValueType(),
14287                              CountZeroes, BitWidthMinusOne);
14288   return DAG.getZExtOrTrunc(AndNode, SDLoc(N), N->getValueType(0));
14289 }
14290 
14291 static SDValue useInversedSetcc(SDNode *N, SelectionDAG &DAG,
14292                                 const RISCVSubtarget &Subtarget) {
14293   SDValue Cond = N->getOperand(0);
14294   SDValue True = N->getOperand(1);
14295   SDValue False = N->getOperand(2);
14296   SDLoc DL(N);
14297   EVT VT = N->getValueType(0);
14298   EVT CondVT = Cond.getValueType();
14299 
14300   if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
14301     return SDValue();
14302 
14303   // Replace (setcc eq (and x, C)) with (setcc ne (and x, C))) to generate
14304   // BEXTI, where C is power of 2.
14305   if (Subtarget.hasStdExtZbs() && VT.isScalarInteger() &&
14306       (Subtarget.hasStdExtZicond() || Subtarget.hasVendorXVentanaCondOps())) {
14307     SDValue LHS = Cond.getOperand(0);
14308     SDValue RHS = Cond.getOperand(1);
14309     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
14310     if (CC == ISD::SETEQ && LHS.getOpcode() == ISD::AND &&
14311         isa<ConstantSDNode>(LHS.getOperand(1)) && isNullConstant(RHS)) {
14312       uint64_t MaskVal = LHS.getConstantOperandVal(1);
14313       if (isPowerOf2_64(MaskVal) && !isInt<12>(MaskVal))
14314         return DAG.getSelect(DL, VT,
14315                              DAG.getSetCC(DL, CondVT, LHS, RHS, ISD::SETNE),
14316                              False, True);
14317     }
14318   }
14319   return SDValue();
14320 }
14321 
14322 static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
14323                                     const RISCVSubtarget &Subtarget) {
14324   if (SDValue Folded = foldSelectOfCTTZOrCTLZ(N, DAG))
14325     return Folded;
14326 
14327   if (SDValue V = useInversedSetcc(N, DAG, Subtarget))
14328     return V;
14329 
14330   if (Subtarget.hasShortForwardBranchOpt())
14331     return SDValue();
14332 
14333   SDValue TrueVal = N->getOperand(1);
14334   SDValue FalseVal = N->getOperand(2);
14335   if (SDValue V = tryFoldSelectIntoOp(N, DAG, TrueVal, FalseVal, /*Swapped*/false))
14336     return V;
14337   return tryFoldSelectIntoOp(N, DAG, FalseVal, TrueVal, /*Swapped*/true);
14338 }
14339 
14340 /// If we have a build_vector where each lane is binop X, C, where C
14341 /// is a constant (but not necessarily the same constant on all lanes),
14342 /// form binop (build_vector x1, x2, ...), (build_vector c1, c2, c3, ..).
14343 /// We assume that materializing a constant build vector will be no more
14344 /// expensive that performing O(n) binops.
14345 static SDValue performBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
14346                                           const RISCVSubtarget &Subtarget,
14347                                           const RISCVTargetLowering &TLI) {
14348   SDLoc DL(N);
14349   EVT VT = N->getValueType(0);
14350 
14351   assert(!VT.isScalableVector() && "unexpected build vector");
14352 
14353   if (VT.getVectorNumElements() == 1)
14354     return SDValue();
14355 
14356   const unsigned Opcode = N->op_begin()->getNode()->getOpcode();
14357   if (!TLI.isBinOp(Opcode))
14358     return SDValue();
14359 
14360   if (!TLI.isOperationLegalOrCustom(Opcode, VT) || !TLI.isTypeLegal(VT))
14361     return SDValue();
14362 
14363   SmallVector<SDValue> LHSOps;
14364   SmallVector<SDValue> RHSOps;
14365   for (SDValue Op : N->ops()) {
14366     if (Op.isUndef()) {
14367       // We can't form a divide or remainder from undef.
14368       if (!DAG.isSafeToSpeculativelyExecute(Opcode))
14369         return SDValue();
14370 
14371       LHSOps.push_back(Op);
14372       RHSOps.push_back(Op);
14373       continue;
14374     }
14375 
14376     // TODO: We can handle operations which have an neutral rhs value
14377     // (e.g. x + 0, a * 1 or a << 0), but we then have to keep track
14378     // of profit in a more explicit manner.
14379     if (Op.getOpcode() != Opcode || !Op.hasOneUse())
14380       return SDValue();
14381 
14382     LHSOps.push_back(Op.getOperand(0));
14383     if (!isa<ConstantSDNode>(Op.getOperand(1)) &&
14384         !isa<ConstantFPSDNode>(Op.getOperand(1)))
14385       return SDValue();
14386     // FIXME: Return failure if the RHS type doesn't match the LHS. Shifts may
14387     // have different LHS and RHS types.
14388     if (Op.getOperand(0).getValueType() != Op.getOperand(1).getValueType())
14389       return SDValue();
14390     RHSOps.push_back(Op.getOperand(1));
14391   }
14392 
14393   return DAG.getNode(Opcode, DL, VT, DAG.getBuildVector(VT, DL, LHSOps),
14394                      DAG.getBuildVector(VT, DL, RHSOps));
14395 }
14396 
14397 static SDValue performINSERT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
14398                                                const RISCVSubtarget &Subtarget,
14399                                                const RISCVTargetLowering &TLI) {
14400   SDValue InVec = N->getOperand(0);
14401   SDValue InVal = N->getOperand(1);
14402   SDValue EltNo = N->getOperand(2);
14403   SDLoc DL(N);
14404 
14405   EVT VT = InVec.getValueType();
14406   if (VT.isScalableVector())
14407     return SDValue();
14408 
14409   if (!InVec.hasOneUse())
14410     return SDValue();
14411 
14412   // Given insert_vector_elt (binop a, VecC), (same_binop b, C2), Elt
14413   // move the insert_vector_elts into the arms of the binop.  Note that
14414   // the new RHS must be a constant.
14415   const unsigned InVecOpcode = InVec->getOpcode();
14416   if (InVecOpcode == InVal->getOpcode() && TLI.isBinOp(InVecOpcode) &&
14417       InVal.hasOneUse()) {
14418     SDValue InVecLHS = InVec->getOperand(0);
14419     SDValue InVecRHS = InVec->getOperand(1);
14420     SDValue InValLHS = InVal->getOperand(0);
14421     SDValue InValRHS = InVal->getOperand(1);
14422 
14423     if (!ISD::isBuildVectorOfConstantSDNodes(InVecRHS.getNode()))
14424       return SDValue();
14425     if (!isa<ConstantSDNode>(InValRHS) && !isa<ConstantFPSDNode>(InValRHS))
14426       return SDValue();
14427     // FIXME: Return failure if the RHS type doesn't match the LHS. Shifts may
14428     // have different LHS and RHS types.
14429     if (InVec.getOperand(0).getValueType() != InVec.getOperand(1).getValueType())
14430       return SDValue();
14431     SDValue LHS = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
14432                               InVecLHS, InValLHS, EltNo);
14433     SDValue RHS = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
14434                               InVecRHS, InValRHS, EltNo);
14435     return DAG.getNode(InVecOpcode, DL, VT, LHS, RHS);
14436   }
14437 
14438   // Given insert_vector_elt (concat_vectors ...), InVal, Elt
14439   // move the insert_vector_elt to the source operand of the concat_vector.
14440   if (InVec.getOpcode() != ISD::CONCAT_VECTORS)
14441     return SDValue();
14442 
14443   auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
14444   if (!IndexC)
14445     return SDValue();
14446   unsigned Elt = IndexC->getZExtValue();
14447 
14448   EVT ConcatVT = InVec.getOperand(0).getValueType();
14449   if (ConcatVT.getVectorElementType() != InVal.getValueType())
14450     return SDValue();
14451   unsigned ConcatNumElts = ConcatVT.getVectorNumElements();
14452   SDValue NewIdx = DAG.getConstant(Elt % ConcatNumElts, DL,
14453                                    EltNo.getValueType());
14454 
14455   unsigned ConcatOpIdx = Elt / ConcatNumElts;
14456   SDValue ConcatOp = InVec.getOperand(ConcatOpIdx);
14457   ConcatOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ConcatVT,
14458                          ConcatOp, InVal, NewIdx);
14459 
14460   SmallVector<SDValue> ConcatOps;
14461   ConcatOps.append(InVec->op_begin(), InVec->op_end());
14462   ConcatOps[ConcatOpIdx] = ConcatOp;
14463   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
14464 }
14465 
14466 // If we're concatenating a series of vector loads like
14467 // concat_vectors (load v4i8, p+0), (load v4i8, p+n), (load v4i8, p+n*2) ...
14468 // Then we can turn this into a strided load by widening the vector elements
14469 // vlse32 p, stride=n
14470 static SDValue performCONCAT_VECTORSCombine(SDNode *N, SelectionDAG &DAG,
14471                                             const RISCVSubtarget &Subtarget,
14472                                             const RISCVTargetLowering &TLI) {
14473   SDLoc DL(N);
14474   EVT VT = N->getValueType(0);
14475 
14476   // Only perform this combine on legal MVTs.
14477   if (!TLI.isTypeLegal(VT))
14478     return SDValue();
14479 
14480   // TODO: Potentially extend this to scalable vectors
14481   if (VT.isScalableVector())
14482     return SDValue();
14483 
14484   auto *BaseLd = dyn_cast<LoadSDNode>(N->getOperand(0));
14485   if (!BaseLd || !BaseLd->isSimple() || !ISD::isNormalLoad(BaseLd) ||
14486       !SDValue(BaseLd, 0).hasOneUse())
14487     return SDValue();
14488 
14489   EVT BaseLdVT = BaseLd->getValueType(0);
14490 
14491   // Go through the loads and check that they're strided
14492   SmallVector<LoadSDNode *> Lds;
14493   Lds.push_back(BaseLd);
14494   Align Align = BaseLd->getAlign();
14495   for (SDValue Op : N->ops().drop_front()) {
14496     auto *Ld = dyn_cast<LoadSDNode>(Op);
14497     if (!Ld || !Ld->isSimple() || !Op.hasOneUse() ||
14498         Ld->getChain() != BaseLd->getChain() || !ISD::isNormalLoad(Ld) ||
14499         Ld->getValueType(0) != BaseLdVT)
14500       return SDValue();
14501 
14502     Lds.push_back(Ld);
14503 
14504     // The common alignment is the most restrictive (smallest) of all the loads
14505     Align = std::min(Align, Ld->getAlign());
14506   }
14507 
14508   using PtrDiff = std::pair<std::variant<int64_t, SDValue>, bool>;
14509   auto GetPtrDiff = [&DAG](LoadSDNode *Ld1,
14510                            LoadSDNode *Ld2) -> std::optional<PtrDiff> {
14511     // If the load ptrs can be decomposed into a common (Base + Index) with a
14512     // common constant stride, then return the constant stride.
14513     BaseIndexOffset BIO1 = BaseIndexOffset::match(Ld1, DAG);
14514     BaseIndexOffset BIO2 = BaseIndexOffset::match(Ld2, DAG);
14515     if (BIO1.equalBaseIndex(BIO2, DAG))
14516       return {{BIO2.getOffset() - BIO1.getOffset(), false}};
14517 
14518     // Otherwise try to match (add LastPtr, Stride) or (add NextPtr, Stride)
14519     SDValue P1 = Ld1->getBasePtr();
14520     SDValue P2 = Ld2->getBasePtr();
14521     if (P2.getOpcode() == ISD::ADD && P2.getOperand(0) == P1)
14522       return {{P2.getOperand(1), false}};
14523     if (P1.getOpcode() == ISD::ADD && P1.getOperand(0) == P2)
14524       return {{P1.getOperand(1), true}};
14525 
14526     return std::nullopt;
14527   };
14528 
14529   // Get the distance between the first and second loads
14530   auto BaseDiff = GetPtrDiff(Lds[0], Lds[1]);
14531   if (!BaseDiff)
14532     return SDValue();
14533 
14534   // Check all the loads are the same distance apart
14535   for (auto *It = Lds.begin() + 1; It != Lds.end() - 1; It++)
14536     if (GetPtrDiff(*It, *std::next(It)) != BaseDiff)
14537       return SDValue();
14538 
14539   // TODO: At this point, we've successfully matched a generalized gather
14540   // load.  Maybe we should emit that, and then move the specialized
14541   // matchers above and below into a DAG combine?
14542 
14543   // Get the widened scalar type, e.g. v4i8 -> i64
14544   unsigned WideScalarBitWidth =
14545       BaseLdVT.getScalarSizeInBits() * BaseLdVT.getVectorNumElements();
14546   MVT WideScalarVT = MVT::getIntegerVT(WideScalarBitWidth);
14547 
14548   // Get the vector type for the strided load, e.g. 4 x v4i8 -> v4i64
14549   MVT WideVecVT = MVT::getVectorVT(WideScalarVT, N->getNumOperands());
14550   if (!TLI.isTypeLegal(WideVecVT))
14551     return SDValue();
14552 
14553   // Check that the operation is legal
14554   if (!TLI.isLegalStridedLoadStore(WideVecVT, Align))
14555     return SDValue();
14556 
14557   auto [StrideVariant, MustNegateStride] = *BaseDiff;
14558   SDValue Stride = std::holds_alternative<SDValue>(StrideVariant)
14559                        ? std::get<SDValue>(StrideVariant)
14560                        : DAG.getConstant(std::get<int64_t>(StrideVariant), DL,
14561                                          Lds[0]->getOffset().getValueType());
14562   if (MustNegateStride)
14563     Stride = DAG.getNegative(Stride, DL, Stride.getValueType());
14564 
14565   SDVTList VTs = DAG.getVTList({WideVecVT, MVT::Other});
14566   SDValue IntID =
14567     DAG.getTargetConstant(Intrinsic::riscv_masked_strided_load, DL,
14568                           Subtarget.getXLenVT());
14569 
14570   SDValue AllOneMask =
14571     DAG.getSplat(WideVecVT.changeVectorElementType(MVT::i1), DL,
14572                  DAG.getConstant(1, DL, MVT::i1));
14573 
14574   SDValue Ops[] = {BaseLd->getChain(),   IntID,  DAG.getUNDEF(WideVecVT),
14575                    BaseLd->getBasePtr(), Stride, AllOneMask};
14576 
14577   uint64_t MemSize;
14578   if (auto *ConstStride = dyn_cast<ConstantSDNode>(Stride);
14579       ConstStride && ConstStride->getSExtValue() >= 0)
14580     // total size = (elsize * n) + (stride - elsize) * (n-1)
14581     //            = elsize + stride * (n-1)
14582     MemSize = WideScalarVT.getSizeInBits() +
14583               ConstStride->getSExtValue() * (N->getNumOperands() - 1);
14584   else
14585     // If Stride isn't constant, then we can't know how much it will load
14586     MemSize = MemoryLocation::UnknownSize;
14587 
14588   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
14589       BaseLd->getPointerInfo(), BaseLd->getMemOperand()->getFlags(), MemSize,
14590       Align);
14591 
14592   SDValue StridedLoad = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs,
14593                                                 Ops, WideVecVT, MMO);
14594   for (SDValue Ld : N->ops())
14595     DAG.makeEquivalentMemoryOrdering(cast<LoadSDNode>(Ld), StridedLoad);
14596 
14597   return DAG.getBitcast(VT.getSimpleVT(), StridedLoad);
14598 }
14599 
14600 static SDValue combineToVWMACC(SDNode *N, SelectionDAG &DAG,
14601                                const RISCVSubtarget &Subtarget) {
14602   assert(N->getOpcode() == RISCVISD::ADD_VL);
14603   SDValue Addend = N->getOperand(0);
14604   SDValue MulOp = N->getOperand(1);
14605   SDValue AddMergeOp = N->getOperand(2);
14606 
14607   if (!AddMergeOp.isUndef())
14608     return SDValue();
14609 
14610   auto IsVWMulOpc = [](unsigned Opc) {
14611     switch (Opc) {
14612     case RISCVISD::VWMUL_VL:
14613     case RISCVISD::VWMULU_VL:
14614     case RISCVISD::VWMULSU_VL:
14615       return true;
14616     default:
14617       return false;
14618     }
14619   };
14620 
14621   if (!IsVWMulOpc(MulOp.getOpcode()))
14622     std::swap(Addend, MulOp);
14623 
14624   if (!IsVWMulOpc(MulOp.getOpcode()))
14625     return SDValue();
14626 
14627   SDValue MulMergeOp = MulOp.getOperand(2);
14628 
14629   if (!MulMergeOp.isUndef())
14630     return SDValue();
14631 
14632   SDValue AddMask = N->getOperand(3);
14633   SDValue AddVL = N->getOperand(4);
14634   SDValue MulMask = MulOp.getOperand(3);
14635   SDValue MulVL = MulOp.getOperand(4);
14636 
14637   if (AddMask != MulMask || AddVL != MulVL)
14638     return SDValue();
14639 
14640   unsigned Opc = RISCVISD::VWMACC_VL + MulOp.getOpcode() - RISCVISD::VWMUL_VL;
14641   static_assert(RISCVISD::VWMACC_VL + 1 == RISCVISD::VWMACCU_VL,
14642                 "Unexpected opcode after VWMACC_VL");
14643   static_assert(RISCVISD::VWMACC_VL + 2 == RISCVISD::VWMACCSU_VL,
14644                 "Unexpected opcode after VWMACC_VL!");
14645   static_assert(RISCVISD::VWMUL_VL + 1 == RISCVISD::VWMULU_VL,
14646                 "Unexpected opcode after VWMUL_VL!");
14647   static_assert(RISCVISD::VWMUL_VL + 2 == RISCVISD::VWMULSU_VL,
14648                 "Unexpected opcode after VWMUL_VL!");
14649 
14650   SDLoc DL(N);
14651   EVT VT = N->getValueType(0);
14652   SDValue Ops[] = {MulOp.getOperand(0), MulOp.getOperand(1), Addend, AddMask,
14653                    AddVL};
14654   return DAG.getNode(Opc, DL, VT, Ops);
14655 }
14656 
14657 static bool legalizeScatterGatherIndexType(SDLoc DL, SDValue &Index,
14658                                            ISD::MemIndexType &IndexType,
14659                                            RISCVTargetLowering::DAGCombinerInfo &DCI) {
14660   if (!DCI.isBeforeLegalize())
14661     return false;
14662 
14663   SelectionDAG &DAG = DCI.DAG;
14664   const MVT XLenVT =
14665     DAG.getMachineFunction().getSubtarget<RISCVSubtarget>().getXLenVT();
14666 
14667   const EVT IndexVT = Index.getValueType();
14668 
14669   // RISC-V indexed loads only support the "unsigned unscaled" addressing
14670   // mode, so anything else must be manually legalized.
14671   if (!isIndexTypeSigned(IndexType))
14672     return false;
14673 
14674   if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
14675     // Any index legalization should first promote to XLenVT, so we don't lose
14676     // bits when scaling. This may create an illegal index type so we let
14677     // LLVM's legalization take care of the splitting.
14678     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
14679     Index = DAG.getNode(ISD::SIGN_EXTEND, DL,
14680                         IndexVT.changeVectorElementType(XLenVT), Index);
14681   }
14682   IndexType = ISD::UNSIGNED_SCALED;
14683   return true;
14684 }
14685 
14686 /// Match the index vector of a scatter or gather node as the shuffle mask
14687 /// which performs the rearrangement if possible.  Will only match if
14688 /// all lanes are touched, and thus replacing the scatter or gather with
14689 /// a unit strided access and shuffle is legal.
14690 static bool matchIndexAsShuffle(EVT VT, SDValue Index, SDValue Mask,
14691                                 SmallVector<int> &ShuffleMask) {
14692   if (!ISD::isConstantSplatVectorAllOnes(Mask.getNode()))
14693     return false;
14694   if (!ISD::isBuildVectorOfConstantSDNodes(Index.getNode()))
14695     return false;
14696 
14697   const unsigned ElementSize = VT.getScalarStoreSize();
14698   const unsigned NumElems = VT.getVectorNumElements();
14699 
14700   // Create the shuffle mask and check all bits active
14701   assert(ShuffleMask.empty());
14702   BitVector ActiveLanes(NumElems);
14703   for (unsigned i = 0; i < Index->getNumOperands(); i++) {
14704     // TODO: We've found an active bit of UB, and could be
14705     // more aggressive here if desired.
14706     if (Index->getOperand(i)->isUndef())
14707       return false;
14708     uint64_t C = Index->getConstantOperandVal(i);
14709     if (C % ElementSize != 0)
14710       return false;
14711     C = C / ElementSize;
14712     if (C >= NumElems)
14713       return false;
14714     ShuffleMask.push_back(C);
14715     ActiveLanes.set(C);
14716   }
14717   return ActiveLanes.all();
14718 }
14719 
14720 /// Match the index of a gather or scatter operation as an operation
14721 /// with twice the element width and half the number of elements.  This is
14722 /// generally profitable (if legal) because these operations are linear
14723 /// in VL, so even if we cause some extract VTYPE/VL toggles, we still
14724 /// come out ahead.
14725 static bool matchIndexAsWiderOp(EVT VT, SDValue Index, SDValue Mask,
14726                                 Align BaseAlign, const RISCVSubtarget &ST) {
14727   if (!ISD::isConstantSplatVectorAllOnes(Mask.getNode()))
14728     return false;
14729   if (!ISD::isBuildVectorOfConstantSDNodes(Index.getNode()))
14730     return false;
14731 
14732   // Attempt a doubling.  If we can use a element type 4x or 8x in
14733   // size, this will happen via multiply iterations of the transform.
14734   const unsigned NumElems = VT.getVectorNumElements();
14735   if (NumElems % 2 != 0)
14736     return false;
14737 
14738   const unsigned ElementSize = VT.getScalarStoreSize();
14739   const unsigned WiderElementSize = ElementSize * 2;
14740   if (WiderElementSize > ST.getELen()/8)
14741     return false;
14742 
14743   if (!ST.hasFastUnalignedAccess() && BaseAlign < WiderElementSize)
14744     return false;
14745 
14746   for (unsigned i = 0; i < Index->getNumOperands(); i++) {
14747     // TODO: We've found an active bit of UB, and could be
14748     // more aggressive here if desired.
14749     if (Index->getOperand(i)->isUndef())
14750       return false;
14751     // TODO: This offset check is too strict if we support fully
14752     // misaligned memory operations.
14753     uint64_t C = Index->getConstantOperandVal(i);
14754     if (i % 2 == 0) {
14755       if (C % WiderElementSize != 0)
14756         return false;
14757       continue;
14758     }
14759     uint64_t Last = Index->getConstantOperandVal(i-1);
14760     if (C != Last + ElementSize)
14761       return false;
14762   }
14763   return true;
14764 }
14765 
14766 
14767 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
14768                                                DAGCombinerInfo &DCI) const {
14769   SelectionDAG &DAG = DCI.DAG;
14770   const MVT XLenVT = Subtarget.getXLenVT();
14771   SDLoc DL(N);
14772 
14773   // Helper to call SimplifyDemandedBits on an operand of N where only some low
14774   // bits are demanded. N will be added to the Worklist if it was not deleted.
14775   // Caller should return SDValue(N, 0) if this returns true.
14776   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
14777     SDValue Op = N->getOperand(OpNo);
14778     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
14779     if (!SimplifyDemandedBits(Op, Mask, DCI))
14780       return false;
14781 
14782     if (N->getOpcode() != ISD::DELETED_NODE)
14783       DCI.AddToWorklist(N);
14784     return true;
14785   };
14786 
14787   switch (N->getOpcode()) {
14788   default:
14789     break;
14790   case RISCVISD::SplitF64: {
14791     SDValue Op0 = N->getOperand(0);
14792     // If the input to SplitF64 is just BuildPairF64 then the operation is
14793     // redundant. Instead, use BuildPairF64's operands directly.
14794     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
14795       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
14796 
14797     if (Op0->isUndef()) {
14798       SDValue Lo = DAG.getUNDEF(MVT::i32);
14799       SDValue Hi = DAG.getUNDEF(MVT::i32);
14800       return DCI.CombineTo(N, Lo, Hi);
14801     }
14802 
14803     // It's cheaper to materialise two 32-bit integers than to load a double
14804     // from the constant pool and transfer it to integer registers through the
14805     // stack.
14806     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
14807       APInt V = C->getValueAPF().bitcastToAPInt();
14808       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
14809       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
14810       return DCI.CombineTo(N, Lo, Hi);
14811     }
14812 
14813     // This is a target-specific version of a DAGCombine performed in
14814     // DAGCombiner::visitBITCAST. It performs the equivalent of:
14815     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
14816     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
14817     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
14818         !Op0.getNode()->hasOneUse())
14819       break;
14820     SDValue NewSplitF64 =
14821         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
14822                     Op0.getOperand(0));
14823     SDValue Lo = NewSplitF64.getValue(0);
14824     SDValue Hi = NewSplitF64.getValue(1);
14825     APInt SignBit = APInt::getSignMask(32);
14826     if (Op0.getOpcode() == ISD::FNEG) {
14827       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
14828                                   DAG.getConstant(SignBit, DL, MVT::i32));
14829       return DCI.CombineTo(N, Lo, NewHi);
14830     }
14831     assert(Op0.getOpcode() == ISD::FABS);
14832     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
14833                                 DAG.getConstant(~SignBit, DL, MVT::i32));
14834     return DCI.CombineTo(N, Lo, NewHi);
14835   }
14836   case RISCVISD::SLLW:
14837   case RISCVISD::SRAW:
14838   case RISCVISD::SRLW:
14839   case RISCVISD::RORW:
14840   case RISCVISD::ROLW: {
14841     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
14842     if (SimplifyDemandedLowBitsHelper(0, 32) ||
14843         SimplifyDemandedLowBitsHelper(1, 5))
14844       return SDValue(N, 0);
14845 
14846     break;
14847   }
14848   case RISCVISD::CLZW:
14849   case RISCVISD::CTZW: {
14850     // Only the lower 32 bits of the first operand are read
14851     if (SimplifyDemandedLowBitsHelper(0, 32))
14852       return SDValue(N, 0);
14853     break;
14854   }
14855   case RISCVISD::FMV_W_X_RV64: {
14856     // If the input to FMV_W_X_RV64 is just FMV_X_ANYEXTW_RV64 the the
14857     // conversion is unnecessary and can be replaced with the
14858     // FMV_X_ANYEXTW_RV64 operand.
14859     SDValue Op0 = N->getOperand(0);
14860     if (Op0.getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64)
14861       return Op0.getOperand(0);
14862     break;
14863   }
14864   case RISCVISD::FMV_X_ANYEXTH:
14865   case RISCVISD::FMV_X_ANYEXTW_RV64: {
14866     SDLoc DL(N);
14867     SDValue Op0 = N->getOperand(0);
14868     MVT VT = N->getSimpleValueType(0);
14869     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
14870     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
14871     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
14872     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
14873          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
14874         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
14875          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
14876       assert(Op0.getOperand(0).getValueType() == VT &&
14877              "Unexpected value type!");
14878       return Op0.getOperand(0);
14879     }
14880 
14881     // This is a target-specific version of a DAGCombine performed in
14882     // DAGCombiner::visitBITCAST. It performs the equivalent of:
14883     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
14884     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
14885     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
14886         !Op0.getNode()->hasOneUse())
14887       break;
14888     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
14889     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
14890     APInt SignBit = APInt::getSignMask(FPBits).sext(VT.getSizeInBits());
14891     if (Op0.getOpcode() == ISD::FNEG)
14892       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
14893                          DAG.getConstant(SignBit, DL, VT));
14894 
14895     assert(Op0.getOpcode() == ISD::FABS);
14896     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
14897                        DAG.getConstant(~SignBit, DL, VT));
14898   }
14899   case ISD::ADD:
14900     return performADDCombine(N, DAG, Subtarget);
14901   case ISD::SUB:
14902     return performSUBCombine(N, DAG, Subtarget);
14903   case ISD::AND:
14904     return performANDCombine(N, DCI, Subtarget);
14905   case ISD::OR:
14906     return performORCombine(N, DCI, Subtarget);
14907   case ISD::XOR:
14908     return performXORCombine(N, DAG, Subtarget);
14909   case ISD::MUL:
14910     return performMULCombine(N, DAG);
14911   case ISD::FADD:
14912   case ISD::UMAX:
14913   case ISD::UMIN:
14914   case ISD::SMAX:
14915   case ISD::SMIN:
14916   case ISD::FMAXNUM:
14917   case ISD::FMINNUM: {
14918     if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
14919       return V;
14920     if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget))
14921       return V;
14922     return SDValue();
14923   }
14924   case ISD::SETCC:
14925     return performSETCCCombine(N, DAG, Subtarget);
14926   case ISD::SIGN_EXTEND_INREG:
14927     return performSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
14928   case ISD::ZERO_EXTEND:
14929     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
14930     // type legalization. This is safe because fp_to_uint produces poison if
14931     // it overflows.
14932     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
14933       SDValue Src = N->getOperand(0);
14934       if (Src.getOpcode() == ISD::FP_TO_UINT &&
14935           isTypeLegal(Src.getOperand(0).getValueType()))
14936         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
14937                            Src.getOperand(0));
14938       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
14939           isTypeLegal(Src.getOperand(1).getValueType())) {
14940         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
14941         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
14942                                   Src.getOperand(0), Src.getOperand(1));
14943         DCI.CombineTo(N, Res);
14944         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
14945         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
14946         return SDValue(N, 0); // Return N so it doesn't get rechecked.
14947       }
14948     }
14949     return SDValue();
14950   case RISCVISD::TRUNCATE_VECTOR_VL: {
14951     // trunc (sra sext (X), zext (Y)) -> sra (X, smin (Y, scalarsize(Y) - 1))
14952     // This would be benefit for the cases where X and Y are both the same value
14953     // type of low precision vectors. Since the truncate would be lowered into
14954     // n-levels TRUNCATE_VECTOR_VL to satisfy RVV's SEW*2->SEW truncate
14955     // restriction, such pattern would be expanded into a series of "vsetvli"
14956     // and "vnsrl" instructions later to reach this point.
14957     auto IsTruncNode = [](SDValue V) {
14958       if (V.getOpcode() != RISCVISD::TRUNCATE_VECTOR_VL)
14959         return false;
14960       SDValue VL = V.getOperand(2);
14961       auto *C = dyn_cast<ConstantSDNode>(VL);
14962       // Assume all TRUNCATE_VECTOR_VL nodes use VLMAX for VMSET_VL operand
14963       bool IsVLMAXForVMSET = (C && C->isAllOnes()) ||
14964                              (isa<RegisterSDNode>(VL) &&
14965                               cast<RegisterSDNode>(VL)->getReg() == RISCV::X0);
14966       return V.getOperand(1).getOpcode() == RISCVISD::VMSET_VL &&
14967              IsVLMAXForVMSET;
14968     };
14969 
14970     SDValue Op = N->getOperand(0);
14971 
14972     // We need to first find the inner level of TRUNCATE_VECTOR_VL node
14973     // to distinguish such pattern.
14974     while (IsTruncNode(Op)) {
14975       if (!Op.hasOneUse())
14976         return SDValue();
14977       Op = Op.getOperand(0);
14978     }
14979 
14980     if (Op.getOpcode() == ISD::SRA && Op.hasOneUse()) {
14981       SDValue N0 = Op.getOperand(0);
14982       SDValue N1 = Op.getOperand(1);
14983       if (N0.getOpcode() == ISD::SIGN_EXTEND && N0.hasOneUse() &&
14984           N1.getOpcode() == ISD::ZERO_EXTEND && N1.hasOneUse()) {
14985         SDValue N00 = N0.getOperand(0);
14986         SDValue N10 = N1.getOperand(0);
14987         if (N00.getValueType().isVector() &&
14988             N00.getValueType() == N10.getValueType() &&
14989             N->getValueType(0) == N10.getValueType()) {
14990           unsigned MaxShAmt = N10.getValueType().getScalarSizeInBits() - 1;
14991           SDValue SMin = DAG.getNode(
14992               ISD::SMIN, SDLoc(N1), N->getValueType(0), N10,
14993               DAG.getConstant(MaxShAmt, SDLoc(N1), N->getValueType(0)));
14994           return DAG.getNode(ISD::SRA, SDLoc(N), N->getValueType(0), N00, SMin);
14995         }
14996       }
14997     }
14998     break;
14999   }
15000   case ISD::TRUNCATE:
15001     return performTRUNCATECombine(N, DAG, Subtarget);
15002   case ISD::SELECT:
15003     return performSELECTCombine(N, DAG, Subtarget);
15004   case RISCVISD::CZERO_EQZ:
15005   case RISCVISD::CZERO_NEZ:
15006     // czero_eq X, (xor Y, 1) -> czero_ne X, Y if Y is 0 or 1.
15007     // czero_ne X, (xor Y, 1) -> czero_eq X, Y if Y is 0 or 1.
15008     if (N->getOperand(1).getOpcode() == ISD::XOR &&
15009         isOneConstant(N->getOperand(1).getOperand(1))) {
15010       SDValue Cond = N->getOperand(1).getOperand(0);
15011       APInt Mask = APInt::getBitsSetFrom(Cond.getValueSizeInBits(), 1);
15012       if (DAG.MaskedValueIsZero(Cond, Mask)) {
15013         unsigned NewOpc = N->getOpcode() == RISCVISD::CZERO_EQZ
15014                               ? RISCVISD::CZERO_NEZ
15015                               : RISCVISD::CZERO_EQZ;
15016         return DAG.getNode(NewOpc, SDLoc(N), N->getValueType(0),
15017                            N->getOperand(0), Cond);
15018       }
15019     }
15020     return SDValue();
15021 
15022   case RISCVISD::SELECT_CC: {
15023     // Transform
15024     SDValue LHS = N->getOperand(0);
15025     SDValue RHS = N->getOperand(1);
15026     SDValue CC = N->getOperand(2);
15027     ISD::CondCode CCVal = cast<CondCodeSDNode>(CC)->get();
15028     SDValue TrueV = N->getOperand(3);
15029     SDValue FalseV = N->getOperand(4);
15030     SDLoc DL(N);
15031     EVT VT = N->getValueType(0);
15032 
15033     // If the True and False values are the same, we don't need a select_cc.
15034     if (TrueV == FalseV)
15035       return TrueV;
15036 
15037     // (select (x < 0), y, z)  -> x >> (XLEN - 1) & (y - z) + z
15038     // (select (x >= 0), y, z) -> x >> (XLEN - 1) & (z - y) + y
15039     if (!Subtarget.hasShortForwardBranchOpt() && isa<ConstantSDNode>(TrueV) &&
15040         isa<ConstantSDNode>(FalseV) && isNullConstant(RHS) &&
15041         (CCVal == ISD::CondCode::SETLT || CCVal == ISD::CondCode::SETGE)) {
15042       if (CCVal == ISD::CondCode::SETGE)
15043         std::swap(TrueV, FalseV);
15044 
15045       int64_t TrueSImm = cast<ConstantSDNode>(TrueV)->getSExtValue();
15046       int64_t FalseSImm = cast<ConstantSDNode>(FalseV)->getSExtValue();
15047       // Only handle simm12, if it is not in this range, it can be considered as
15048       // register.
15049       if (isInt<12>(TrueSImm) && isInt<12>(FalseSImm) &&
15050           isInt<12>(TrueSImm - FalseSImm)) {
15051         SDValue SRA =
15052             DAG.getNode(ISD::SRA, DL, VT, LHS,
15053                         DAG.getConstant(Subtarget.getXLen() - 1, DL, VT));
15054         SDValue AND =
15055             DAG.getNode(ISD::AND, DL, VT, SRA,
15056                         DAG.getConstant(TrueSImm - FalseSImm, DL, VT));
15057         return DAG.getNode(ISD::ADD, DL, VT, AND, FalseV);
15058       }
15059 
15060       if (CCVal == ISD::CondCode::SETGE)
15061         std::swap(TrueV, FalseV);
15062     }
15063 
15064     if (combine_CC(LHS, RHS, CC, DL, DAG, Subtarget))
15065       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
15066                          {LHS, RHS, CC, TrueV, FalseV});
15067 
15068     if (!Subtarget.hasShortForwardBranchOpt()) {
15069       // (select c, -1, y) -> -c | y
15070       if (isAllOnesConstant(TrueV)) {
15071         SDValue C = DAG.getSetCC(DL, VT, LHS, RHS, CCVal);
15072         SDValue Neg = DAG.getNegative(C, DL, VT);
15073         return DAG.getNode(ISD::OR, DL, VT, Neg, FalseV);
15074       }
15075       // (select c, y, -1) -> -!c | y
15076       if (isAllOnesConstant(FalseV)) {
15077         SDValue C =
15078             DAG.getSetCC(DL, VT, LHS, RHS, ISD::getSetCCInverse(CCVal, VT));
15079         SDValue Neg = DAG.getNegative(C, DL, VT);
15080         return DAG.getNode(ISD::OR, DL, VT, Neg, TrueV);
15081       }
15082 
15083       // (select c, 0, y) -> -!c & y
15084       if (isNullConstant(TrueV)) {
15085         SDValue C =
15086             DAG.getSetCC(DL, VT, LHS, RHS, ISD::getSetCCInverse(CCVal, VT));
15087         SDValue Neg = DAG.getNegative(C, DL, VT);
15088         return DAG.getNode(ISD::AND, DL, VT, Neg, FalseV);
15089       }
15090       // (select c, y, 0) -> -c & y
15091       if (isNullConstant(FalseV)) {
15092         SDValue C = DAG.getSetCC(DL, VT, LHS, RHS, CCVal);
15093         SDValue Neg = DAG.getNegative(C, DL, VT);
15094         return DAG.getNode(ISD::AND, DL, VT, Neg, TrueV);
15095       }
15096       // (riscvisd::select_cc x, 0, ne, x, 1) -> (add x, (setcc x, 0, eq))
15097       // (riscvisd::select_cc x, 0, eq, 1, x) -> (add x, (setcc x, 0, eq))
15098       if (((isOneConstant(FalseV) && LHS == TrueV &&
15099             CCVal == ISD::CondCode::SETNE) ||
15100            (isOneConstant(TrueV) && LHS == FalseV &&
15101             CCVal == ISD::CondCode::SETEQ)) &&
15102           isNullConstant(RHS)) {
15103         // freeze it to be safe.
15104         LHS = DAG.getFreeze(LHS);
15105         SDValue C = DAG.getSetCC(DL, VT, LHS, RHS, ISD::CondCode::SETEQ);
15106         return DAG.getNode(ISD::ADD, DL, VT, LHS, C);
15107       }
15108     }
15109 
15110     // If both true/false are an xor with 1, pull through the select.
15111     // This can occur after op legalization if both operands are setccs that
15112     // require an xor to invert.
15113     // FIXME: Generalize to other binary ops with identical operand?
15114     if (TrueV.getOpcode() == ISD::XOR && FalseV.getOpcode() == ISD::XOR &&
15115         TrueV.getOperand(1) == FalseV.getOperand(1) &&
15116         isOneConstant(TrueV.getOperand(1)) &&
15117         TrueV.hasOneUse() && FalseV.hasOneUse()) {
15118       SDValue NewSel = DAG.getNode(RISCVISD::SELECT_CC, DL, VT, LHS, RHS, CC,
15119                                    TrueV.getOperand(0), FalseV.getOperand(0));
15120       return DAG.getNode(ISD::XOR, DL, VT, NewSel, TrueV.getOperand(1));
15121     }
15122 
15123     return SDValue();
15124   }
15125   case RISCVISD::BR_CC: {
15126     SDValue LHS = N->getOperand(1);
15127     SDValue RHS = N->getOperand(2);
15128     SDValue CC = N->getOperand(3);
15129     SDLoc DL(N);
15130 
15131     if (combine_CC(LHS, RHS, CC, DL, DAG, Subtarget))
15132       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
15133                          N->getOperand(0), LHS, RHS, CC, N->getOperand(4));
15134 
15135     return SDValue();
15136   }
15137   case ISD::BITREVERSE:
15138     return performBITREVERSECombine(N, DAG, Subtarget);
15139   case ISD::FP_TO_SINT:
15140   case ISD::FP_TO_UINT:
15141     return performFP_TO_INTCombine(N, DCI, Subtarget);
15142   case ISD::FP_TO_SINT_SAT:
15143   case ISD::FP_TO_UINT_SAT:
15144     return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
15145   case ISD::FCOPYSIGN: {
15146     EVT VT = N->getValueType(0);
15147     if (!VT.isVector())
15148       break;
15149     // There is a form of VFSGNJ which injects the negated sign of its second
15150     // operand. Try and bubble any FNEG up after the extend/round to produce
15151     // this optimized pattern. Avoid modifying cases where FP_ROUND and
15152     // TRUNC=1.
15153     SDValue In2 = N->getOperand(1);
15154     // Avoid cases where the extend/round has multiple uses, as duplicating
15155     // those is typically more expensive than removing a fneg.
15156     if (!In2.hasOneUse())
15157       break;
15158     if (In2.getOpcode() != ISD::FP_EXTEND &&
15159         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
15160       break;
15161     In2 = In2.getOperand(0);
15162     if (In2.getOpcode() != ISD::FNEG)
15163       break;
15164     SDLoc DL(N);
15165     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
15166     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
15167                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
15168   }
15169   case ISD::MGATHER: {
15170     const auto *MGN = dyn_cast<MaskedGatherSDNode>(N);
15171     const EVT VT = N->getValueType(0);
15172     SDValue Index = MGN->getIndex();
15173     SDValue ScaleOp = MGN->getScale();
15174     ISD::MemIndexType IndexType = MGN->getIndexType();
15175     assert(!MGN->isIndexScaled() &&
15176            "Scaled gather/scatter should not be formed");
15177 
15178     SDLoc DL(N);
15179     if (legalizeScatterGatherIndexType(DL, Index, IndexType, DCI))
15180       return DAG.getMaskedGather(
15181           N->getVTList(), MGN->getMemoryVT(), DL,
15182           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
15183            MGN->getBasePtr(), Index, ScaleOp},
15184           MGN->getMemOperand(), IndexType, MGN->getExtensionType());
15185 
15186     if (narrowIndex(Index, IndexType, DAG))
15187       return DAG.getMaskedGather(
15188           N->getVTList(), MGN->getMemoryVT(), DL,
15189           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
15190            MGN->getBasePtr(), Index, ScaleOp},
15191           MGN->getMemOperand(), IndexType, MGN->getExtensionType());
15192 
15193     if (Index.getOpcode() == ISD::BUILD_VECTOR &&
15194         MGN->getExtensionType() == ISD::NON_EXTLOAD) {
15195       if (std::optional<VIDSequence> SimpleVID = isSimpleVIDSequence(Index);
15196           SimpleVID && SimpleVID->StepDenominator == 1) {
15197         const int64_t StepNumerator = SimpleVID->StepNumerator;
15198         const int64_t Addend = SimpleVID->Addend;
15199 
15200         // Note: We don't need to check alignment here since (by assumption
15201         // from the existance of the gather), our offsets must be sufficiently
15202         // aligned.
15203 
15204         const EVT PtrVT = getPointerTy(DAG.getDataLayout());
15205         assert(MGN->getBasePtr()->getValueType(0) == PtrVT);
15206         assert(IndexType == ISD::UNSIGNED_SCALED);
15207         SDValue BasePtr = DAG.getNode(ISD::ADD, DL, PtrVT, MGN->getBasePtr(),
15208                                       DAG.getConstant(Addend, DL, PtrVT));
15209 
15210         SDVTList VTs = DAG.getVTList({VT, MVT::Other});
15211         SDValue IntID =
15212           DAG.getTargetConstant(Intrinsic::riscv_masked_strided_load, DL,
15213                                 XLenVT);
15214         SDValue Ops[] =
15215           {MGN->getChain(), IntID, MGN->getPassThru(), BasePtr,
15216            DAG.getConstant(StepNumerator, DL, XLenVT), MGN->getMask()};
15217         return DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs,
15218                                        Ops, VT, MGN->getMemOperand());
15219       }
15220     }
15221 
15222     SmallVector<int> ShuffleMask;
15223     if (MGN->getExtensionType() == ISD::NON_EXTLOAD &&
15224         matchIndexAsShuffle(VT, Index, MGN->getMask(), ShuffleMask)) {
15225       SDValue Load = DAG.getMaskedLoad(VT, DL, MGN->getChain(),
15226                                        MGN->getBasePtr(), DAG.getUNDEF(XLenVT),
15227                                        MGN->getMask(), DAG.getUNDEF(VT),
15228                                        MGN->getMemoryVT(), MGN->getMemOperand(),
15229                                        ISD::UNINDEXED, ISD::NON_EXTLOAD);
15230       SDValue Shuffle =
15231         DAG.getVectorShuffle(VT, DL, Load, DAG.getUNDEF(VT), ShuffleMask);
15232       return DAG.getMergeValues({Shuffle, Load.getValue(1)}, DL);
15233     }
15234 
15235     if (MGN->getExtensionType() == ISD::NON_EXTLOAD &&
15236         matchIndexAsWiderOp(VT, Index, MGN->getMask(),
15237                             MGN->getMemOperand()->getBaseAlign(), Subtarget)) {
15238       SmallVector<SDValue> NewIndices;
15239       for (unsigned i = 0; i < Index->getNumOperands(); i += 2)
15240         NewIndices.push_back(Index.getOperand(i));
15241       EVT IndexVT = Index.getValueType()
15242         .getHalfNumVectorElementsVT(*DAG.getContext());
15243       Index = DAG.getBuildVector(IndexVT, DL, NewIndices);
15244 
15245       unsigned ElementSize = VT.getScalarStoreSize();
15246       EVT WideScalarVT = MVT::getIntegerVT(ElementSize * 8 * 2);
15247       auto EltCnt = VT.getVectorElementCount();
15248       assert(EltCnt.isKnownEven() && "Splitting vector, but not in half!");
15249       EVT WideVT = EVT::getVectorVT(*DAG.getContext(), WideScalarVT,
15250                                     EltCnt.divideCoefficientBy(2));
15251       SDValue Passthru = DAG.getBitcast(WideVT, MGN->getPassThru());
15252       EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
15253                                     EltCnt.divideCoefficientBy(2));
15254       SDValue Mask = DAG.getSplat(MaskVT, DL, DAG.getConstant(1, DL, MVT::i1));
15255 
15256       SDValue Gather =
15257         DAG.getMaskedGather(DAG.getVTList(WideVT, MVT::Other), WideVT, DL,
15258                             {MGN->getChain(), Passthru, Mask, MGN->getBasePtr(),
15259                              Index, ScaleOp},
15260                             MGN->getMemOperand(), IndexType, ISD::NON_EXTLOAD);
15261       SDValue Result = DAG.getBitcast(VT, Gather.getValue(0));
15262       return DAG.getMergeValues({Result, Gather.getValue(1)}, DL);
15263     }
15264     break;
15265   }
15266   case ISD::MSCATTER:{
15267     const auto *MSN = dyn_cast<MaskedScatterSDNode>(N);
15268     SDValue Index = MSN->getIndex();
15269     SDValue ScaleOp = MSN->getScale();
15270     ISD::MemIndexType IndexType = MSN->getIndexType();
15271     assert(!MSN->isIndexScaled() &&
15272            "Scaled gather/scatter should not be formed");
15273 
15274     SDLoc DL(N);
15275     if (legalizeScatterGatherIndexType(DL, Index, IndexType, DCI))
15276       return DAG.getMaskedScatter(
15277           N->getVTList(), MSN->getMemoryVT(), DL,
15278           {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
15279            Index, ScaleOp},
15280           MSN->getMemOperand(), IndexType, MSN->isTruncatingStore());
15281 
15282     if (narrowIndex(Index, IndexType, DAG))
15283       return DAG.getMaskedScatter(
15284           N->getVTList(), MSN->getMemoryVT(), DL,
15285           {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
15286            Index, ScaleOp},
15287           MSN->getMemOperand(), IndexType, MSN->isTruncatingStore());
15288 
15289     EVT VT = MSN->getValue()->getValueType(0);
15290     SmallVector<int> ShuffleMask;
15291     if (!MSN->isTruncatingStore() &&
15292         matchIndexAsShuffle(VT, Index, MSN->getMask(), ShuffleMask)) {
15293       SDValue Shuffle = DAG.getVectorShuffle(VT, DL, MSN->getValue(),
15294                                              DAG.getUNDEF(VT), ShuffleMask);
15295       return DAG.getMaskedStore(MSN->getChain(), DL, Shuffle, MSN->getBasePtr(),
15296                                 DAG.getUNDEF(XLenVT), MSN->getMask(),
15297                                 MSN->getMemoryVT(), MSN->getMemOperand(),
15298                                 ISD::UNINDEXED, false);
15299     }
15300     break;
15301   }
15302   case ISD::VP_GATHER: {
15303     const auto *VPGN = dyn_cast<VPGatherSDNode>(N);
15304     SDValue Index = VPGN->getIndex();
15305     SDValue ScaleOp = VPGN->getScale();
15306     ISD::MemIndexType IndexType = VPGN->getIndexType();
15307     assert(!VPGN->isIndexScaled() &&
15308            "Scaled gather/scatter should not be formed");
15309 
15310     SDLoc DL(N);
15311     if (legalizeScatterGatherIndexType(DL, Index, IndexType, DCI))
15312       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
15313                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
15314                               ScaleOp, VPGN->getMask(),
15315                               VPGN->getVectorLength()},
15316                              VPGN->getMemOperand(), IndexType);
15317 
15318     if (narrowIndex(Index, IndexType, DAG))
15319       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
15320                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
15321                               ScaleOp, VPGN->getMask(),
15322                               VPGN->getVectorLength()},
15323                              VPGN->getMemOperand(), IndexType);
15324 
15325     break;
15326   }
15327   case ISD::VP_SCATTER: {
15328     const auto *VPSN = dyn_cast<VPScatterSDNode>(N);
15329     SDValue Index = VPSN->getIndex();
15330     SDValue ScaleOp = VPSN->getScale();
15331     ISD::MemIndexType IndexType = VPSN->getIndexType();
15332     assert(!VPSN->isIndexScaled() &&
15333            "Scaled gather/scatter should not be formed");
15334 
15335     SDLoc DL(N);
15336     if (legalizeScatterGatherIndexType(DL, Index, IndexType, DCI))
15337       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
15338                               {VPSN->getChain(), VPSN->getValue(),
15339                                VPSN->getBasePtr(), Index, ScaleOp,
15340                                VPSN->getMask(), VPSN->getVectorLength()},
15341                               VPSN->getMemOperand(), IndexType);
15342 
15343     if (narrowIndex(Index, IndexType, DAG))
15344       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
15345                               {VPSN->getChain(), VPSN->getValue(),
15346                                VPSN->getBasePtr(), Index, ScaleOp,
15347                                VPSN->getMask(), VPSN->getVectorLength()},
15348                               VPSN->getMemOperand(), IndexType);
15349     break;
15350   }
15351   case RISCVISD::SRA_VL:
15352   case RISCVISD::SRL_VL:
15353   case RISCVISD::SHL_VL: {
15354     SDValue ShAmt = N->getOperand(1);
15355     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
15356       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
15357       SDLoc DL(N);
15358       SDValue VL = N->getOperand(4);
15359       EVT VT = N->getValueType(0);
15360       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
15361                           ShAmt.getOperand(1), VL);
15362       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
15363                          N->getOperand(2), N->getOperand(3), N->getOperand(4));
15364     }
15365     break;
15366   }
15367   case ISD::SRA:
15368     if (SDValue V = performSRACombine(N, DAG, Subtarget))
15369       return V;
15370     [[fallthrough]];
15371   case ISD::SRL:
15372   case ISD::SHL: {
15373     SDValue ShAmt = N->getOperand(1);
15374     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
15375       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
15376       SDLoc DL(N);
15377       EVT VT = N->getValueType(0);
15378       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
15379                           ShAmt.getOperand(1),
15380                           DAG.getRegister(RISCV::X0, Subtarget.getXLenVT()));
15381       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
15382     }
15383     break;
15384   }
15385   case RISCVISD::ADD_VL:
15386     if (SDValue V = combineBinOp_VLToVWBinOp_VL(N, DCI))
15387       return V;
15388     return combineToVWMACC(N, DAG, Subtarget);
15389   case RISCVISD::SUB_VL:
15390   case RISCVISD::VWADD_W_VL:
15391   case RISCVISD::VWADDU_W_VL:
15392   case RISCVISD::VWSUB_W_VL:
15393   case RISCVISD::VWSUBU_W_VL:
15394   case RISCVISD::MUL_VL:
15395     return combineBinOp_VLToVWBinOp_VL(N, DCI);
15396   case RISCVISD::VFMADD_VL:
15397   case RISCVISD::VFNMADD_VL:
15398   case RISCVISD::VFMSUB_VL:
15399   case RISCVISD::VFNMSUB_VL:
15400   case RISCVISD::STRICT_VFMADD_VL:
15401   case RISCVISD::STRICT_VFNMADD_VL:
15402   case RISCVISD::STRICT_VFMSUB_VL:
15403   case RISCVISD::STRICT_VFNMSUB_VL:
15404     return performVFMADD_VLCombine(N, DAG, Subtarget);
15405   case RISCVISD::FMUL_VL:
15406     return performVFMUL_VLCombine(N, DAG, Subtarget);
15407   case RISCVISD::FADD_VL:
15408   case RISCVISD::FSUB_VL:
15409     return performFADDSUB_VLCombine(N, DAG, Subtarget);
15410   case ISD::LOAD:
15411   case ISD::STORE: {
15412     if (DCI.isAfterLegalizeDAG())
15413       if (SDValue V = performMemPairCombine(N, DCI))
15414         return V;
15415 
15416     if (N->getOpcode() != ISD::STORE)
15417       break;
15418 
15419     auto *Store = cast<StoreSDNode>(N);
15420     SDValue Chain = Store->getChain();
15421     EVT MemVT = Store->getMemoryVT();
15422     SDValue Val = Store->getValue();
15423     SDLoc DL(N);
15424 
15425     bool IsScalarizable =
15426         MemVT.isFixedLengthVector() && ISD::isNormalStore(Store) &&
15427         Store->isSimple() &&
15428         MemVT.getVectorElementType().bitsLE(Subtarget.getXLenVT()) &&
15429         isPowerOf2_64(MemVT.getSizeInBits()) &&
15430         MemVT.getSizeInBits() <= Subtarget.getXLen();
15431 
15432     // If sufficiently aligned we can scalarize stores of constant vectors of
15433     // any power-of-two size up to XLen bits, provided that they aren't too
15434     // expensive to materialize.
15435     //   vsetivli   zero, 2, e8, m1, ta, ma
15436     //   vmv.v.i    v8, 4
15437     //   vse64.v    v8, (a0)
15438     // ->
15439     //   li     a1, 1028
15440     //   sh     a1, 0(a0)
15441     if (DCI.isBeforeLegalize() && IsScalarizable &&
15442         ISD::isBuildVectorOfConstantSDNodes(Val.getNode())) {
15443       // Get the constant vector bits
15444       APInt NewC(Val.getValueSizeInBits(), 0);
15445       uint64_t EltSize = Val.getScalarValueSizeInBits();
15446       for (unsigned i = 0; i < Val.getNumOperands(); i++) {
15447         if (Val.getOperand(i).isUndef())
15448           continue;
15449         NewC.insertBits(Val.getConstantOperandAPInt(i).trunc(EltSize),
15450                         i * EltSize);
15451       }
15452       MVT NewVT = MVT::getIntegerVT(MemVT.getSizeInBits());
15453 
15454       if (RISCVMatInt::getIntMatCost(NewC, Subtarget.getXLen(), Subtarget,
15455                                      true) <= 2 &&
15456           allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
15457                                          NewVT, *Store->getMemOperand())) {
15458         SDValue NewV = DAG.getConstant(NewC, DL, NewVT);
15459         return DAG.getStore(Chain, DL, NewV, Store->getBasePtr(),
15460                             Store->getPointerInfo(), Store->getOriginalAlign(),
15461                             Store->getMemOperand()->getFlags());
15462       }
15463     }
15464 
15465     // Similarly, if sufficiently aligned we can scalarize vector copies, e.g.
15466     //   vsetivli   zero, 2, e16, m1, ta, ma
15467     //   vle16.v    v8, (a0)
15468     //   vse16.v    v8, (a1)
15469     if (auto *L = dyn_cast<LoadSDNode>(Val);
15470         L && DCI.isBeforeLegalize() && IsScalarizable && L->isSimple() &&
15471         L->hasNUsesOfValue(1, 0) && L->hasNUsesOfValue(1, 1) &&
15472         Store->getChain() == SDValue(L, 1) && ISD::isNormalLoad(L) &&
15473         L->getMemoryVT() == MemVT) {
15474       MVT NewVT = MVT::getIntegerVT(MemVT.getSizeInBits());
15475       if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
15476                                          NewVT, *Store->getMemOperand()) &&
15477           allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
15478                                          NewVT, *L->getMemOperand())) {
15479         SDValue NewL = DAG.getLoad(NewVT, DL, L->getChain(), L->getBasePtr(),
15480                                    L->getPointerInfo(), L->getOriginalAlign(),
15481                                    L->getMemOperand()->getFlags());
15482         return DAG.getStore(Chain, DL, NewL, Store->getBasePtr(),
15483                             Store->getPointerInfo(), Store->getOriginalAlign(),
15484                             Store->getMemOperand()->getFlags());
15485       }
15486     }
15487 
15488     // Combine store of vmv.x.s/vfmv.f.s to vse with VL of 1.
15489     // vfmv.f.s is represented as extract element from 0. Match it late to avoid
15490     // any illegal types.
15491     if (Val.getOpcode() == RISCVISD::VMV_X_S ||
15492         (DCI.isAfterLegalizeDAG() &&
15493          Val.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
15494          isNullConstant(Val.getOperand(1)))) {
15495       SDValue Src = Val.getOperand(0);
15496       MVT VecVT = Src.getSimpleValueType();
15497       // VecVT should be scalable and memory VT should match the element type.
15498       if (!Store->isIndexed() && VecVT.isScalableVector() &&
15499           MemVT == VecVT.getVectorElementType()) {
15500         SDLoc DL(N);
15501         MVT MaskVT = getMaskTypeFor(VecVT);
15502         return DAG.getStoreVP(
15503             Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
15504             DAG.getConstant(1, DL, MaskVT),
15505             DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
15506             Store->getMemOperand(), Store->getAddressingMode(),
15507             Store->isTruncatingStore(), /*IsCompress*/ false);
15508       }
15509     }
15510 
15511     break;
15512   }
15513   case ISD::SPLAT_VECTOR: {
15514     EVT VT = N->getValueType(0);
15515     // Only perform this combine on legal MVT types.
15516     if (!isTypeLegal(VT))
15517       break;
15518     if (auto Gather = matchSplatAsGather(N->getOperand(0), VT.getSimpleVT(), N,
15519                                          DAG, Subtarget))
15520       return Gather;
15521     break;
15522   }
15523   case ISD::BUILD_VECTOR:
15524     if (SDValue V = performBUILD_VECTORCombine(N, DAG, Subtarget, *this))
15525       return V;
15526     break;
15527   case ISD::CONCAT_VECTORS:
15528     if (SDValue V = performCONCAT_VECTORSCombine(N, DAG, Subtarget, *this))
15529       return V;
15530     break;
15531   case ISD::INSERT_VECTOR_ELT:
15532     if (SDValue V = performINSERT_VECTOR_ELTCombine(N, DAG, Subtarget, *this))
15533       return V;
15534     break;
15535   case RISCVISD::VFMV_V_F_VL: {
15536     const MVT VT = N->getSimpleValueType(0);
15537     SDValue Passthru = N->getOperand(0);
15538     SDValue Scalar = N->getOperand(1);
15539     SDValue VL = N->getOperand(2);
15540 
15541     // If VL is 1, we can use vfmv.s.f.
15542     if (isOneConstant(VL))
15543       return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, Passthru, Scalar, VL);
15544     break;
15545   }
15546   case RISCVISD::VMV_V_X_VL: {
15547     const MVT VT = N->getSimpleValueType(0);
15548     SDValue Passthru = N->getOperand(0);
15549     SDValue Scalar = N->getOperand(1);
15550     SDValue VL = N->getOperand(2);
15551 
15552     // Tail agnostic VMV.V.X only demands the vector element bitwidth from the
15553     // scalar input.
15554     unsigned ScalarSize = Scalar.getValueSizeInBits();
15555     unsigned EltWidth = VT.getScalarSizeInBits();
15556     if (ScalarSize > EltWidth && Passthru.isUndef())
15557       if (SimplifyDemandedLowBitsHelper(1, EltWidth))
15558         return SDValue(N, 0);
15559 
15560     // If VL is 1 and the scalar value won't benefit from immediate, we can
15561     // use vmv.s.x.
15562     ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
15563     if (isOneConstant(VL) &&
15564         (!Const || Const->isZero() ||
15565          !Const->getAPIntValue().sextOrTrunc(EltWidth).isSignedIntN(5)))
15566       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL);
15567 
15568     break;
15569   }
15570   case RISCVISD::VFMV_S_F_VL: {
15571     SDValue Src = N->getOperand(1);
15572     // Try to remove vector->scalar->vector if the scalar->vector is inserting
15573     // into an undef vector.
15574     // TODO: Could use a vslide or vmv.v.v for non-undef.
15575     if (N->getOperand(0).isUndef() &&
15576         Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
15577         isNullConstant(Src.getOperand(1)) &&
15578         Src.getOperand(0).getValueType().isScalableVector()) {
15579       EVT VT = N->getValueType(0);
15580       EVT SrcVT = Src.getOperand(0).getValueType();
15581       assert(SrcVT.getVectorElementType() == VT.getVectorElementType());
15582       // Widths match, just return the original vector.
15583       if (SrcVT == VT)
15584         return Src.getOperand(0);
15585       // TODO: Use insert_subvector/extract_subvector to change widen/narrow?
15586     }
15587     [[fallthrough]];
15588   }
15589   case RISCVISD::VMV_S_X_VL: {
15590     const MVT VT = N->getSimpleValueType(0);
15591     SDValue Passthru = N->getOperand(0);
15592     SDValue Scalar = N->getOperand(1);
15593     SDValue VL = N->getOperand(2);
15594 
15595     // Use M1 or smaller to avoid over constraining register allocation
15596     const MVT M1VT = getLMUL1VT(VT);
15597     if (M1VT.bitsLT(VT)) {
15598       SDValue M1Passthru =
15599           DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, M1VT, Passthru,
15600                       DAG.getVectorIdxConstant(0, DL));
15601       SDValue Result =
15602           DAG.getNode(N->getOpcode(), DL, M1VT, M1Passthru, Scalar, VL);
15603       Result = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Passthru, Result,
15604                            DAG.getConstant(0, DL, XLenVT));
15605       return Result;
15606     }
15607 
15608     // We use a vmv.v.i if possible.  We limit this to LMUL1.  LMUL2 or
15609     // higher would involve overly constraining the register allocator for
15610     // no purpose.
15611     if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
15612         Const && !Const->isZero() && isInt<5>(Const->getSExtValue()) &&
15613         VT.bitsLE(getLMUL1VT(VT)) && Passthru.isUndef())
15614       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
15615 
15616     break;
15617   }
15618   case ISD::INTRINSIC_VOID:
15619   case ISD::INTRINSIC_W_CHAIN:
15620   case ISD::INTRINSIC_WO_CHAIN: {
15621     unsigned IntOpNo = N->getOpcode() == ISD::INTRINSIC_WO_CHAIN ? 0 : 1;
15622     unsigned IntNo = N->getConstantOperandVal(IntOpNo);
15623     switch (IntNo) {
15624       // By default we do not combine any intrinsic.
15625     default:
15626       return SDValue();
15627     case Intrinsic::riscv_masked_strided_load: {
15628       MVT VT = N->getSimpleValueType(0);
15629       auto *Load = cast<MemIntrinsicSDNode>(N);
15630       SDValue PassThru = N->getOperand(2);
15631       SDValue Base = N->getOperand(3);
15632       SDValue Stride = N->getOperand(4);
15633       SDValue Mask = N->getOperand(5);
15634 
15635       // If the stride is equal to the element size in bytes,  we can use
15636       // a masked.load.
15637       const unsigned ElementSize = VT.getScalarStoreSize();
15638       if (auto *StrideC = dyn_cast<ConstantSDNode>(Stride);
15639           StrideC && StrideC->getZExtValue() == ElementSize)
15640         return DAG.getMaskedLoad(VT, DL, Load->getChain(), Base,
15641                                  DAG.getUNDEF(XLenVT), Mask, PassThru,
15642                                  Load->getMemoryVT(), Load->getMemOperand(),
15643                                  ISD::UNINDEXED, ISD::NON_EXTLOAD);
15644       return SDValue();
15645     }
15646     case Intrinsic::riscv_masked_strided_store: {
15647       auto *Store = cast<MemIntrinsicSDNode>(N);
15648       SDValue Value = N->getOperand(2);
15649       SDValue Base = N->getOperand(3);
15650       SDValue Stride = N->getOperand(4);
15651       SDValue Mask = N->getOperand(5);
15652 
15653       // If the stride is equal to the element size in bytes,  we can use
15654       // a masked.store.
15655       const unsigned ElementSize = Value.getValueType().getScalarStoreSize();
15656       if (auto *StrideC = dyn_cast<ConstantSDNode>(Stride);
15657           StrideC && StrideC->getZExtValue() == ElementSize)
15658         return DAG.getMaskedStore(Store->getChain(), DL, Value, Base,
15659                                   DAG.getUNDEF(XLenVT), Mask,
15660                                   Store->getMemoryVT(), Store->getMemOperand(),
15661                                   ISD::UNINDEXED, false);
15662       return SDValue();
15663     }
15664     case Intrinsic::riscv_vcpop:
15665     case Intrinsic::riscv_vcpop_mask:
15666     case Intrinsic::riscv_vfirst:
15667     case Intrinsic::riscv_vfirst_mask: {
15668       SDValue VL = N->getOperand(2);
15669       if (IntNo == Intrinsic::riscv_vcpop_mask ||
15670           IntNo == Intrinsic::riscv_vfirst_mask)
15671         VL = N->getOperand(3);
15672       if (!isNullConstant(VL))
15673         return SDValue();
15674       // If VL is 0, vcpop -> li 0, vfirst -> li -1.
15675       SDLoc DL(N);
15676       EVT VT = N->getValueType(0);
15677       if (IntNo == Intrinsic::riscv_vfirst ||
15678           IntNo == Intrinsic::riscv_vfirst_mask)
15679         return DAG.getConstant(-1, DL, VT);
15680       return DAG.getConstant(0, DL, VT);
15681     }
15682     }
15683   }
15684   case ISD::BITCAST: {
15685     assert(Subtarget.useRVVForFixedLengthVectors());
15686     SDValue N0 = N->getOperand(0);
15687     EVT VT = N->getValueType(0);
15688     EVT SrcVT = N0.getValueType();
15689     // If this is a bitcast between a MVT::v4i1/v2i1/v1i1 and an illegal integer
15690     // type, widen both sides to avoid a trip through memory.
15691     if ((SrcVT == MVT::v1i1 || SrcVT == MVT::v2i1 || SrcVT == MVT::v4i1) &&
15692         VT.isScalarInteger()) {
15693       unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
15694       SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
15695       Ops[0] = N0;
15696       SDLoc DL(N);
15697       N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i1, Ops);
15698       N0 = DAG.getBitcast(MVT::i8, N0);
15699       return DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
15700     }
15701 
15702     return SDValue();
15703   }
15704   }
15705 
15706   return SDValue();
15707 }
15708 
15709 bool RISCVTargetLowering::shouldTransformSignedTruncationCheck(
15710     EVT XVT, unsigned KeptBits) const {
15711   // For vectors, we don't have a preference..
15712   if (XVT.isVector())
15713     return false;
15714 
15715   if (XVT != MVT::i32 && XVT != MVT::i64)
15716     return false;
15717 
15718   // We can use sext.w for RV64 or an srai 31 on RV32.
15719   if (KeptBits == 32 || KeptBits == 64)
15720     return true;
15721 
15722   // With Zbb we can use sext.h/sext.b.
15723   return Subtarget.hasStdExtZbb() &&
15724          ((KeptBits == 8 && XVT == MVT::i64 && !Subtarget.is64Bit()) ||
15725           KeptBits == 16);
15726 }
15727 
15728 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
15729     const SDNode *N, CombineLevel Level) const {
15730   assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
15731           N->getOpcode() == ISD::SRL) &&
15732          "Expected shift op");
15733 
15734   // The following folds are only desirable if `(OP _, c1 << c2)` can be
15735   // materialised in fewer instructions than `(OP _, c1)`:
15736   //
15737   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
15738   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
15739   SDValue N0 = N->getOperand(0);
15740   EVT Ty = N0.getValueType();
15741   if (Ty.isScalarInteger() &&
15742       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
15743     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
15744     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
15745     if (C1 && C2) {
15746       const APInt &C1Int = C1->getAPIntValue();
15747       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
15748 
15749       // We can materialise `c1 << c2` into an add immediate, so it's "free",
15750       // and the combine should happen, to potentially allow further combines
15751       // later.
15752       if (ShiftedC1Int.getSignificantBits() <= 64 &&
15753           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
15754         return true;
15755 
15756       // We can materialise `c1` in an add immediate, so it's "free", and the
15757       // combine should be prevented.
15758       if (C1Int.getSignificantBits() <= 64 &&
15759           isLegalAddImmediate(C1Int.getSExtValue()))
15760         return false;
15761 
15762       // Neither constant will fit into an immediate, so find materialisation
15763       // costs.
15764       int C1Cost =
15765           RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(), Subtarget,
15766                                      /*CompressionCost*/ true);
15767       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
15768           ShiftedC1Int, Ty.getSizeInBits(), Subtarget,
15769           /*CompressionCost*/ true);
15770 
15771       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
15772       // combine should be prevented.
15773       if (C1Cost < ShiftedC1Cost)
15774         return false;
15775     }
15776   }
15777   return true;
15778 }
15779 
15780 bool RISCVTargetLowering::targetShrinkDemandedConstant(
15781     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
15782     TargetLoweringOpt &TLO) const {
15783   // Delay this optimization as late as possible.
15784   if (!TLO.LegalOps)
15785     return false;
15786 
15787   EVT VT = Op.getValueType();
15788   if (VT.isVector())
15789     return false;
15790 
15791   unsigned Opcode = Op.getOpcode();
15792   if (Opcode != ISD::AND && Opcode != ISD::OR && Opcode != ISD::XOR)
15793     return false;
15794 
15795   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
15796   if (!C)
15797     return false;
15798 
15799   const APInt &Mask = C->getAPIntValue();
15800 
15801   // Clear all non-demanded bits initially.
15802   APInt ShrunkMask = Mask & DemandedBits;
15803 
15804   // Try to make a smaller immediate by setting undemanded bits.
15805 
15806   APInt ExpandedMask = Mask | ~DemandedBits;
15807 
15808   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
15809     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
15810   };
15811   auto UseMask = [Mask, Op, &TLO](const APInt &NewMask) -> bool {
15812     if (NewMask == Mask)
15813       return true;
15814     SDLoc DL(Op);
15815     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, Op.getValueType());
15816     SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), DL, Op.getValueType(),
15817                                     Op.getOperand(0), NewC);
15818     return TLO.CombineTo(Op, NewOp);
15819   };
15820 
15821   // If the shrunk mask fits in sign extended 12 bits, let the target
15822   // independent code apply it.
15823   if (ShrunkMask.isSignedIntN(12))
15824     return false;
15825 
15826   // And has a few special cases for zext.
15827   if (Opcode == ISD::AND) {
15828     // Preserve (and X, 0xffff), if zext.h exists use zext.h,
15829     // otherwise use SLLI + SRLI.
15830     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
15831     if (IsLegalMask(NewMask))
15832       return UseMask(NewMask);
15833 
15834     // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
15835     if (VT == MVT::i64) {
15836       APInt NewMask = APInt(64, 0xffffffff);
15837       if (IsLegalMask(NewMask))
15838         return UseMask(NewMask);
15839     }
15840   }
15841 
15842   // For the remaining optimizations, we need to be able to make a negative
15843   // number through a combination of mask and undemanded bits.
15844   if (!ExpandedMask.isNegative())
15845     return false;
15846 
15847   // What is the fewest number of bits we need to represent the negative number.
15848   unsigned MinSignedBits = ExpandedMask.getSignificantBits();
15849 
15850   // Try to make a 12 bit negative immediate. If that fails try to make a 32
15851   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
15852   // If we can't create a simm12, we shouldn't change opaque constants.
15853   APInt NewMask = ShrunkMask;
15854   if (MinSignedBits <= 12)
15855     NewMask.setBitsFrom(11);
15856   else if (!C->isOpaque() && MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
15857     NewMask.setBitsFrom(31);
15858   else
15859     return false;
15860 
15861   // Check that our new mask is a subset of the demanded mask.
15862   assert(IsLegalMask(NewMask));
15863   return UseMask(NewMask);
15864 }
15865 
15866 static uint64_t computeGREVOrGORC(uint64_t x, unsigned ShAmt, bool IsGORC) {
15867   static const uint64_t GREVMasks[] = {
15868       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
15869       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
15870 
15871   for (unsigned Stage = 0; Stage != 6; ++Stage) {
15872     unsigned Shift = 1 << Stage;
15873     if (ShAmt & Shift) {
15874       uint64_t Mask = GREVMasks[Stage];
15875       uint64_t Res = ((x & Mask) << Shift) | ((x >> Shift) & Mask);
15876       if (IsGORC)
15877         Res |= x;
15878       x = Res;
15879     }
15880   }
15881 
15882   return x;
15883 }
15884 
15885 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
15886                                                         KnownBits &Known,
15887                                                         const APInt &DemandedElts,
15888                                                         const SelectionDAG &DAG,
15889                                                         unsigned Depth) const {
15890   unsigned BitWidth = Known.getBitWidth();
15891   unsigned Opc = Op.getOpcode();
15892   assert((Opc >= ISD::BUILTIN_OP_END ||
15893           Opc == ISD::INTRINSIC_WO_CHAIN ||
15894           Opc == ISD::INTRINSIC_W_CHAIN ||
15895           Opc == ISD::INTRINSIC_VOID) &&
15896          "Should use MaskedValueIsZero if you don't know whether Op"
15897          " is a target node!");
15898 
15899   Known.resetAll();
15900   switch (Opc) {
15901   default: break;
15902   case RISCVISD::SELECT_CC: {
15903     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
15904     // If we don't know any bits, early out.
15905     if (Known.isUnknown())
15906       break;
15907     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
15908 
15909     // Only known if known in both the LHS and RHS.
15910     Known = Known.intersectWith(Known2);
15911     break;
15912   }
15913   case RISCVISD::CZERO_EQZ:
15914   case RISCVISD::CZERO_NEZ:
15915     Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
15916     // Result is either all zero or operand 0. We can propagate zeros, but not
15917     // ones.
15918     Known.One.clearAllBits();
15919     break;
15920   case RISCVISD::REMUW: {
15921     KnownBits Known2;
15922     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
15923     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
15924     // We only care about the lower 32 bits.
15925     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
15926     // Restore the original width by sign extending.
15927     Known = Known.sext(BitWidth);
15928     break;
15929   }
15930   case RISCVISD::DIVUW: {
15931     KnownBits Known2;
15932     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
15933     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
15934     // We only care about the lower 32 bits.
15935     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
15936     // Restore the original width by sign extending.
15937     Known = Known.sext(BitWidth);
15938     break;
15939   }
15940   case RISCVISD::SLLW: {
15941     KnownBits Known2;
15942     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
15943     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
15944     Known = KnownBits::shl(Known.trunc(32), Known2.trunc(5).zext(32));
15945     // Restore the original width by sign extending.
15946     Known = Known.sext(BitWidth);
15947     break;
15948   }
15949   case RISCVISD::CTZW: {
15950     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
15951     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
15952     unsigned LowBits = llvm::bit_width(PossibleTZ);
15953     Known.Zero.setBitsFrom(LowBits);
15954     break;
15955   }
15956   case RISCVISD::CLZW: {
15957     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
15958     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
15959     unsigned LowBits = llvm::bit_width(PossibleLZ);
15960     Known.Zero.setBitsFrom(LowBits);
15961     break;
15962   }
15963   case RISCVISD::BREV8:
15964   case RISCVISD::ORC_B: {
15965     // FIXME: This is based on the non-ratified Zbp GREV and GORC where a
15966     // control value of 7 is equivalent to brev8 and orc.b.
15967     Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
15968     bool IsGORC = Op.getOpcode() == RISCVISD::ORC_B;
15969     // To compute zeros, we need to invert the value and invert it back after.
15970     Known.Zero =
15971         ~computeGREVOrGORC(~Known.Zero.getZExtValue(), 7, IsGORC);
15972     Known.One = computeGREVOrGORC(Known.One.getZExtValue(), 7, IsGORC);
15973     break;
15974   }
15975   case RISCVISD::READ_VLENB: {
15976     // We can use the minimum and maximum VLEN values to bound VLENB.  We
15977     // know VLEN must be a power of two.
15978     const unsigned MinVLenB = Subtarget.getRealMinVLen() / 8;
15979     const unsigned MaxVLenB = Subtarget.getRealMaxVLen() / 8;
15980     assert(MinVLenB > 0 && "READ_VLENB without vector extension enabled?");
15981     Known.Zero.setLowBits(Log2_32(MinVLenB));
15982     Known.Zero.setBitsFrom(Log2_32(MaxVLenB)+1);
15983     if (MaxVLenB == MinVLenB)
15984       Known.One.setBit(Log2_32(MinVLenB));
15985     break;
15986   }
15987   case RISCVISD::FCLASS: {
15988     // fclass will only set one of the low 10 bits.
15989     Known.Zero.setBitsFrom(10);
15990     break;
15991   }
15992   case ISD::INTRINSIC_W_CHAIN:
15993   case ISD::INTRINSIC_WO_CHAIN: {
15994     unsigned IntNo =
15995         Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
15996     switch (IntNo) {
15997     default:
15998       // We can't do anything for most intrinsics.
15999       break;
16000     case Intrinsic::riscv_vsetvli:
16001     case Intrinsic::riscv_vsetvlimax:
16002       // Assume that VL output is <= 65536.
16003       // TODO: Take SEW and LMUL into account.
16004       if (BitWidth > 17)
16005         Known.Zero.setBitsFrom(17);
16006       break;
16007     }
16008     break;
16009   }
16010   }
16011 }
16012 
16013 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
16014     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
16015     unsigned Depth) const {
16016   switch (Op.getOpcode()) {
16017   default:
16018     break;
16019   case RISCVISD::SELECT_CC: {
16020     unsigned Tmp =
16021         DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
16022     if (Tmp == 1) return 1;  // Early out.
16023     unsigned Tmp2 =
16024         DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
16025     return std::min(Tmp, Tmp2);
16026   }
16027   case RISCVISD::CZERO_EQZ:
16028   case RISCVISD::CZERO_NEZ:
16029     // Output is either all zero or operand 0. We can propagate sign bit count
16030     // from operand 0.
16031     return DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
16032   case RISCVISD::ABSW: {
16033     // We expand this at isel to negw+max. The result will have 33 sign bits
16034     // if the input has at least 33 sign bits.
16035     unsigned Tmp =
16036         DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
16037     if (Tmp < 33) return 1;
16038     return 33;
16039   }
16040   case RISCVISD::SLLW:
16041   case RISCVISD::SRAW:
16042   case RISCVISD::SRLW:
16043   case RISCVISD::DIVW:
16044   case RISCVISD::DIVUW:
16045   case RISCVISD::REMUW:
16046   case RISCVISD::ROLW:
16047   case RISCVISD::RORW:
16048   case RISCVISD::FCVT_W_RV64:
16049   case RISCVISD::FCVT_WU_RV64:
16050   case RISCVISD::STRICT_FCVT_W_RV64:
16051   case RISCVISD::STRICT_FCVT_WU_RV64:
16052     // TODO: As the result is sign-extended, this is conservatively correct. A
16053     // more precise answer could be calculated for SRAW depending on known
16054     // bits in the shift amount.
16055     return 33;
16056   case RISCVISD::VMV_X_S: {
16057     // The number of sign bits of the scalar result is computed by obtaining the
16058     // element type of the input vector operand, subtracting its width from the
16059     // XLEN, and then adding one (sign bit within the element type). If the
16060     // element type is wider than XLen, the least-significant XLEN bits are
16061     // taken.
16062     unsigned XLen = Subtarget.getXLen();
16063     unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
16064     if (EltBits <= XLen)
16065       return XLen - EltBits + 1;
16066     break;
16067   }
16068   case ISD::INTRINSIC_W_CHAIN: {
16069     unsigned IntNo = Op.getConstantOperandVal(1);
16070     switch (IntNo) {
16071     default:
16072       break;
16073     case Intrinsic::riscv_masked_atomicrmw_xchg_i64:
16074     case Intrinsic::riscv_masked_atomicrmw_add_i64:
16075     case Intrinsic::riscv_masked_atomicrmw_sub_i64:
16076     case Intrinsic::riscv_masked_atomicrmw_nand_i64:
16077     case Intrinsic::riscv_masked_atomicrmw_max_i64:
16078     case Intrinsic::riscv_masked_atomicrmw_min_i64:
16079     case Intrinsic::riscv_masked_atomicrmw_umax_i64:
16080     case Intrinsic::riscv_masked_atomicrmw_umin_i64:
16081     case Intrinsic::riscv_masked_cmpxchg_i64:
16082       // riscv_masked_{atomicrmw_*,cmpxchg} intrinsics represent an emulated
16083       // narrow atomic operation. These are implemented using atomic
16084       // operations at the minimum supported atomicrmw/cmpxchg width whose
16085       // result is then sign extended to XLEN. With +A, the minimum width is
16086       // 32 for both 64 and 32.
16087       assert(Subtarget.getXLen() == 64);
16088       assert(getMinCmpXchgSizeInBits() == 32);
16089       assert(Subtarget.hasStdExtA());
16090       return 33;
16091     }
16092     break;
16093   }
16094   }
16095 
16096   return 1;
16097 }
16098 
16099 const Constant *
16100 RISCVTargetLowering::getTargetConstantFromLoad(LoadSDNode *Ld) const {
16101   assert(Ld && "Unexpected null LoadSDNode");
16102   if (!ISD::isNormalLoad(Ld))
16103     return nullptr;
16104 
16105   SDValue Ptr = Ld->getBasePtr();
16106 
16107   // Only constant pools with no offset are supported.
16108   auto GetSupportedConstantPool = [](SDValue Ptr) -> ConstantPoolSDNode * {
16109     auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
16110     if (!CNode || CNode->isMachineConstantPoolEntry() ||
16111         CNode->getOffset() != 0)
16112       return nullptr;
16113 
16114     return CNode;
16115   };
16116 
16117   // Simple case, LLA.
16118   if (Ptr.getOpcode() == RISCVISD::LLA) {
16119     auto *CNode = GetSupportedConstantPool(Ptr);
16120     if (!CNode || CNode->getTargetFlags() != 0)
16121       return nullptr;
16122 
16123     return CNode->getConstVal();
16124   }
16125 
16126   // Look for a HI and ADD_LO pair.
16127   if (Ptr.getOpcode() != RISCVISD::ADD_LO ||
16128       Ptr.getOperand(0).getOpcode() != RISCVISD::HI)
16129     return nullptr;
16130 
16131   auto *CNodeLo = GetSupportedConstantPool(Ptr.getOperand(1));
16132   auto *CNodeHi = GetSupportedConstantPool(Ptr.getOperand(0).getOperand(0));
16133 
16134   if (!CNodeLo || CNodeLo->getTargetFlags() != RISCVII::MO_LO ||
16135       !CNodeHi || CNodeHi->getTargetFlags() != RISCVII::MO_HI)
16136     return nullptr;
16137 
16138   if (CNodeLo->getConstVal() != CNodeHi->getConstVal())
16139     return nullptr;
16140 
16141   return CNodeLo->getConstVal();
16142 }
16143 
16144 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
16145                                                   MachineBasicBlock *BB) {
16146   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
16147 
16148   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
16149   // Should the count have wrapped while it was being read, we need to try
16150   // again.
16151   // ...
16152   // read:
16153   // rdcycleh x3 # load high word of cycle
16154   // rdcycle  x2 # load low word of cycle
16155   // rdcycleh x4 # load high word of cycle
16156   // bne x3, x4, read # check if high word reads match, otherwise try again
16157   // ...
16158 
16159   MachineFunction &MF = *BB->getParent();
16160   const BasicBlock *LLVM_BB = BB->getBasicBlock();
16161   MachineFunction::iterator It = ++BB->getIterator();
16162 
16163   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
16164   MF.insert(It, LoopMBB);
16165 
16166   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
16167   MF.insert(It, DoneMBB);
16168 
16169   // Transfer the remainder of BB and its successor edges to DoneMBB.
16170   DoneMBB->splice(DoneMBB->begin(), BB,
16171                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
16172   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
16173 
16174   BB->addSuccessor(LoopMBB);
16175 
16176   MachineRegisterInfo &RegInfo = MF.getRegInfo();
16177   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
16178   Register LoReg = MI.getOperand(0).getReg();
16179   Register HiReg = MI.getOperand(1).getReg();
16180   DebugLoc DL = MI.getDebugLoc();
16181 
16182   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
16183   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
16184       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
16185       .addReg(RISCV::X0);
16186   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
16187       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
16188       .addReg(RISCV::X0);
16189   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
16190       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
16191       .addReg(RISCV::X0);
16192 
16193   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
16194       .addReg(HiReg)
16195       .addReg(ReadAgainReg)
16196       .addMBB(LoopMBB);
16197 
16198   LoopMBB->addSuccessor(LoopMBB);
16199   LoopMBB->addSuccessor(DoneMBB);
16200 
16201   MI.eraseFromParent();
16202 
16203   return DoneMBB;
16204 }
16205 
16206 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
16207                                              MachineBasicBlock *BB,
16208                                              const RISCVSubtarget &Subtarget) {
16209   assert((MI.getOpcode() == RISCV::SplitF64Pseudo ||
16210           MI.getOpcode() == RISCV::SplitF64Pseudo_INX) &&
16211          "Unexpected instruction");
16212 
16213   MachineFunction &MF = *BB->getParent();
16214   DebugLoc DL = MI.getDebugLoc();
16215   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
16216   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
16217   Register LoReg = MI.getOperand(0).getReg();
16218   Register HiReg = MI.getOperand(1).getReg();
16219   Register SrcReg = MI.getOperand(2).getReg();
16220 
16221   const TargetRegisterClass *SrcRC = MI.getOpcode() == RISCV::SplitF64Pseudo_INX
16222                                          ? &RISCV::GPRPF64RegClass
16223                                          : &RISCV::FPR64RegClass;
16224   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
16225 
16226   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
16227                           RI, Register());
16228   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
16229   MachineMemOperand *MMOLo =
16230       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
16231   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
16232       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
16233   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
16234       .addFrameIndex(FI)
16235       .addImm(0)
16236       .addMemOperand(MMOLo);
16237   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
16238       .addFrameIndex(FI)
16239       .addImm(4)
16240       .addMemOperand(MMOHi);
16241   MI.eraseFromParent(); // The pseudo instruction is gone now.
16242   return BB;
16243 }
16244 
16245 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
16246                                                  MachineBasicBlock *BB,
16247                                                  const RISCVSubtarget &Subtarget) {
16248   assert((MI.getOpcode() == RISCV::BuildPairF64Pseudo ||
16249           MI.getOpcode() == RISCV::BuildPairF64Pseudo_INX) &&
16250          "Unexpected instruction");
16251 
16252   MachineFunction &MF = *BB->getParent();
16253   DebugLoc DL = MI.getDebugLoc();
16254   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
16255   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
16256   Register DstReg = MI.getOperand(0).getReg();
16257   Register LoReg = MI.getOperand(1).getReg();
16258   Register HiReg = MI.getOperand(2).getReg();
16259 
16260   const TargetRegisterClass *DstRC =
16261       MI.getOpcode() == RISCV::BuildPairF64Pseudo_INX ? &RISCV::GPRPF64RegClass
16262                                                       : &RISCV::FPR64RegClass;
16263   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
16264 
16265   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
16266   MachineMemOperand *MMOLo =
16267       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
16268   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
16269       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
16270   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
16271       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
16272       .addFrameIndex(FI)
16273       .addImm(0)
16274       .addMemOperand(MMOLo);
16275   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
16276       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
16277       .addFrameIndex(FI)
16278       .addImm(4)
16279       .addMemOperand(MMOHi);
16280   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI, Register());
16281   MI.eraseFromParent(); // The pseudo instruction is gone now.
16282   return BB;
16283 }
16284 
16285 static bool isSelectPseudo(MachineInstr &MI) {
16286   switch (MI.getOpcode()) {
16287   default:
16288     return false;
16289   case RISCV::Select_GPR_Using_CC_GPR:
16290   case RISCV::Select_FPR16_Using_CC_GPR:
16291   case RISCV::Select_FPR16INX_Using_CC_GPR:
16292   case RISCV::Select_FPR32_Using_CC_GPR:
16293   case RISCV::Select_FPR32INX_Using_CC_GPR:
16294   case RISCV::Select_FPR64_Using_CC_GPR:
16295   case RISCV::Select_FPR64INX_Using_CC_GPR:
16296   case RISCV::Select_FPR64IN32X_Using_CC_GPR:
16297     return true;
16298   }
16299 }
16300 
16301 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
16302                                         unsigned RelOpcode, unsigned EqOpcode,
16303                                         const RISCVSubtarget &Subtarget) {
16304   DebugLoc DL = MI.getDebugLoc();
16305   Register DstReg = MI.getOperand(0).getReg();
16306   Register Src1Reg = MI.getOperand(1).getReg();
16307   Register Src2Reg = MI.getOperand(2).getReg();
16308   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
16309   Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
16310   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
16311 
16312   // Save the current FFLAGS.
16313   BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
16314 
16315   auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
16316                  .addReg(Src1Reg)
16317                  .addReg(Src2Reg);
16318   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
16319     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
16320 
16321   // Restore the FFLAGS.
16322   BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
16323       .addReg(SavedFFlags, RegState::Kill);
16324 
16325   // Issue a dummy FEQ opcode to raise exception for signaling NaNs.
16326   auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
16327                   .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
16328                   .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
16329   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
16330     MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
16331 
16332   // Erase the pseudoinstruction.
16333   MI.eraseFromParent();
16334   return BB;
16335 }
16336 
16337 static MachineBasicBlock *
16338 EmitLoweredCascadedSelect(MachineInstr &First, MachineInstr &Second,
16339                           MachineBasicBlock *ThisMBB,
16340                           const RISCVSubtarget &Subtarget) {
16341   // Select_FPRX_ (rs1, rs2, imm, rs4, (Select_FPRX_ rs1, rs2, imm, rs4, rs5)
16342   // Without this, custom-inserter would have generated:
16343   //
16344   //   A
16345   //   | \
16346   //   |  B
16347   //   | /
16348   //   C
16349   //   | \
16350   //   |  D
16351   //   | /
16352   //   E
16353   //
16354   // A: X = ...; Y = ...
16355   // B: empty
16356   // C: Z = PHI [X, A], [Y, B]
16357   // D: empty
16358   // E: PHI [X, C], [Z, D]
16359   //
16360   // If we lower both Select_FPRX_ in a single step, we can instead generate:
16361   //
16362   //   A
16363   //   | \
16364   //   |  C
16365   //   | /|
16366   //   |/ |
16367   //   |  |
16368   //   |  D
16369   //   | /
16370   //   E
16371   //
16372   // A: X = ...; Y = ...
16373   // D: empty
16374   // E: PHI [X, A], [X, C], [Y, D]
16375 
16376   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
16377   const DebugLoc &DL = First.getDebugLoc();
16378   const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
16379   MachineFunction *F = ThisMBB->getParent();
16380   MachineBasicBlock *FirstMBB = F->CreateMachineBasicBlock(LLVM_BB);
16381   MachineBasicBlock *SecondMBB = F->CreateMachineBasicBlock(LLVM_BB);
16382   MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
16383   MachineFunction::iterator It = ++ThisMBB->getIterator();
16384   F->insert(It, FirstMBB);
16385   F->insert(It, SecondMBB);
16386   F->insert(It, SinkMBB);
16387 
16388   // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
16389   SinkMBB->splice(SinkMBB->begin(), ThisMBB,
16390                   std::next(MachineBasicBlock::iterator(First)),
16391                   ThisMBB->end());
16392   SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
16393 
16394   // Fallthrough block for ThisMBB.
16395   ThisMBB->addSuccessor(FirstMBB);
16396   // Fallthrough block for FirstMBB.
16397   FirstMBB->addSuccessor(SecondMBB);
16398   ThisMBB->addSuccessor(SinkMBB);
16399   FirstMBB->addSuccessor(SinkMBB);
16400   // This is fallthrough.
16401   SecondMBB->addSuccessor(SinkMBB);
16402 
16403   auto FirstCC = static_cast<RISCVCC::CondCode>(First.getOperand(3).getImm());
16404   Register FLHS = First.getOperand(1).getReg();
16405   Register FRHS = First.getOperand(2).getReg();
16406   // Insert appropriate branch.
16407   BuildMI(FirstMBB, DL, TII.getBrCond(FirstCC))
16408       .addReg(FLHS)
16409       .addReg(FRHS)
16410       .addMBB(SinkMBB);
16411 
16412   Register SLHS = Second.getOperand(1).getReg();
16413   Register SRHS = Second.getOperand(2).getReg();
16414   Register Op1Reg4 = First.getOperand(4).getReg();
16415   Register Op1Reg5 = First.getOperand(5).getReg();
16416 
16417   auto SecondCC = static_cast<RISCVCC::CondCode>(Second.getOperand(3).getImm());
16418   // Insert appropriate branch.
16419   BuildMI(ThisMBB, DL, TII.getBrCond(SecondCC))
16420       .addReg(SLHS)
16421       .addReg(SRHS)
16422       .addMBB(SinkMBB);
16423 
16424   Register DestReg = Second.getOperand(0).getReg();
16425   Register Op2Reg4 = Second.getOperand(4).getReg();
16426   BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII.get(RISCV::PHI), DestReg)
16427       .addReg(Op2Reg4)
16428       .addMBB(ThisMBB)
16429       .addReg(Op1Reg4)
16430       .addMBB(FirstMBB)
16431       .addReg(Op1Reg5)
16432       .addMBB(SecondMBB);
16433 
16434   // Now remove the Select_FPRX_s.
16435   First.eraseFromParent();
16436   Second.eraseFromParent();
16437   return SinkMBB;
16438 }
16439 
16440 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
16441                                            MachineBasicBlock *BB,
16442                                            const RISCVSubtarget &Subtarget) {
16443   // To "insert" Select_* instructions, we actually have to insert the triangle
16444   // control-flow pattern.  The incoming instructions know the destination vreg
16445   // to set, the condition code register to branch on, the true/false values to
16446   // select between, and the condcode to use to select the appropriate branch.
16447   //
16448   // We produce the following control flow:
16449   //     HeadMBB
16450   //     |  \
16451   //     |  IfFalseMBB
16452   //     | /
16453   //    TailMBB
16454   //
16455   // When we find a sequence of selects we attempt to optimize their emission
16456   // by sharing the control flow. Currently we only handle cases where we have
16457   // multiple selects with the exact same condition (same LHS, RHS and CC).
16458   // The selects may be interleaved with other instructions if the other
16459   // instructions meet some requirements we deem safe:
16460   // - They are not pseudo instructions.
16461   // - They are debug instructions. Otherwise,
16462   // - They do not have side-effects, do not access memory and their inputs do
16463   //   not depend on the results of the select pseudo-instructions.
16464   // The TrueV/FalseV operands of the selects cannot depend on the result of
16465   // previous selects in the sequence.
16466   // These conditions could be further relaxed. See the X86 target for a
16467   // related approach and more information.
16468   //
16469   // Select_FPRX_ (rs1, rs2, imm, rs4, (Select_FPRX_ rs1, rs2, imm, rs4, rs5))
16470   // is checked here and handled by a separate function -
16471   // EmitLoweredCascadedSelect.
16472   Register LHS = MI.getOperand(1).getReg();
16473   Register RHS = MI.getOperand(2).getReg();
16474   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
16475 
16476   SmallVector<MachineInstr *, 4> SelectDebugValues;
16477   SmallSet<Register, 4> SelectDests;
16478   SelectDests.insert(MI.getOperand(0).getReg());
16479 
16480   MachineInstr *LastSelectPseudo = &MI;
16481   auto Next = next_nodbg(MI.getIterator(), BB->instr_end());
16482   if (MI.getOpcode() != RISCV::Select_GPR_Using_CC_GPR && Next != BB->end() &&
16483       Next->getOpcode() == MI.getOpcode() &&
16484       Next->getOperand(5).getReg() == MI.getOperand(0).getReg() &&
16485       Next->getOperand(5).isKill()) {
16486     return EmitLoweredCascadedSelect(MI, *Next, BB, Subtarget);
16487   }
16488 
16489   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
16490        SequenceMBBI != E; ++SequenceMBBI) {
16491     if (SequenceMBBI->isDebugInstr())
16492       continue;
16493     if (isSelectPseudo(*SequenceMBBI)) {
16494       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
16495           SequenceMBBI->getOperand(2).getReg() != RHS ||
16496           SequenceMBBI->getOperand(3).getImm() != CC ||
16497           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
16498           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
16499         break;
16500       LastSelectPseudo = &*SequenceMBBI;
16501       SequenceMBBI->collectDebugValues(SelectDebugValues);
16502       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
16503       continue;
16504     }
16505     if (SequenceMBBI->hasUnmodeledSideEffects() ||
16506         SequenceMBBI->mayLoadOrStore() ||
16507         SequenceMBBI->usesCustomInsertionHook())
16508       break;
16509     if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
16510           return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
16511         }))
16512       break;
16513   }
16514 
16515   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
16516   const BasicBlock *LLVM_BB = BB->getBasicBlock();
16517   DebugLoc DL = MI.getDebugLoc();
16518   MachineFunction::iterator I = ++BB->getIterator();
16519 
16520   MachineBasicBlock *HeadMBB = BB;
16521   MachineFunction *F = BB->getParent();
16522   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
16523   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
16524 
16525   F->insert(I, IfFalseMBB);
16526   F->insert(I, TailMBB);
16527 
16528   // Transfer debug instructions associated with the selects to TailMBB.
16529   for (MachineInstr *DebugInstr : SelectDebugValues) {
16530     TailMBB->push_back(DebugInstr->removeFromParent());
16531   }
16532 
16533   // Move all instructions after the sequence to TailMBB.
16534   TailMBB->splice(TailMBB->end(), HeadMBB,
16535                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
16536   // Update machine-CFG edges by transferring all successors of the current
16537   // block to the new block which will contain the Phi nodes for the selects.
16538   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
16539   // Set the successors for HeadMBB.
16540   HeadMBB->addSuccessor(IfFalseMBB);
16541   HeadMBB->addSuccessor(TailMBB);
16542 
16543   // Insert appropriate branch.
16544   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
16545     .addReg(LHS)
16546     .addReg(RHS)
16547     .addMBB(TailMBB);
16548 
16549   // IfFalseMBB just falls through to TailMBB.
16550   IfFalseMBB->addSuccessor(TailMBB);
16551 
16552   // Create PHIs for all of the select pseudo-instructions.
16553   auto SelectMBBI = MI.getIterator();
16554   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
16555   auto InsertionPoint = TailMBB->begin();
16556   while (SelectMBBI != SelectEnd) {
16557     auto Next = std::next(SelectMBBI);
16558     if (isSelectPseudo(*SelectMBBI)) {
16559       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
16560       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
16561               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
16562           .addReg(SelectMBBI->getOperand(4).getReg())
16563           .addMBB(HeadMBB)
16564           .addReg(SelectMBBI->getOperand(5).getReg())
16565           .addMBB(IfFalseMBB);
16566       SelectMBBI->eraseFromParent();
16567     }
16568     SelectMBBI = Next;
16569   }
16570 
16571   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
16572   return TailMBB;
16573 }
16574 
16575 static MachineBasicBlock *emitVFROUND_NOEXCEPT_MASK(MachineInstr &MI,
16576                                                     MachineBasicBlock *BB,
16577                                                     unsigned CVTXOpc,
16578                                                     unsigned CVTFOpc) {
16579   DebugLoc DL = MI.getDebugLoc();
16580 
16581   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
16582 
16583   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
16584   Register SavedFFLAGS = MRI.createVirtualRegister(&RISCV::GPRRegClass);
16585 
16586   // Save the old value of FFLAGS.
16587   BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFLAGS);
16588 
16589   assert(MI.getNumOperands() == 7);
16590 
16591   // Emit a VFCVT_X_F
16592   const TargetRegisterInfo *TRI =
16593       BB->getParent()->getSubtarget().getRegisterInfo();
16594   const TargetRegisterClass *RC = MI.getRegClassConstraint(0, &TII, TRI);
16595   Register Tmp = MRI.createVirtualRegister(RC);
16596   BuildMI(*BB, MI, DL, TII.get(CVTXOpc), Tmp)
16597       .add(MI.getOperand(1))
16598       .add(MI.getOperand(2))
16599       .add(MI.getOperand(3))
16600       .add(MachineOperand::CreateImm(7)) // frm = DYN
16601       .add(MI.getOperand(4))
16602       .add(MI.getOperand(5))
16603       .add(MI.getOperand(6))
16604       .add(MachineOperand::CreateReg(RISCV::FRM,
16605                                      /*IsDef*/ false,
16606                                      /*IsImp*/ true));
16607 
16608   // Emit a VFCVT_F_X
16609   BuildMI(*BB, MI, DL, TII.get(CVTFOpc))
16610       .add(MI.getOperand(0))
16611       .add(MI.getOperand(1))
16612       .addReg(Tmp)
16613       .add(MI.getOperand(3))
16614       .add(MachineOperand::CreateImm(7)) // frm = DYN
16615       .add(MI.getOperand(4))
16616       .add(MI.getOperand(5))
16617       .add(MI.getOperand(6))
16618       .add(MachineOperand::CreateReg(RISCV::FRM,
16619                                      /*IsDef*/ false,
16620                                      /*IsImp*/ true));
16621 
16622   // Restore FFLAGS.
16623   BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
16624       .addReg(SavedFFLAGS, RegState::Kill);
16625 
16626   // Erase the pseudoinstruction.
16627   MI.eraseFromParent();
16628   return BB;
16629 }
16630 
16631 static MachineBasicBlock *emitFROUND(MachineInstr &MI, MachineBasicBlock *MBB,
16632                                      const RISCVSubtarget &Subtarget) {
16633   unsigned CmpOpc, F2IOpc, I2FOpc, FSGNJOpc, FSGNJXOpc;
16634   const TargetRegisterClass *RC;
16635   switch (MI.getOpcode()) {
16636   default:
16637     llvm_unreachable("Unexpected opcode");
16638   case RISCV::PseudoFROUND_H:
16639     CmpOpc = RISCV::FLT_H;
16640     F2IOpc = RISCV::FCVT_W_H;
16641     I2FOpc = RISCV::FCVT_H_W;
16642     FSGNJOpc = RISCV::FSGNJ_H;
16643     FSGNJXOpc = RISCV::FSGNJX_H;
16644     RC = &RISCV::FPR16RegClass;
16645     break;
16646   case RISCV::PseudoFROUND_H_INX:
16647     CmpOpc = RISCV::FLT_H_INX;
16648     F2IOpc = RISCV::FCVT_W_H_INX;
16649     I2FOpc = RISCV::FCVT_H_W_INX;
16650     FSGNJOpc = RISCV::FSGNJ_H_INX;
16651     FSGNJXOpc = RISCV::FSGNJX_H_INX;
16652     RC = &RISCV::GPRF16RegClass;
16653     break;
16654   case RISCV::PseudoFROUND_S:
16655     CmpOpc = RISCV::FLT_S;
16656     F2IOpc = RISCV::FCVT_W_S;
16657     I2FOpc = RISCV::FCVT_S_W;
16658     FSGNJOpc = RISCV::FSGNJ_S;
16659     FSGNJXOpc = RISCV::FSGNJX_S;
16660     RC = &RISCV::FPR32RegClass;
16661     break;
16662   case RISCV::PseudoFROUND_S_INX:
16663     CmpOpc = RISCV::FLT_S_INX;
16664     F2IOpc = RISCV::FCVT_W_S_INX;
16665     I2FOpc = RISCV::FCVT_S_W_INX;
16666     FSGNJOpc = RISCV::FSGNJ_S_INX;
16667     FSGNJXOpc = RISCV::FSGNJX_S_INX;
16668     RC = &RISCV::GPRF32RegClass;
16669     break;
16670   case RISCV::PseudoFROUND_D:
16671     assert(Subtarget.is64Bit() && "Expected 64-bit GPR.");
16672     CmpOpc = RISCV::FLT_D;
16673     F2IOpc = RISCV::FCVT_L_D;
16674     I2FOpc = RISCV::FCVT_D_L;
16675     FSGNJOpc = RISCV::FSGNJ_D;
16676     FSGNJXOpc = RISCV::FSGNJX_D;
16677     RC = &RISCV::FPR64RegClass;
16678     break;
16679   case RISCV::PseudoFROUND_D_INX:
16680     assert(Subtarget.is64Bit() && "Expected 64-bit GPR.");
16681     CmpOpc = RISCV::FLT_D_INX;
16682     F2IOpc = RISCV::FCVT_L_D_INX;
16683     I2FOpc = RISCV::FCVT_D_L_INX;
16684     FSGNJOpc = RISCV::FSGNJ_D_INX;
16685     FSGNJXOpc = RISCV::FSGNJX_D_INX;
16686     RC = &RISCV::GPRRegClass;
16687     break;
16688   }
16689 
16690   const BasicBlock *BB = MBB->getBasicBlock();
16691   DebugLoc DL = MI.getDebugLoc();
16692   MachineFunction::iterator I = ++MBB->getIterator();
16693 
16694   MachineFunction *F = MBB->getParent();
16695   MachineBasicBlock *CvtMBB = F->CreateMachineBasicBlock(BB);
16696   MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(BB);
16697 
16698   F->insert(I, CvtMBB);
16699   F->insert(I, DoneMBB);
16700   // Move all instructions after the sequence to DoneMBB.
16701   DoneMBB->splice(DoneMBB->end(), MBB, MachineBasicBlock::iterator(MI),
16702                   MBB->end());
16703   // Update machine-CFG edges by transferring all successors of the current
16704   // block to the new block which will contain the Phi nodes for the selects.
16705   DoneMBB->transferSuccessorsAndUpdatePHIs(MBB);
16706   // Set the successors for MBB.
16707   MBB->addSuccessor(CvtMBB);
16708   MBB->addSuccessor(DoneMBB);
16709 
16710   Register DstReg = MI.getOperand(0).getReg();
16711   Register SrcReg = MI.getOperand(1).getReg();
16712   Register MaxReg = MI.getOperand(2).getReg();
16713   int64_t FRM = MI.getOperand(3).getImm();
16714 
16715   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
16716   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
16717 
16718   Register FabsReg = MRI.createVirtualRegister(RC);
16719   BuildMI(MBB, DL, TII.get(FSGNJXOpc), FabsReg).addReg(SrcReg).addReg(SrcReg);
16720 
16721   // Compare the FP value to the max value.
16722   Register CmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
16723   auto MIB =
16724       BuildMI(MBB, DL, TII.get(CmpOpc), CmpReg).addReg(FabsReg).addReg(MaxReg);
16725   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
16726     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
16727 
16728   // Insert branch.
16729   BuildMI(MBB, DL, TII.get(RISCV::BEQ))
16730       .addReg(CmpReg)
16731       .addReg(RISCV::X0)
16732       .addMBB(DoneMBB);
16733 
16734   CvtMBB->addSuccessor(DoneMBB);
16735 
16736   // Convert to integer.
16737   Register F2IReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
16738   MIB = BuildMI(CvtMBB, DL, TII.get(F2IOpc), F2IReg).addReg(SrcReg).addImm(FRM);
16739   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
16740     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
16741 
16742   // Convert back to FP.
16743   Register I2FReg = MRI.createVirtualRegister(RC);
16744   MIB = BuildMI(CvtMBB, DL, TII.get(I2FOpc), I2FReg).addReg(F2IReg).addImm(FRM);
16745   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
16746     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
16747 
16748   // Restore the sign bit.
16749   Register CvtReg = MRI.createVirtualRegister(RC);
16750   BuildMI(CvtMBB, DL, TII.get(FSGNJOpc), CvtReg).addReg(I2FReg).addReg(SrcReg);
16751 
16752   // Merge the results.
16753   BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(RISCV::PHI), DstReg)
16754       .addReg(SrcReg)
16755       .addMBB(MBB)
16756       .addReg(CvtReg)
16757       .addMBB(CvtMBB);
16758 
16759   MI.eraseFromParent();
16760   return DoneMBB;
16761 }
16762 
16763 MachineBasicBlock *
16764 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
16765                                                  MachineBasicBlock *BB) const {
16766   switch (MI.getOpcode()) {
16767   default:
16768     llvm_unreachable("Unexpected instr type to insert");
16769   case RISCV::ReadCycleWide:
16770     assert(!Subtarget.is64Bit() &&
16771            "ReadCycleWrite is only to be used on riscv32");
16772     return emitReadCycleWidePseudo(MI, BB);
16773   case RISCV::Select_GPR_Using_CC_GPR:
16774   case RISCV::Select_FPR16_Using_CC_GPR:
16775   case RISCV::Select_FPR16INX_Using_CC_GPR:
16776   case RISCV::Select_FPR32_Using_CC_GPR:
16777   case RISCV::Select_FPR32INX_Using_CC_GPR:
16778   case RISCV::Select_FPR64_Using_CC_GPR:
16779   case RISCV::Select_FPR64INX_Using_CC_GPR:
16780   case RISCV::Select_FPR64IN32X_Using_CC_GPR:
16781     return emitSelectPseudo(MI, BB, Subtarget);
16782   case RISCV::BuildPairF64Pseudo:
16783   case RISCV::BuildPairF64Pseudo_INX:
16784     return emitBuildPairF64Pseudo(MI, BB, Subtarget);
16785   case RISCV::SplitF64Pseudo:
16786   case RISCV::SplitF64Pseudo_INX:
16787     return emitSplitF64Pseudo(MI, BB, Subtarget);
16788   case RISCV::PseudoQuietFLE_H:
16789     return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
16790   case RISCV::PseudoQuietFLE_H_INX:
16791     return emitQuietFCMP(MI, BB, RISCV::FLE_H_INX, RISCV::FEQ_H_INX, Subtarget);
16792   case RISCV::PseudoQuietFLT_H:
16793     return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
16794   case RISCV::PseudoQuietFLT_H_INX:
16795     return emitQuietFCMP(MI, BB, RISCV::FLT_H_INX, RISCV::FEQ_H_INX, Subtarget);
16796   case RISCV::PseudoQuietFLE_S:
16797     return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
16798   case RISCV::PseudoQuietFLE_S_INX:
16799     return emitQuietFCMP(MI, BB, RISCV::FLE_S_INX, RISCV::FEQ_S_INX, Subtarget);
16800   case RISCV::PseudoQuietFLT_S:
16801     return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
16802   case RISCV::PseudoQuietFLT_S_INX:
16803     return emitQuietFCMP(MI, BB, RISCV::FLT_S_INX, RISCV::FEQ_S_INX, Subtarget);
16804   case RISCV::PseudoQuietFLE_D:
16805     return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
16806   case RISCV::PseudoQuietFLE_D_INX:
16807     return emitQuietFCMP(MI, BB, RISCV::FLE_D_INX, RISCV::FEQ_D_INX, Subtarget);
16808   case RISCV::PseudoQuietFLE_D_IN32X:
16809     return emitQuietFCMP(MI, BB, RISCV::FLE_D_IN32X, RISCV::FEQ_D_IN32X,
16810                          Subtarget);
16811   case RISCV::PseudoQuietFLT_D:
16812     return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
16813   case RISCV::PseudoQuietFLT_D_INX:
16814     return emitQuietFCMP(MI, BB, RISCV::FLT_D_INX, RISCV::FEQ_D_INX, Subtarget);
16815   case RISCV::PseudoQuietFLT_D_IN32X:
16816     return emitQuietFCMP(MI, BB, RISCV::FLT_D_IN32X, RISCV::FEQ_D_IN32X,
16817                          Subtarget);
16818 
16819   case RISCV::PseudoVFROUND_NOEXCEPT_V_M1_MASK:
16820     return emitVFROUND_NOEXCEPT_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_M1_MASK,
16821                                      RISCV::PseudoVFCVT_F_X_V_M1_MASK);
16822   case RISCV::PseudoVFROUND_NOEXCEPT_V_M2_MASK:
16823     return emitVFROUND_NOEXCEPT_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_M2_MASK,
16824                                      RISCV::PseudoVFCVT_F_X_V_M2_MASK);
16825   case RISCV::PseudoVFROUND_NOEXCEPT_V_M4_MASK:
16826     return emitVFROUND_NOEXCEPT_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_M4_MASK,
16827                                      RISCV::PseudoVFCVT_F_X_V_M4_MASK);
16828   case RISCV::PseudoVFROUND_NOEXCEPT_V_M8_MASK:
16829     return emitVFROUND_NOEXCEPT_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_M8_MASK,
16830                                      RISCV::PseudoVFCVT_F_X_V_M8_MASK);
16831   case RISCV::PseudoVFROUND_NOEXCEPT_V_MF2_MASK:
16832     return emitVFROUND_NOEXCEPT_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_MF2_MASK,
16833                                      RISCV::PseudoVFCVT_F_X_V_MF2_MASK);
16834   case RISCV::PseudoVFROUND_NOEXCEPT_V_MF4_MASK:
16835     return emitVFROUND_NOEXCEPT_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_MF4_MASK,
16836                                      RISCV::PseudoVFCVT_F_X_V_MF4_MASK);
16837   case RISCV::PseudoFROUND_H:
16838   case RISCV::PseudoFROUND_H_INX:
16839   case RISCV::PseudoFROUND_S:
16840   case RISCV::PseudoFROUND_S_INX:
16841   case RISCV::PseudoFROUND_D:
16842   case RISCV::PseudoFROUND_D_INX:
16843   case RISCV::PseudoFROUND_D_IN32X:
16844     return emitFROUND(MI, BB, Subtarget);
16845   case TargetOpcode::STATEPOINT:
16846   case TargetOpcode::STACKMAP:
16847   case TargetOpcode::PATCHPOINT:
16848     if (!Subtarget.is64Bit())
16849       report_fatal_error("STACKMAP, PATCHPOINT and STATEPOINT are only "
16850                          "supported on 64-bit targets");
16851     return emitPatchPoint(MI, BB);
16852   }
16853 }
16854 
16855 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
16856                                                         SDNode *Node) const {
16857   // Add FRM dependency to any instructions with dynamic rounding mode.
16858   int Idx = RISCV::getNamedOperandIdx(MI.getOpcode(), RISCV::OpName::frm);
16859   if (Idx < 0) {
16860     // Vector pseudos have FRM index indicated by TSFlags.
16861     Idx = RISCVII::getFRMOpNum(MI.getDesc());
16862     if (Idx < 0)
16863       return;
16864   }
16865   if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
16866     return;
16867   // If the instruction already reads FRM, don't add another read.
16868   if (MI.readsRegister(RISCV::FRM))
16869     return;
16870   MI.addOperand(
16871       MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
16872 }
16873 
16874 // Calling Convention Implementation.
16875 // The expectations for frontend ABI lowering vary from target to target.
16876 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
16877 // details, but this is a longer term goal. For now, we simply try to keep the
16878 // role of the frontend as simple and well-defined as possible. The rules can
16879 // be summarised as:
16880 // * Never split up large scalar arguments. We handle them here.
16881 // * If a hardfloat calling convention is being used, and the struct may be
16882 // passed in a pair of registers (fp+fp, int+fp), and both registers are
16883 // available, then pass as two separate arguments. If either the GPRs or FPRs
16884 // are exhausted, then pass according to the rule below.
16885 // * If a struct could never be passed in registers or directly in a stack
16886 // slot (as it is larger than 2*XLEN and the floating point rules don't
16887 // apply), then pass it using a pointer with the byval attribute.
16888 // * If a struct is less than 2*XLEN, then coerce to either a two-element
16889 // word-sized array or a 2*XLEN scalar (depending on alignment).
16890 // * The frontend can determine whether a struct is returned by reference or
16891 // not based on its size and fields. If it will be returned by reference, the
16892 // frontend must modify the prototype so a pointer with the sret annotation is
16893 // passed as the first argument. This is not necessary for large scalar
16894 // returns.
16895 // * Struct return values and varargs should be coerced to structs containing
16896 // register-size fields in the same situations they would be for fixed
16897 // arguments.
16898 
16899 static const MCPhysReg ArgFPR16s[] = {
16900   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
16901   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
16902 };
16903 static const MCPhysReg ArgFPR32s[] = {
16904   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
16905   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
16906 };
16907 static const MCPhysReg ArgFPR64s[] = {
16908   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
16909   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
16910 };
16911 // This is an interim calling convention and it may be changed in the future.
16912 static const MCPhysReg ArgVRs[] = {
16913     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
16914     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
16915     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
16916 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
16917                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
16918                                      RISCV::V20M2, RISCV::V22M2};
16919 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
16920                                      RISCV::V20M4};
16921 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
16922 
16923 ArrayRef<MCPhysReg> RISCV::getArgGPRs() {
16924   static const MCPhysReg ArgGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
16925                                       RISCV::X13, RISCV::X14, RISCV::X15,
16926                                       RISCV::X16, RISCV::X17};
16927 
16928   return ArrayRef(ArgGPRs);
16929 }
16930 
16931 // Pass a 2*XLEN argument that has been split into two XLEN values through
16932 // registers or the stack as necessary.
16933 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
16934                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
16935                                 MVT ValVT2, MVT LocVT2,
16936                                 ISD::ArgFlagsTy ArgFlags2) {
16937   unsigned XLenInBytes = XLen / 8;
16938   ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs();
16939   if (Register Reg = State.AllocateReg(ArgGPRs)) {
16940     // At least one half can be passed via register.
16941     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
16942                                      VA1.getLocVT(), CCValAssign::Full));
16943   } else {
16944     // Both halves must be passed on the stack, with proper alignment.
16945     Align StackAlign =
16946         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
16947     State.addLoc(
16948         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
16949                             State.AllocateStack(XLenInBytes, StackAlign),
16950                             VA1.getLocVT(), CCValAssign::Full));
16951     State.addLoc(CCValAssign::getMem(
16952         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
16953         LocVT2, CCValAssign::Full));
16954     return false;
16955   }
16956 
16957   if (Register Reg = State.AllocateReg(ArgGPRs)) {
16958     // The second half can also be passed via register.
16959     State.addLoc(
16960         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
16961   } else {
16962     // The second half is passed via the stack, without additional alignment.
16963     State.addLoc(CCValAssign::getMem(
16964         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
16965         LocVT2, CCValAssign::Full));
16966   }
16967 
16968   return false;
16969 }
16970 
16971 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
16972                                std::optional<unsigned> FirstMaskArgument,
16973                                CCState &State, const RISCVTargetLowering &TLI) {
16974   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
16975   if (RC == &RISCV::VRRegClass) {
16976     // Assign the first mask argument to V0.
16977     // This is an interim calling convention and it may be changed in the
16978     // future.
16979     if (FirstMaskArgument && ValNo == *FirstMaskArgument)
16980       return State.AllocateReg(RISCV::V0);
16981     return State.AllocateReg(ArgVRs);
16982   }
16983   if (RC == &RISCV::VRM2RegClass)
16984     return State.AllocateReg(ArgVRM2s);
16985   if (RC == &RISCV::VRM4RegClass)
16986     return State.AllocateReg(ArgVRM4s);
16987   if (RC == &RISCV::VRM8RegClass)
16988     return State.AllocateReg(ArgVRM8s);
16989   llvm_unreachable("Unhandled register class for ValueType");
16990 }
16991 
16992 // Implements the RISC-V calling convention. Returns true upon failure.
16993 bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
16994                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
16995                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
16996                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
16997                      std::optional<unsigned> FirstMaskArgument) {
16998   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
16999   assert(XLen == 32 || XLen == 64);
17000   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
17001 
17002   // Static chain parameter must not be passed in normal argument registers,
17003   // so we assign t2 for it as done in GCC's __builtin_call_with_static_chain
17004   if (ArgFlags.isNest()) {
17005     if (unsigned Reg = State.AllocateReg(RISCV::X7)) {
17006       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
17007       return false;
17008     }
17009   }
17010 
17011   // Any return value split in to more than two values can't be returned
17012   // directly. Vectors are returned via the available vector registers.
17013   if (!LocVT.isVector() && IsRet && ValNo > 1)
17014     return true;
17015 
17016   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
17017   // variadic argument, or if no F16/F32 argument registers are available.
17018   bool UseGPRForF16_F32 = true;
17019   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
17020   // variadic argument, or if no F64 argument registers are available.
17021   bool UseGPRForF64 = true;
17022 
17023   switch (ABI) {
17024   default:
17025     llvm_unreachable("Unexpected ABI");
17026   case RISCVABI::ABI_ILP32:
17027   case RISCVABI::ABI_LP64:
17028     break;
17029   case RISCVABI::ABI_ILP32F:
17030   case RISCVABI::ABI_LP64F:
17031     UseGPRForF16_F32 = !IsFixed;
17032     break;
17033   case RISCVABI::ABI_ILP32D:
17034   case RISCVABI::ABI_LP64D:
17035     UseGPRForF16_F32 = !IsFixed;
17036     UseGPRForF64 = !IsFixed;
17037     break;
17038   }
17039 
17040   // FPR16, FPR32, and FPR64 alias each other.
17041   if (State.getFirstUnallocated(ArgFPR32s) == std::size(ArgFPR32s)) {
17042     UseGPRForF16_F32 = true;
17043     UseGPRForF64 = true;
17044   }
17045 
17046   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
17047   // similar local variables rather than directly checking against the target
17048   // ABI.
17049 
17050   if (UseGPRForF16_F32 &&
17051       (ValVT == MVT::f16 || ValVT == MVT::bf16 || ValVT == MVT::f32)) {
17052     LocVT = XLenVT;
17053     LocInfo = CCValAssign::BCvt;
17054   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
17055     LocVT = MVT::i64;
17056     LocInfo = CCValAssign::BCvt;
17057   }
17058 
17059   ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs();
17060 
17061   // If this is a variadic argument, the RISC-V calling convention requires
17062   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
17063   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
17064   // be used regardless of whether the original argument was split during
17065   // legalisation or not. The argument will not be passed by registers if the
17066   // original type is larger than 2*XLEN, so the register alignment rule does
17067   // not apply.
17068   unsigned TwoXLenInBytes = (2 * XLen) / 8;
17069   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
17070       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
17071     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
17072     // Skip 'odd' register if necessary.
17073     if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
17074       State.AllocateReg(ArgGPRs);
17075   }
17076 
17077   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
17078   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
17079       State.getPendingArgFlags();
17080 
17081   assert(PendingLocs.size() == PendingArgFlags.size() &&
17082          "PendingLocs and PendingArgFlags out of sync");
17083 
17084   // Handle passing f64 on RV32D with a soft float ABI or when floating point
17085   // registers are exhausted.
17086   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
17087     assert(PendingLocs.empty() && "Can't lower f64 if it is split");
17088     // Depending on available argument GPRS, f64 may be passed in a pair of
17089     // GPRs, split between a GPR and the stack, or passed completely on the
17090     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
17091     // cases.
17092     Register Reg = State.AllocateReg(ArgGPRs);
17093     if (!Reg) {
17094       unsigned StackOffset = State.AllocateStack(8, Align(8));
17095       State.addLoc(
17096           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
17097       return false;
17098     }
17099     LocVT = MVT::i32;
17100     State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
17101     Register HiReg = State.AllocateReg(ArgGPRs);
17102     if (HiReg) {
17103       State.addLoc(
17104           CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
17105     } else {
17106       unsigned StackOffset = State.AllocateStack(4, Align(4));
17107       State.addLoc(
17108           CCValAssign::getCustomMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
17109     }
17110     return false;
17111   }
17112 
17113   // Fixed-length vectors are located in the corresponding scalable-vector
17114   // container types.
17115   if (ValVT.isFixedLengthVector())
17116     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
17117 
17118   // Split arguments might be passed indirectly, so keep track of the pending
17119   // values. Split vectors are passed via a mix of registers and indirectly, so
17120   // treat them as we would any other argument.
17121   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
17122     LocVT = XLenVT;
17123     LocInfo = CCValAssign::Indirect;
17124     PendingLocs.push_back(
17125         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
17126     PendingArgFlags.push_back(ArgFlags);
17127     if (!ArgFlags.isSplitEnd()) {
17128       return false;
17129     }
17130   }
17131 
17132   // If the split argument only had two elements, it should be passed directly
17133   // in registers or on the stack.
17134   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
17135       PendingLocs.size() <= 2) {
17136     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
17137     // Apply the normal calling convention rules to the first half of the
17138     // split argument.
17139     CCValAssign VA = PendingLocs[0];
17140     ISD::ArgFlagsTy AF = PendingArgFlags[0];
17141     PendingLocs.clear();
17142     PendingArgFlags.clear();
17143     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
17144                                ArgFlags);
17145   }
17146 
17147   // Allocate to a register if possible, or else a stack slot.
17148   Register Reg;
17149   unsigned StoreSizeBytes = XLen / 8;
17150   Align StackAlign = Align(XLen / 8);
17151 
17152   if ((ValVT == MVT::f16 || ValVT == MVT::bf16) && !UseGPRForF16_F32)
17153     Reg = State.AllocateReg(ArgFPR16s);
17154   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
17155     Reg = State.AllocateReg(ArgFPR32s);
17156   else if (ValVT == MVT::f64 && !UseGPRForF64)
17157     Reg = State.AllocateReg(ArgFPR64s);
17158   else if (ValVT.isVector()) {
17159     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
17160     if (!Reg) {
17161       // For return values, the vector must be passed fully via registers or
17162       // via the stack.
17163       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
17164       // but we're using all of them.
17165       if (IsRet)
17166         return true;
17167       // Try using a GPR to pass the address
17168       if ((Reg = State.AllocateReg(ArgGPRs))) {
17169         LocVT = XLenVT;
17170         LocInfo = CCValAssign::Indirect;
17171       } else if (ValVT.isScalableVector()) {
17172         LocVT = XLenVT;
17173         LocInfo = CCValAssign::Indirect;
17174       } else {
17175         // Pass fixed-length vectors on the stack.
17176         LocVT = ValVT;
17177         StoreSizeBytes = ValVT.getStoreSize();
17178         // Align vectors to their element sizes, being careful for vXi1
17179         // vectors.
17180         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
17181       }
17182     }
17183   } else {
17184     Reg = State.AllocateReg(ArgGPRs);
17185   }
17186 
17187   unsigned StackOffset =
17188       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
17189 
17190   // If we reach this point and PendingLocs is non-empty, we must be at the
17191   // end of a split argument that must be passed indirectly.
17192   if (!PendingLocs.empty()) {
17193     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
17194     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
17195 
17196     for (auto &It : PendingLocs) {
17197       if (Reg)
17198         It.convertToReg(Reg);
17199       else
17200         It.convertToMem(StackOffset);
17201       State.addLoc(It);
17202     }
17203     PendingLocs.clear();
17204     PendingArgFlags.clear();
17205     return false;
17206   }
17207 
17208   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
17209           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
17210          "Expected an XLenVT or vector types at this stage");
17211 
17212   if (Reg) {
17213     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
17214     return false;
17215   }
17216 
17217   // When a scalar floating-point value is passed on the stack, no
17218   // bit-conversion is needed.
17219   if (ValVT.isFloatingPoint() && LocInfo != CCValAssign::Indirect) {
17220     assert(!ValVT.isVector());
17221     LocVT = ValVT;
17222     LocInfo = CCValAssign::Full;
17223   }
17224   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
17225   return false;
17226 }
17227 
17228 template <typename ArgTy>
17229 static std::optional<unsigned> preAssignMask(const ArgTy &Args) {
17230   for (const auto &ArgIdx : enumerate(Args)) {
17231     MVT ArgVT = ArgIdx.value().VT;
17232     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
17233       return ArgIdx.index();
17234   }
17235   return std::nullopt;
17236 }
17237 
17238 void RISCVTargetLowering::analyzeInputArgs(
17239     MachineFunction &MF, CCState &CCInfo,
17240     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
17241     RISCVCCAssignFn Fn) const {
17242   unsigned NumArgs = Ins.size();
17243   FunctionType *FType = MF.getFunction().getFunctionType();
17244 
17245   std::optional<unsigned> FirstMaskArgument;
17246   if (Subtarget.hasVInstructions())
17247     FirstMaskArgument = preAssignMask(Ins);
17248 
17249   for (unsigned i = 0; i != NumArgs; ++i) {
17250     MVT ArgVT = Ins[i].VT;
17251     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
17252 
17253     Type *ArgTy = nullptr;
17254     if (IsRet)
17255       ArgTy = FType->getReturnType();
17256     else if (Ins[i].isOrigArg())
17257       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
17258 
17259     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
17260     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
17261            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
17262            FirstMaskArgument)) {
17263       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
17264                         << ArgVT << '\n');
17265       llvm_unreachable(nullptr);
17266     }
17267   }
17268 }
17269 
17270 void RISCVTargetLowering::analyzeOutputArgs(
17271     MachineFunction &MF, CCState &CCInfo,
17272     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
17273     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
17274   unsigned NumArgs = Outs.size();
17275 
17276   std::optional<unsigned> FirstMaskArgument;
17277   if (Subtarget.hasVInstructions())
17278     FirstMaskArgument = preAssignMask(Outs);
17279 
17280   for (unsigned i = 0; i != NumArgs; i++) {
17281     MVT ArgVT = Outs[i].VT;
17282     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
17283     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
17284 
17285     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
17286     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
17287            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
17288            FirstMaskArgument)) {
17289       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
17290                         << ArgVT << "\n");
17291       llvm_unreachable(nullptr);
17292     }
17293   }
17294 }
17295 
17296 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
17297 // values.
17298 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
17299                                    const CCValAssign &VA, const SDLoc &DL,
17300                                    const RISCVSubtarget &Subtarget) {
17301   switch (VA.getLocInfo()) {
17302   default:
17303     llvm_unreachable("Unexpected CCValAssign::LocInfo");
17304   case CCValAssign::Full:
17305     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
17306       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
17307     break;
17308   case CCValAssign::BCvt:
17309     if (VA.getLocVT().isInteger() &&
17310         (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) {
17311       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, VA.getValVT(), Val);
17312     } else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
17313       if (RV64LegalI32) {
17314         Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Val);
17315         Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
17316       } else {
17317         Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
17318       }
17319     } else {
17320       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
17321     }
17322     break;
17323   }
17324   return Val;
17325 }
17326 
17327 // The caller is responsible for loading the full value if the argument is
17328 // passed with CCValAssign::Indirect.
17329 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
17330                                 const CCValAssign &VA, const SDLoc &DL,
17331                                 const ISD::InputArg &In,
17332                                 const RISCVTargetLowering &TLI) {
17333   MachineFunction &MF = DAG.getMachineFunction();
17334   MachineRegisterInfo &RegInfo = MF.getRegInfo();
17335   EVT LocVT = VA.getLocVT();
17336   SDValue Val;
17337   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
17338   Register VReg = RegInfo.createVirtualRegister(RC);
17339   RegInfo.addLiveIn(VA.getLocReg(), VReg);
17340   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
17341 
17342   // If input is sign extended from 32 bits, note it for the SExtWRemoval pass.
17343   if (In.isOrigArg()) {
17344     Argument *OrigArg = MF.getFunction().getArg(In.getOrigArgIndex());
17345     if (OrigArg->getType()->isIntegerTy()) {
17346       unsigned BitWidth = OrigArg->getType()->getIntegerBitWidth();
17347       // An input zero extended from i31 can also be considered sign extended.
17348       if ((BitWidth <= 32 && In.Flags.isSExt()) ||
17349           (BitWidth < 32 && In.Flags.isZExt())) {
17350         RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
17351         RVFI->addSExt32Register(VReg);
17352       }
17353     }
17354   }
17355 
17356   if (VA.getLocInfo() == CCValAssign::Indirect)
17357     return Val;
17358 
17359   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
17360 }
17361 
17362 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
17363                                    const CCValAssign &VA, const SDLoc &DL,
17364                                    const RISCVSubtarget &Subtarget) {
17365   EVT LocVT = VA.getLocVT();
17366 
17367   switch (VA.getLocInfo()) {
17368   default:
17369     llvm_unreachable("Unexpected CCValAssign::LocInfo");
17370   case CCValAssign::Full:
17371     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
17372       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
17373     break;
17374   case CCValAssign::BCvt:
17375     if (LocVT.isInteger() &&
17376         (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) {
17377       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, LocVT, Val);
17378     } else if (LocVT == MVT::i64 && VA.getValVT() == MVT::f32) {
17379       if (RV64LegalI32) {
17380         Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
17381         Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Val);
17382       } else {
17383         Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
17384       }
17385     } else {
17386       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
17387     }
17388     break;
17389   }
17390   return Val;
17391 }
17392 
17393 // The caller is responsible for loading the full value if the argument is
17394 // passed with CCValAssign::Indirect.
17395 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
17396                                 const CCValAssign &VA, const SDLoc &DL) {
17397   MachineFunction &MF = DAG.getMachineFunction();
17398   MachineFrameInfo &MFI = MF.getFrameInfo();
17399   EVT LocVT = VA.getLocVT();
17400   EVT ValVT = VA.getValVT();
17401   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
17402   if (ValVT.isScalableVector()) {
17403     // When the value is a scalable vector, we save the pointer which points to
17404     // the scalable vector value in the stack. The ValVT will be the pointer
17405     // type, instead of the scalable vector type.
17406     ValVT = LocVT;
17407   }
17408   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
17409                                  /*IsImmutable=*/true);
17410   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
17411   SDValue Val;
17412 
17413   ISD::LoadExtType ExtType;
17414   switch (VA.getLocInfo()) {
17415   default:
17416     llvm_unreachable("Unexpected CCValAssign::LocInfo");
17417   case CCValAssign::Full:
17418   case CCValAssign::Indirect:
17419   case CCValAssign::BCvt:
17420     ExtType = ISD::NON_EXTLOAD;
17421     break;
17422   }
17423   Val = DAG.getExtLoad(
17424       ExtType, DL, LocVT, Chain, FIN,
17425       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
17426   return Val;
17427 }
17428 
17429 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
17430                                        const CCValAssign &VA,
17431                                        const CCValAssign &HiVA,
17432                                        const SDLoc &DL) {
17433   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
17434          "Unexpected VA");
17435   MachineFunction &MF = DAG.getMachineFunction();
17436   MachineFrameInfo &MFI = MF.getFrameInfo();
17437   MachineRegisterInfo &RegInfo = MF.getRegInfo();
17438 
17439   assert(VA.isRegLoc() && "Expected register VA assignment");
17440 
17441   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
17442   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
17443   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
17444   SDValue Hi;
17445   if (HiVA.isMemLoc()) {
17446     // Second half of f64 is passed on the stack.
17447     int FI = MFI.CreateFixedObject(4, HiVA.getLocMemOffset(),
17448                                    /*IsImmutable=*/true);
17449     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
17450     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
17451                      MachinePointerInfo::getFixedStack(MF, FI));
17452   } else {
17453     // Second half of f64 is passed in another GPR.
17454     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
17455     RegInfo.addLiveIn(HiVA.getLocReg(), HiVReg);
17456     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
17457   }
17458   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
17459 }
17460 
17461 // FastCC has less than 1% performance improvement for some particular
17462 // benchmark. But theoretically, it may has benenfit for some cases.
17463 bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
17464                             unsigned ValNo, MVT ValVT, MVT LocVT,
17465                             CCValAssign::LocInfo LocInfo,
17466                             ISD::ArgFlagsTy ArgFlags, CCState &State,
17467                             bool IsFixed, bool IsRet, Type *OrigTy,
17468                             const RISCVTargetLowering &TLI,
17469                             std::optional<unsigned> FirstMaskArgument) {
17470 
17471   // X5 and X6 might be used for save-restore libcall.
17472   static const MCPhysReg GPRList[] = {
17473       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
17474       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
17475       RISCV::X29, RISCV::X30, RISCV::X31};
17476 
17477   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
17478     if (unsigned Reg = State.AllocateReg(GPRList)) {
17479       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
17480       return false;
17481     }
17482   }
17483 
17484   const RISCVSubtarget &Subtarget = TLI.getSubtarget();
17485 
17486   if (LocVT == MVT::f16 &&
17487       (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZfhmin())) {
17488     static const MCPhysReg FPR16List[] = {
17489         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
17490         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
17491         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
17492         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
17493     if (unsigned Reg = State.AllocateReg(FPR16List)) {
17494       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
17495       return false;
17496     }
17497   }
17498 
17499   if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
17500     static const MCPhysReg FPR32List[] = {
17501         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
17502         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
17503         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
17504         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
17505     if (unsigned Reg = State.AllocateReg(FPR32List)) {
17506       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
17507       return false;
17508     }
17509   }
17510 
17511   if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
17512     static const MCPhysReg FPR64List[] = {
17513         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
17514         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
17515         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
17516         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
17517     if (unsigned Reg = State.AllocateReg(FPR64List)) {
17518       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
17519       return false;
17520     }
17521   }
17522 
17523   // Check if there is an available GPR before hitting the stack.
17524   if ((LocVT == MVT::f16 &&
17525        (Subtarget.hasStdExtZhinx() || Subtarget.hasStdExtZhinxmin())) ||
17526       (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
17527       (LocVT == MVT::f64 && Subtarget.is64Bit() &&
17528        Subtarget.hasStdExtZdinx())) {
17529     if (unsigned Reg = State.AllocateReg(GPRList)) {
17530       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
17531       return false;
17532     }
17533   }
17534 
17535   if (LocVT == MVT::f16) {
17536     unsigned Offset2 = State.AllocateStack(2, Align(2));
17537     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo));
17538     return false;
17539   }
17540 
17541   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
17542     unsigned Offset4 = State.AllocateStack(4, Align(4));
17543     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
17544     return false;
17545   }
17546 
17547   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
17548     unsigned Offset5 = State.AllocateStack(8, Align(8));
17549     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
17550     return false;
17551   }
17552 
17553   if (LocVT.isVector()) {
17554     if (unsigned Reg =
17555             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
17556       // Fixed-length vectors are located in the corresponding scalable-vector
17557       // container types.
17558       if (ValVT.isFixedLengthVector())
17559         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
17560       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
17561     } else {
17562       // Try and pass the address via a "fast" GPR.
17563       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
17564         LocInfo = CCValAssign::Indirect;
17565         LocVT = TLI.getSubtarget().getXLenVT();
17566         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
17567       } else if (ValVT.isFixedLengthVector()) {
17568         auto StackAlign =
17569             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
17570         unsigned StackOffset =
17571             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
17572         State.addLoc(
17573             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
17574       } else {
17575         // Can't pass scalable vectors on the stack.
17576         return true;
17577       }
17578     }
17579 
17580     return false;
17581   }
17582 
17583   return true; // CC didn't match.
17584 }
17585 
17586 bool RISCV::CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
17587                          CCValAssign::LocInfo LocInfo,
17588                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
17589   if (ArgFlags.isNest()) {
17590     report_fatal_error(
17591         "Attribute 'nest' is not supported in GHC calling convention");
17592   }
17593 
17594   static const MCPhysReg GPRList[] = {
17595       RISCV::X9,  RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
17596       RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
17597 
17598   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
17599     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
17600     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
17601     if (unsigned Reg = State.AllocateReg(GPRList)) {
17602       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
17603       return false;
17604     }
17605   }
17606 
17607   const RISCVSubtarget &Subtarget =
17608       State.getMachineFunction().getSubtarget<RISCVSubtarget>();
17609 
17610   if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
17611     // Pass in STG registers: F1, ..., F6
17612     //                        fs0 ... fs5
17613     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
17614                                           RISCV::F18_F, RISCV::F19_F,
17615                                           RISCV::F20_F, RISCV::F21_F};
17616     if (unsigned Reg = State.AllocateReg(FPR32List)) {
17617       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
17618       return false;
17619     }
17620   }
17621 
17622   if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
17623     // Pass in STG registers: D1, ..., D6
17624     //                        fs6 ... fs11
17625     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
17626                                           RISCV::F24_D, RISCV::F25_D,
17627                                           RISCV::F26_D, RISCV::F27_D};
17628     if (unsigned Reg = State.AllocateReg(FPR64List)) {
17629       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
17630       return false;
17631     }
17632   }
17633 
17634   if ((LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
17635       (LocVT == MVT::f64 && Subtarget.hasStdExtZdinx() &&
17636        Subtarget.is64Bit())) {
17637     if (unsigned Reg = State.AllocateReg(GPRList)) {
17638       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
17639       return false;
17640     }
17641   }
17642 
17643   report_fatal_error("No registers left in GHC calling convention");
17644   return true;
17645 }
17646 
17647 // Transform physical registers into virtual registers.
17648 SDValue RISCVTargetLowering::LowerFormalArguments(
17649     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
17650     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
17651     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
17652 
17653   MachineFunction &MF = DAG.getMachineFunction();
17654 
17655   switch (CallConv) {
17656   default:
17657     report_fatal_error("Unsupported calling convention");
17658   case CallingConv::C:
17659   case CallingConv::Fast:
17660   case CallingConv::SPIR_KERNEL:
17661   case CallingConv::GRAAL:
17662     break;
17663   case CallingConv::GHC:
17664     if (!Subtarget.hasStdExtFOrZfinx() || !Subtarget.hasStdExtDOrZdinx())
17665       report_fatal_error("GHC calling convention requires the (Zfinx/F) and "
17666                          "(Zdinx/D) instruction set extensions");
17667   }
17668 
17669   const Function &Func = MF.getFunction();
17670   if (Func.hasFnAttribute("interrupt")) {
17671     if (!Func.arg_empty())
17672       report_fatal_error(
17673         "Functions with the interrupt attribute cannot have arguments!");
17674 
17675     StringRef Kind =
17676       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
17677 
17678     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
17679       report_fatal_error(
17680         "Function interrupt attribute argument not supported!");
17681   }
17682 
17683   EVT PtrVT = getPointerTy(DAG.getDataLayout());
17684   MVT XLenVT = Subtarget.getXLenVT();
17685   unsigned XLenInBytes = Subtarget.getXLen() / 8;
17686   // Used with vargs to acumulate store chains.
17687   std::vector<SDValue> OutChains;
17688 
17689   // Assign locations to all of the incoming arguments.
17690   SmallVector<CCValAssign, 16> ArgLocs;
17691   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
17692 
17693   if (CallConv == CallingConv::GHC)
17694     CCInfo.AnalyzeFormalArguments(Ins, RISCV::CC_RISCV_GHC);
17695   else
17696     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
17697                      CallConv == CallingConv::Fast ? RISCV::CC_RISCV_FastCC
17698                                                    : RISCV::CC_RISCV);
17699 
17700   for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
17701     CCValAssign &VA = ArgLocs[i];
17702     SDValue ArgValue;
17703     // Passing f64 on RV32D with a soft float ABI must be handled as a special
17704     // case.
17705     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
17706       assert(VA.needsCustom());
17707       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, ArgLocs[++i], DL);
17708     } else if (VA.isRegLoc())
17709       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, Ins[InsIdx], *this);
17710     else
17711       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
17712 
17713     if (VA.getLocInfo() == CCValAssign::Indirect) {
17714       // If the original argument was split and passed by reference (e.g. i128
17715       // on RV32), we need to load all parts of it here (using the same
17716       // address). Vectors may be partly split to registers and partly to the
17717       // stack, in which case the base address is partly offset and subsequent
17718       // stores are relative to that.
17719       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
17720                                    MachinePointerInfo()));
17721       unsigned ArgIndex = Ins[InsIdx].OrigArgIndex;
17722       unsigned ArgPartOffset = Ins[InsIdx].PartOffset;
17723       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
17724       while (i + 1 != e && Ins[InsIdx + 1].OrigArgIndex == ArgIndex) {
17725         CCValAssign &PartVA = ArgLocs[i + 1];
17726         unsigned PartOffset = Ins[InsIdx + 1].PartOffset - ArgPartOffset;
17727         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
17728         if (PartVA.getValVT().isScalableVector())
17729           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
17730         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
17731         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
17732                                      MachinePointerInfo()));
17733         ++i;
17734         ++InsIdx;
17735       }
17736       continue;
17737     }
17738     InVals.push_back(ArgValue);
17739   }
17740 
17741   if (any_of(ArgLocs,
17742              [](CCValAssign &VA) { return VA.getLocVT().isScalableVector(); }))
17743     MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
17744 
17745   if (IsVarArg) {
17746     ArrayRef<MCPhysReg> ArgRegs = RISCV::getArgGPRs();
17747     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
17748     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
17749     MachineFrameInfo &MFI = MF.getFrameInfo();
17750     MachineRegisterInfo &RegInfo = MF.getRegInfo();
17751     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
17752 
17753     // Size of the vararg save area. For now, the varargs save area is either
17754     // zero or large enough to hold a0-a7.
17755     int VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
17756     int FI;
17757 
17758     // If all registers are allocated, then all varargs must be passed on the
17759     // stack and we don't need to save any argregs.
17760     if (VarArgsSaveSize == 0) {
17761       int VaArgOffset = CCInfo.getStackSize();
17762       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
17763     } else {
17764       int VaArgOffset = -VarArgsSaveSize;
17765       FI = MFI.CreateFixedObject(VarArgsSaveSize, VaArgOffset, true);
17766 
17767       // If saving an odd number of registers then create an extra stack slot to
17768       // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
17769       // offsets to even-numbered registered remain 2*XLEN-aligned.
17770       if (Idx % 2) {
17771         MFI.CreateFixedObject(
17772             XLenInBytes, VaArgOffset - static_cast<int>(XLenInBytes), true);
17773         VarArgsSaveSize += XLenInBytes;
17774       }
17775 
17776       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
17777 
17778       // Copy the integer registers that may have been used for passing varargs
17779       // to the vararg save area.
17780       for (unsigned I = Idx; I < ArgRegs.size(); ++I) {
17781         const Register Reg = RegInfo.createVirtualRegister(RC);
17782         RegInfo.addLiveIn(ArgRegs[I], Reg);
17783         SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
17784         SDValue Store = DAG.getStore(
17785             Chain, DL, ArgValue, FIN,
17786             MachinePointerInfo::getFixedStack(MF, FI, (I - Idx) * XLenInBytes));
17787         OutChains.push_back(Store);
17788         FIN =
17789             DAG.getMemBasePlusOffset(FIN, TypeSize::getFixed(XLenInBytes), DL);
17790       }
17791     }
17792 
17793     // Record the frame index of the first variable argument
17794     // which is a value necessary to VASTART.
17795     RVFI->setVarArgsFrameIndex(FI);
17796     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
17797   }
17798 
17799   // All stores are grouped in one node to allow the matching between
17800   // the size of Ins and InVals. This only happens for vararg functions.
17801   if (!OutChains.empty()) {
17802     OutChains.push_back(Chain);
17803     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
17804   }
17805 
17806   return Chain;
17807 }
17808 
17809 /// isEligibleForTailCallOptimization - Check whether the call is eligible
17810 /// for tail call optimization.
17811 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
17812 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
17813     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
17814     const SmallVector<CCValAssign, 16> &ArgLocs) const {
17815 
17816   auto CalleeCC = CLI.CallConv;
17817   auto &Outs = CLI.Outs;
17818   auto &Caller = MF.getFunction();
17819   auto CallerCC = Caller.getCallingConv();
17820 
17821   // Exception-handling functions need a special set of instructions to
17822   // indicate a return to the hardware. Tail-calling another function would
17823   // probably break this.
17824   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
17825   // should be expanded as new function attributes are introduced.
17826   if (Caller.hasFnAttribute("interrupt"))
17827     return false;
17828 
17829   // Do not tail call opt if the stack is used to pass parameters.
17830   if (CCInfo.getStackSize() != 0)
17831     return false;
17832 
17833   // Do not tail call opt if any parameters need to be passed indirectly.
17834   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
17835   // passed indirectly. So the address of the value will be passed in a
17836   // register, or if not available, then the address is put on the stack. In
17837   // order to pass indirectly, space on the stack often needs to be allocated
17838   // in order to store the value. In this case the CCInfo.getNextStackOffset()
17839   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
17840   // are passed CCValAssign::Indirect.
17841   for (auto &VA : ArgLocs)
17842     if (VA.getLocInfo() == CCValAssign::Indirect)
17843       return false;
17844 
17845   // Do not tail call opt if either caller or callee uses struct return
17846   // semantics.
17847   auto IsCallerStructRet = Caller.hasStructRetAttr();
17848   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
17849   if (IsCallerStructRet || IsCalleeStructRet)
17850     return false;
17851 
17852   // The callee has to preserve all registers the caller needs to preserve.
17853   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
17854   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
17855   if (CalleeCC != CallerCC) {
17856     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
17857     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
17858       return false;
17859   }
17860 
17861   // Byval parameters hand the function a pointer directly into the stack area
17862   // we want to reuse during a tail call. Working around this *is* possible
17863   // but less efficient and uglier in LowerCall.
17864   for (auto &Arg : Outs)
17865     if (Arg.Flags.isByVal())
17866       return false;
17867 
17868   return true;
17869 }
17870 
17871 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
17872   return DAG.getDataLayout().getPrefTypeAlign(
17873       VT.getTypeForEVT(*DAG.getContext()));
17874 }
17875 
17876 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
17877 // and output parameter nodes.
17878 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
17879                                        SmallVectorImpl<SDValue> &InVals) const {
17880   SelectionDAG &DAG = CLI.DAG;
17881   SDLoc &DL = CLI.DL;
17882   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
17883   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
17884   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
17885   SDValue Chain = CLI.Chain;
17886   SDValue Callee = CLI.Callee;
17887   bool &IsTailCall = CLI.IsTailCall;
17888   CallingConv::ID CallConv = CLI.CallConv;
17889   bool IsVarArg = CLI.IsVarArg;
17890   EVT PtrVT = getPointerTy(DAG.getDataLayout());
17891   MVT XLenVT = Subtarget.getXLenVT();
17892 
17893   MachineFunction &MF = DAG.getMachineFunction();
17894 
17895   // Analyze the operands of the call, assigning locations to each operand.
17896   SmallVector<CCValAssign, 16> ArgLocs;
17897   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
17898 
17899   if (CallConv == CallingConv::GHC)
17900     ArgCCInfo.AnalyzeCallOperands(Outs, RISCV::CC_RISCV_GHC);
17901   else
17902     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
17903                       CallConv == CallingConv::Fast ? RISCV::CC_RISCV_FastCC
17904                                                     : RISCV::CC_RISCV);
17905 
17906   // Check if it's really possible to do a tail call.
17907   if (IsTailCall)
17908     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
17909 
17910   if (IsTailCall)
17911     ++NumTailCalls;
17912   else if (CLI.CB && CLI.CB->isMustTailCall())
17913     report_fatal_error("failed to perform tail call elimination on a call "
17914                        "site marked musttail");
17915 
17916   // Get a count of how many bytes are to be pushed on the stack.
17917   unsigned NumBytes = ArgCCInfo.getStackSize();
17918 
17919   // Create local copies for byval args
17920   SmallVector<SDValue, 8> ByValArgs;
17921   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
17922     ISD::ArgFlagsTy Flags = Outs[i].Flags;
17923     if (!Flags.isByVal())
17924       continue;
17925 
17926     SDValue Arg = OutVals[i];
17927     unsigned Size = Flags.getByValSize();
17928     Align Alignment = Flags.getNonZeroByValAlign();
17929 
17930     int FI =
17931         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
17932     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
17933     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
17934 
17935     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
17936                           /*IsVolatile=*/false,
17937                           /*AlwaysInline=*/false, IsTailCall,
17938                           MachinePointerInfo(), MachinePointerInfo());
17939     ByValArgs.push_back(FIPtr);
17940   }
17941 
17942   if (!IsTailCall)
17943     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
17944 
17945   // Copy argument values to their designated locations.
17946   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
17947   SmallVector<SDValue, 8> MemOpChains;
17948   SDValue StackPtr;
17949   for (unsigned i = 0, j = 0, e = ArgLocs.size(), OutIdx = 0; i != e;
17950        ++i, ++OutIdx) {
17951     CCValAssign &VA = ArgLocs[i];
17952     SDValue ArgValue = OutVals[OutIdx];
17953     ISD::ArgFlagsTy Flags = Outs[OutIdx].Flags;
17954 
17955     // Handle passing f64 on RV32D with a soft float ABI as a special case.
17956     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
17957       assert(VA.isRegLoc() && "Expected register VA assignment");
17958       assert(VA.needsCustom());
17959       SDValue SplitF64 = DAG.getNode(
17960           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
17961       SDValue Lo = SplitF64.getValue(0);
17962       SDValue Hi = SplitF64.getValue(1);
17963 
17964       Register RegLo = VA.getLocReg();
17965       RegsToPass.push_back(std::make_pair(RegLo, Lo));
17966 
17967       // Get the CCValAssign for the Hi part.
17968       CCValAssign &HiVA = ArgLocs[++i];
17969 
17970       if (HiVA.isMemLoc()) {
17971         // Second half of f64 is passed on the stack.
17972         if (!StackPtr.getNode())
17973           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
17974         SDValue Address =
17975             DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
17976                         DAG.getIntPtrConstant(HiVA.getLocMemOffset(), DL));
17977         // Emit the store.
17978         MemOpChains.push_back(
17979             DAG.getStore(Chain, DL, Hi, Address, MachinePointerInfo()));
17980       } else {
17981         // Second half of f64 is passed in another GPR.
17982         Register RegHigh = HiVA.getLocReg();
17983         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
17984       }
17985       continue;
17986     }
17987 
17988     // Promote the value if needed.
17989     // For now, only handle fully promoted and indirect arguments.
17990     if (VA.getLocInfo() == CCValAssign::Indirect) {
17991       // Store the argument in a stack slot and pass its address.
17992       Align StackAlign =
17993           std::max(getPrefTypeAlign(Outs[OutIdx].ArgVT, DAG),
17994                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
17995       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
17996       // If the original argument was split (e.g. i128), we need
17997       // to store the required parts of it here (and pass just one address).
17998       // Vectors may be partly split to registers and partly to the stack, in
17999       // which case the base address is partly offset and subsequent stores are
18000       // relative to that.
18001       unsigned ArgIndex = Outs[OutIdx].OrigArgIndex;
18002       unsigned ArgPartOffset = Outs[OutIdx].PartOffset;
18003       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
18004       // Calculate the total size to store. We don't have access to what we're
18005       // actually storing other than performing the loop and collecting the
18006       // info.
18007       SmallVector<std::pair<SDValue, SDValue>> Parts;
18008       while (i + 1 != e && Outs[OutIdx + 1].OrigArgIndex == ArgIndex) {
18009         SDValue PartValue = OutVals[OutIdx + 1];
18010         unsigned PartOffset = Outs[OutIdx + 1].PartOffset - ArgPartOffset;
18011         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
18012         EVT PartVT = PartValue.getValueType();
18013         if (PartVT.isScalableVector())
18014           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
18015         StoredSize += PartVT.getStoreSize();
18016         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
18017         Parts.push_back(std::make_pair(PartValue, Offset));
18018         ++i;
18019         ++OutIdx;
18020       }
18021       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
18022       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
18023       MemOpChains.push_back(
18024           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
18025                        MachinePointerInfo::getFixedStack(MF, FI)));
18026       for (const auto &Part : Parts) {
18027         SDValue PartValue = Part.first;
18028         SDValue PartOffset = Part.second;
18029         SDValue Address =
18030             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
18031         MemOpChains.push_back(
18032             DAG.getStore(Chain, DL, PartValue, Address,
18033                          MachinePointerInfo::getFixedStack(MF, FI)));
18034       }
18035       ArgValue = SpillSlot;
18036     } else {
18037       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
18038     }
18039 
18040     // Use local copy if it is a byval arg.
18041     if (Flags.isByVal())
18042       ArgValue = ByValArgs[j++];
18043 
18044     if (VA.isRegLoc()) {
18045       // Queue up the argument copies and emit them at the end.
18046       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
18047     } else {
18048       assert(VA.isMemLoc() && "Argument not register or memory");
18049       assert(!IsTailCall && "Tail call not allowed if stack is used "
18050                             "for passing parameters");
18051 
18052       // Work out the address of the stack slot.
18053       if (!StackPtr.getNode())
18054         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
18055       SDValue Address =
18056           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
18057                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
18058 
18059       // Emit the store.
18060       MemOpChains.push_back(
18061           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
18062     }
18063   }
18064 
18065   // Join the stores, which are independent of one another.
18066   if (!MemOpChains.empty())
18067     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
18068 
18069   SDValue Glue;
18070 
18071   // Build a sequence of copy-to-reg nodes, chained and glued together.
18072   for (auto &Reg : RegsToPass) {
18073     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
18074     Glue = Chain.getValue(1);
18075   }
18076 
18077   // Validate that none of the argument registers have been marked as
18078   // reserved, if so report an error. Do the same for the return address if this
18079   // is not a tailcall.
18080   validateCCReservedRegs(RegsToPass, MF);
18081   if (!IsTailCall &&
18082       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
18083     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
18084         MF.getFunction(),
18085         "Return address register required, but has been reserved."});
18086 
18087   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
18088   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
18089   // split it and then direct call can be matched by PseudoCALL.
18090   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
18091     const GlobalValue *GV = S->getGlobal();
18092 
18093     unsigned OpFlags = RISCVII::MO_CALL;
18094     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
18095       OpFlags = RISCVII::MO_PLT;
18096 
18097     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
18098   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
18099     unsigned OpFlags = RISCVII::MO_CALL;
18100 
18101     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
18102                                                  nullptr))
18103       OpFlags = RISCVII::MO_PLT;
18104 
18105     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
18106   }
18107 
18108   // The first call operand is the chain and the second is the target address.
18109   SmallVector<SDValue, 8> Ops;
18110   Ops.push_back(Chain);
18111   Ops.push_back(Callee);
18112 
18113   // Add argument registers to the end of the list so that they are
18114   // known live into the call.
18115   for (auto &Reg : RegsToPass)
18116     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
18117 
18118   if (!IsTailCall) {
18119     // Add a register mask operand representing the call-preserved registers.
18120     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
18121     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
18122     assert(Mask && "Missing call preserved mask for calling convention");
18123     Ops.push_back(DAG.getRegisterMask(Mask));
18124   }
18125 
18126   // Glue the call to the argument copies, if any.
18127   if (Glue.getNode())
18128     Ops.push_back(Glue);
18129 
18130   assert((!CLI.CFIType || CLI.CB->isIndirectCall()) &&
18131          "Unexpected CFI type for a direct call");
18132 
18133   // Emit the call.
18134   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18135 
18136   if (IsTailCall) {
18137     MF.getFrameInfo().setHasTailCall();
18138     SDValue Ret = DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
18139     if (CLI.CFIType)
18140       Ret.getNode()->setCFIType(CLI.CFIType->getZExtValue());
18141     DAG.addNoMergeSiteInfo(Ret.getNode(), CLI.NoMerge);
18142     return Ret;
18143   }
18144 
18145   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
18146   if (CLI.CFIType)
18147     Chain.getNode()->setCFIType(CLI.CFIType->getZExtValue());
18148   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
18149   Glue = Chain.getValue(1);
18150 
18151   // Mark the end of the call, which is glued to the call itself.
18152   Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, Glue, DL);
18153   Glue = Chain.getValue(1);
18154 
18155   // Assign locations to each value returned by this call.
18156   SmallVector<CCValAssign, 16> RVLocs;
18157   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
18158   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, RISCV::CC_RISCV);
18159 
18160   // Copy all of the result registers out of their specified physreg.
18161   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
18162     auto &VA = RVLocs[i];
18163     // Copy the value out
18164     SDValue RetValue =
18165         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
18166     // Glue the RetValue to the end of the call sequence
18167     Chain = RetValue.getValue(1);
18168     Glue = RetValue.getValue(2);
18169 
18170     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
18171       assert(VA.needsCustom());
18172       SDValue RetValue2 = DAG.getCopyFromReg(Chain, DL, RVLocs[++i].getLocReg(),
18173                                              MVT::i32, Glue);
18174       Chain = RetValue2.getValue(1);
18175       Glue = RetValue2.getValue(2);
18176       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
18177                              RetValue2);
18178     }
18179 
18180     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
18181 
18182     InVals.push_back(RetValue);
18183   }
18184 
18185   return Chain;
18186 }
18187 
18188 bool RISCVTargetLowering::CanLowerReturn(
18189     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
18190     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
18191   SmallVector<CCValAssign, 16> RVLocs;
18192   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
18193 
18194   std::optional<unsigned> FirstMaskArgument;
18195   if (Subtarget.hasVInstructions())
18196     FirstMaskArgument = preAssignMask(Outs);
18197 
18198   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
18199     MVT VT = Outs[i].VT;
18200     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
18201     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
18202     if (RISCV::CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
18203                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
18204                  *this, FirstMaskArgument))
18205       return false;
18206   }
18207   return true;
18208 }
18209 
18210 SDValue
18211 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
18212                                  bool IsVarArg,
18213                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
18214                                  const SmallVectorImpl<SDValue> &OutVals,
18215                                  const SDLoc &DL, SelectionDAG &DAG) const {
18216   MachineFunction &MF = DAG.getMachineFunction();
18217   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
18218 
18219   // Stores the assignment of the return value to a location.
18220   SmallVector<CCValAssign, 16> RVLocs;
18221 
18222   // Info about the registers and stack slot.
18223   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
18224                  *DAG.getContext());
18225 
18226   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
18227                     nullptr, RISCV::CC_RISCV);
18228 
18229   if (CallConv == CallingConv::GHC && !RVLocs.empty())
18230     report_fatal_error("GHC functions return void only");
18231 
18232   SDValue Glue;
18233   SmallVector<SDValue, 4> RetOps(1, Chain);
18234 
18235   // Copy the result values into the output registers.
18236   for (unsigned i = 0, e = RVLocs.size(), OutIdx = 0; i < e; ++i, ++OutIdx) {
18237     SDValue Val = OutVals[OutIdx];
18238     CCValAssign &VA = RVLocs[i];
18239     assert(VA.isRegLoc() && "Can only return in registers!");
18240 
18241     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
18242       // Handle returning f64 on RV32D with a soft float ABI.
18243       assert(VA.isRegLoc() && "Expected return via registers");
18244       assert(VA.needsCustom());
18245       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
18246                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
18247       SDValue Lo = SplitF64.getValue(0);
18248       SDValue Hi = SplitF64.getValue(1);
18249       Register RegLo = VA.getLocReg();
18250       Register RegHi = RVLocs[++i].getLocReg();
18251 
18252       if (STI.isRegisterReservedByUser(RegLo) ||
18253           STI.isRegisterReservedByUser(RegHi))
18254         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
18255             MF.getFunction(),
18256             "Return value register required, but has been reserved."});
18257 
18258       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
18259       Glue = Chain.getValue(1);
18260       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
18261       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
18262       Glue = Chain.getValue(1);
18263       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
18264     } else {
18265       // Handle a 'normal' return.
18266       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
18267       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
18268 
18269       if (STI.isRegisterReservedByUser(VA.getLocReg()))
18270         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
18271             MF.getFunction(),
18272             "Return value register required, but has been reserved."});
18273 
18274       // Guarantee that all emitted copies are stuck together.
18275       Glue = Chain.getValue(1);
18276       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
18277     }
18278   }
18279 
18280   RetOps[0] = Chain; // Update chain.
18281 
18282   // Add the glue node if we have it.
18283   if (Glue.getNode()) {
18284     RetOps.push_back(Glue);
18285   }
18286 
18287   if (any_of(RVLocs,
18288              [](CCValAssign &VA) { return VA.getLocVT().isScalableVector(); }))
18289     MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
18290 
18291   unsigned RetOpc = RISCVISD::RET_GLUE;
18292   // Interrupt service routines use different return instructions.
18293   const Function &Func = DAG.getMachineFunction().getFunction();
18294   if (Func.hasFnAttribute("interrupt")) {
18295     if (!Func.getReturnType()->isVoidTy())
18296       report_fatal_error(
18297           "Functions with the interrupt attribute must have void return type!");
18298 
18299     MachineFunction &MF = DAG.getMachineFunction();
18300     StringRef Kind =
18301       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
18302 
18303     if (Kind == "supervisor")
18304       RetOpc = RISCVISD::SRET_GLUE;
18305     else
18306       RetOpc = RISCVISD::MRET_GLUE;
18307   }
18308 
18309   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
18310 }
18311 
18312 void RISCVTargetLowering::validateCCReservedRegs(
18313     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
18314     MachineFunction &MF) const {
18315   const Function &F = MF.getFunction();
18316   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
18317 
18318   if (llvm::any_of(Regs, [&STI](auto Reg) {
18319         return STI.isRegisterReservedByUser(Reg.first);
18320       }))
18321     F.getContext().diagnose(DiagnosticInfoUnsupported{
18322         F, "Argument register required, but has been reserved."});
18323 }
18324 
18325 // Check if the result of the node is only used as a return value, as
18326 // otherwise we can't perform a tail-call.
18327 bool RISCVTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
18328   if (N->getNumValues() != 1)
18329     return false;
18330   if (!N->hasNUsesOfValue(1, 0))
18331     return false;
18332 
18333   SDNode *Copy = *N->use_begin();
18334 
18335   if (Copy->getOpcode() == ISD::BITCAST) {
18336     return isUsedByReturnOnly(Copy, Chain);
18337   }
18338 
18339   // TODO: Handle additional opcodes in order to support tail-calling libcalls
18340   // with soft float ABIs.
18341   if (Copy->getOpcode() != ISD::CopyToReg) {
18342     return false;
18343   }
18344 
18345   // If the ISD::CopyToReg has a glue operand, we conservatively assume it
18346   // isn't safe to perform a tail call.
18347   if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() == MVT::Glue)
18348     return false;
18349 
18350   // The copy must be used by a RISCVISD::RET_GLUE, and nothing else.
18351   bool HasRet = false;
18352   for (SDNode *Node : Copy->uses()) {
18353     if (Node->getOpcode() != RISCVISD::RET_GLUE)
18354       return false;
18355     HasRet = true;
18356   }
18357   if (!HasRet)
18358     return false;
18359 
18360   Chain = Copy->getOperand(0);
18361   return true;
18362 }
18363 
18364 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
18365   return CI->isTailCall();
18366 }
18367 
18368 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
18369 #define NODE_NAME_CASE(NODE)                                                   \
18370   case RISCVISD::NODE:                                                         \
18371     return "RISCVISD::" #NODE;
18372   // clang-format off
18373   switch ((RISCVISD::NodeType)Opcode) {
18374   case RISCVISD::FIRST_NUMBER:
18375     break;
18376   NODE_NAME_CASE(RET_GLUE)
18377   NODE_NAME_CASE(SRET_GLUE)
18378   NODE_NAME_CASE(MRET_GLUE)
18379   NODE_NAME_CASE(CALL)
18380   NODE_NAME_CASE(SELECT_CC)
18381   NODE_NAME_CASE(BR_CC)
18382   NODE_NAME_CASE(BuildPairF64)
18383   NODE_NAME_CASE(SplitF64)
18384   NODE_NAME_CASE(TAIL)
18385   NODE_NAME_CASE(ADD_LO)
18386   NODE_NAME_CASE(HI)
18387   NODE_NAME_CASE(LLA)
18388   NODE_NAME_CASE(ADD_TPREL)
18389   NODE_NAME_CASE(MULHSU)
18390   NODE_NAME_CASE(SLLW)
18391   NODE_NAME_CASE(SRAW)
18392   NODE_NAME_CASE(SRLW)
18393   NODE_NAME_CASE(DIVW)
18394   NODE_NAME_CASE(DIVUW)
18395   NODE_NAME_CASE(REMUW)
18396   NODE_NAME_CASE(ROLW)
18397   NODE_NAME_CASE(RORW)
18398   NODE_NAME_CASE(CLZW)
18399   NODE_NAME_CASE(CTZW)
18400   NODE_NAME_CASE(ABSW)
18401   NODE_NAME_CASE(FMV_H_X)
18402   NODE_NAME_CASE(FMV_X_ANYEXTH)
18403   NODE_NAME_CASE(FMV_X_SIGNEXTH)
18404   NODE_NAME_CASE(FMV_W_X_RV64)
18405   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
18406   NODE_NAME_CASE(FCVT_X)
18407   NODE_NAME_CASE(FCVT_XU)
18408   NODE_NAME_CASE(FCVT_W_RV64)
18409   NODE_NAME_CASE(FCVT_WU_RV64)
18410   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
18411   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
18412   NODE_NAME_CASE(FP_ROUND_BF16)
18413   NODE_NAME_CASE(FP_EXTEND_BF16)
18414   NODE_NAME_CASE(FROUND)
18415   NODE_NAME_CASE(FCLASS)
18416   NODE_NAME_CASE(FMAX)
18417   NODE_NAME_CASE(FMIN)
18418   NODE_NAME_CASE(READ_CYCLE_WIDE)
18419   NODE_NAME_CASE(BREV8)
18420   NODE_NAME_CASE(ORC_B)
18421   NODE_NAME_CASE(ZIP)
18422   NODE_NAME_CASE(UNZIP)
18423   NODE_NAME_CASE(CLMUL)
18424   NODE_NAME_CASE(CLMULH)
18425   NODE_NAME_CASE(CLMULR)
18426   NODE_NAME_CASE(SHA256SIG0)
18427   NODE_NAME_CASE(SHA256SIG1)
18428   NODE_NAME_CASE(SHA256SUM0)
18429   NODE_NAME_CASE(SHA256SUM1)
18430   NODE_NAME_CASE(SM4KS)
18431   NODE_NAME_CASE(SM4ED)
18432   NODE_NAME_CASE(SM3P0)
18433   NODE_NAME_CASE(SM3P1)
18434   NODE_NAME_CASE(TH_LWD)
18435   NODE_NAME_CASE(TH_LWUD)
18436   NODE_NAME_CASE(TH_LDD)
18437   NODE_NAME_CASE(TH_SWD)
18438   NODE_NAME_CASE(TH_SDD)
18439   NODE_NAME_CASE(VMV_V_V_VL)
18440   NODE_NAME_CASE(VMV_V_X_VL)
18441   NODE_NAME_CASE(VFMV_V_F_VL)
18442   NODE_NAME_CASE(VMV_X_S)
18443   NODE_NAME_CASE(VMV_S_X_VL)
18444   NODE_NAME_CASE(VFMV_S_F_VL)
18445   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
18446   NODE_NAME_CASE(READ_VLENB)
18447   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
18448   NODE_NAME_CASE(VSLIDEUP_VL)
18449   NODE_NAME_CASE(VSLIDE1UP_VL)
18450   NODE_NAME_CASE(VSLIDEDOWN_VL)
18451   NODE_NAME_CASE(VSLIDE1DOWN_VL)
18452   NODE_NAME_CASE(VFSLIDE1UP_VL)
18453   NODE_NAME_CASE(VFSLIDE1DOWN_VL)
18454   NODE_NAME_CASE(VID_VL)
18455   NODE_NAME_CASE(VFNCVT_ROD_VL)
18456   NODE_NAME_CASE(VECREDUCE_ADD_VL)
18457   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
18458   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
18459   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
18460   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
18461   NODE_NAME_CASE(VECREDUCE_AND_VL)
18462   NODE_NAME_CASE(VECREDUCE_OR_VL)
18463   NODE_NAME_CASE(VECREDUCE_XOR_VL)
18464   NODE_NAME_CASE(VECREDUCE_FADD_VL)
18465   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
18466   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
18467   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
18468   NODE_NAME_CASE(ADD_VL)
18469   NODE_NAME_CASE(AND_VL)
18470   NODE_NAME_CASE(MUL_VL)
18471   NODE_NAME_CASE(OR_VL)
18472   NODE_NAME_CASE(SDIV_VL)
18473   NODE_NAME_CASE(SHL_VL)
18474   NODE_NAME_CASE(SREM_VL)
18475   NODE_NAME_CASE(SRA_VL)
18476   NODE_NAME_CASE(SRL_VL)
18477   NODE_NAME_CASE(ROTL_VL)
18478   NODE_NAME_CASE(ROTR_VL)
18479   NODE_NAME_CASE(SUB_VL)
18480   NODE_NAME_CASE(UDIV_VL)
18481   NODE_NAME_CASE(UREM_VL)
18482   NODE_NAME_CASE(XOR_VL)
18483   NODE_NAME_CASE(SADDSAT_VL)
18484   NODE_NAME_CASE(UADDSAT_VL)
18485   NODE_NAME_CASE(SSUBSAT_VL)
18486   NODE_NAME_CASE(USUBSAT_VL)
18487   NODE_NAME_CASE(FADD_VL)
18488   NODE_NAME_CASE(FSUB_VL)
18489   NODE_NAME_CASE(FMUL_VL)
18490   NODE_NAME_CASE(FDIV_VL)
18491   NODE_NAME_CASE(FNEG_VL)
18492   NODE_NAME_CASE(FABS_VL)
18493   NODE_NAME_CASE(FSQRT_VL)
18494   NODE_NAME_CASE(FCLASS_VL)
18495   NODE_NAME_CASE(VFMADD_VL)
18496   NODE_NAME_CASE(VFNMADD_VL)
18497   NODE_NAME_CASE(VFMSUB_VL)
18498   NODE_NAME_CASE(VFNMSUB_VL)
18499   NODE_NAME_CASE(VFWMADD_VL)
18500   NODE_NAME_CASE(VFWNMADD_VL)
18501   NODE_NAME_CASE(VFWMSUB_VL)
18502   NODE_NAME_CASE(VFWNMSUB_VL)
18503   NODE_NAME_CASE(FCOPYSIGN_VL)
18504   NODE_NAME_CASE(SMIN_VL)
18505   NODE_NAME_CASE(SMAX_VL)
18506   NODE_NAME_CASE(UMIN_VL)
18507   NODE_NAME_CASE(UMAX_VL)
18508   NODE_NAME_CASE(BITREVERSE_VL)
18509   NODE_NAME_CASE(BSWAP_VL)
18510   NODE_NAME_CASE(CTLZ_VL)
18511   NODE_NAME_CASE(CTTZ_VL)
18512   NODE_NAME_CASE(CTPOP_VL)
18513   NODE_NAME_CASE(VFMIN_VL)
18514   NODE_NAME_CASE(VFMAX_VL)
18515   NODE_NAME_CASE(MULHS_VL)
18516   NODE_NAME_CASE(MULHU_VL)
18517   NODE_NAME_CASE(VFCVT_RTZ_X_F_VL)
18518   NODE_NAME_CASE(VFCVT_RTZ_XU_F_VL)
18519   NODE_NAME_CASE(VFCVT_RM_X_F_VL)
18520   NODE_NAME_CASE(VFCVT_RM_XU_F_VL)
18521   NODE_NAME_CASE(VFCVT_X_F_VL)
18522   NODE_NAME_CASE(VFCVT_XU_F_VL)
18523   NODE_NAME_CASE(VFROUND_NOEXCEPT_VL)
18524   NODE_NAME_CASE(SINT_TO_FP_VL)
18525   NODE_NAME_CASE(UINT_TO_FP_VL)
18526   NODE_NAME_CASE(VFCVT_RM_F_XU_VL)
18527   NODE_NAME_CASE(VFCVT_RM_F_X_VL)
18528   NODE_NAME_CASE(FP_EXTEND_VL)
18529   NODE_NAME_CASE(FP_ROUND_VL)
18530   NODE_NAME_CASE(STRICT_FADD_VL)
18531   NODE_NAME_CASE(STRICT_FSUB_VL)
18532   NODE_NAME_CASE(STRICT_FMUL_VL)
18533   NODE_NAME_CASE(STRICT_FDIV_VL)
18534   NODE_NAME_CASE(STRICT_FSQRT_VL)
18535   NODE_NAME_CASE(STRICT_VFMADD_VL)
18536   NODE_NAME_CASE(STRICT_VFNMADD_VL)
18537   NODE_NAME_CASE(STRICT_VFMSUB_VL)
18538   NODE_NAME_CASE(STRICT_VFNMSUB_VL)
18539   NODE_NAME_CASE(STRICT_FP_ROUND_VL)
18540   NODE_NAME_CASE(STRICT_FP_EXTEND_VL)
18541   NODE_NAME_CASE(STRICT_VFNCVT_ROD_VL)
18542   NODE_NAME_CASE(STRICT_SINT_TO_FP_VL)
18543   NODE_NAME_CASE(STRICT_UINT_TO_FP_VL)
18544   NODE_NAME_CASE(STRICT_VFCVT_RM_X_F_VL)
18545   NODE_NAME_CASE(STRICT_VFCVT_RTZ_X_F_VL)
18546   NODE_NAME_CASE(STRICT_VFCVT_RTZ_XU_F_VL)
18547   NODE_NAME_CASE(STRICT_FSETCC_VL)
18548   NODE_NAME_CASE(STRICT_FSETCCS_VL)
18549   NODE_NAME_CASE(STRICT_VFROUND_NOEXCEPT_VL)
18550   NODE_NAME_CASE(VWMUL_VL)
18551   NODE_NAME_CASE(VWMULU_VL)
18552   NODE_NAME_CASE(VWMULSU_VL)
18553   NODE_NAME_CASE(VWADD_VL)
18554   NODE_NAME_CASE(VWADDU_VL)
18555   NODE_NAME_CASE(VWSUB_VL)
18556   NODE_NAME_CASE(VWSUBU_VL)
18557   NODE_NAME_CASE(VWADD_W_VL)
18558   NODE_NAME_CASE(VWADDU_W_VL)
18559   NODE_NAME_CASE(VWSUB_W_VL)
18560   NODE_NAME_CASE(VWSUBU_W_VL)
18561   NODE_NAME_CASE(VWSLL_VL)
18562   NODE_NAME_CASE(VFWMUL_VL)
18563   NODE_NAME_CASE(VFWADD_VL)
18564   NODE_NAME_CASE(VFWSUB_VL)
18565   NODE_NAME_CASE(VFWADD_W_VL)
18566   NODE_NAME_CASE(VFWSUB_W_VL)
18567   NODE_NAME_CASE(VWMACC_VL)
18568   NODE_NAME_CASE(VWMACCU_VL)
18569   NODE_NAME_CASE(VWMACCSU_VL)
18570   NODE_NAME_CASE(VNSRL_VL)
18571   NODE_NAME_CASE(SETCC_VL)
18572   NODE_NAME_CASE(VSELECT_VL)
18573   NODE_NAME_CASE(VP_MERGE_VL)
18574   NODE_NAME_CASE(VMAND_VL)
18575   NODE_NAME_CASE(VMOR_VL)
18576   NODE_NAME_CASE(VMXOR_VL)
18577   NODE_NAME_CASE(VMCLR_VL)
18578   NODE_NAME_CASE(VMSET_VL)
18579   NODE_NAME_CASE(VRGATHER_VX_VL)
18580   NODE_NAME_CASE(VRGATHER_VV_VL)
18581   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
18582   NODE_NAME_CASE(VSEXT_VL)
18583   NODE_NAME_CASE(VZEXT_VL)
18584   NODE_NAME_CASE(VCPOP_VL)
18585   NODE_NAME_CASE(VFIRST_VL)
18586   NODE_NAME_CASE(READ_CSR)
18587   NODE_NAME_CASE(WRITE_CSR)
18588   NODE_NAME_CASE(SWAP_CSR)
18589   NODE_NAME_CASE(CZERO_EQZ)
18590   NODE_NAME_CASE(CZERO_NEZ)
18591   }
18592   // clang-format on
18593   return nullptr;
18594 #undef NODE_NAME_CASE
18595 }
18596 
18597 /// getConstraintType - Given a constraint letter, return the type of
18598 /// constraint it is for this target.
18599 RISCVTargetLowering::ConstraintType
18600 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
18601   if (Constraint.size() == 1) {
18602     switch (Constraint[0]) {
18603     default:
18604       break;
18605     case 'f':
18606       return C_RegisterClass;
18607     case 'I':
18608     case 'J':
18609     case 'K':
18610       return C_Immediate;
18611     case 'A':
18612       return C_Memory;
18613     case 'S': // A symbolic address
18614       return C_Other;
18615     }
18616   } else {
18617     if (Constraint == "vr" || Constraint == "vm")
18618       return C_RegisterClass;
18619   }
18620   return TargetLowering::getConstraintType(Constraint);
18621 }
18622 
18623 std::pair<unsigned, const TargetRegisterClass *>
18624 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
18625                                                   StringRef Constraint,
18626                                                   MVT VT) const {
18627   // First, see if this is a constraint that directly corresponds to a RISC-V
18628   // register class.
18629   if (Constraint.size() == 1) {
18630     switch (Constraint[0]) {
18631     case 'r':
18632       // TODO: Support fixed vectors up to XLen for P extension?
18633       if (VT.isVector())
18634         break;
18635       if (VT == MVT::f16 && Subtarget.hasStdExtZhinxOrZhinxmin())
18636         return std::make_pair(0U, &RISCV::GPRF16RegClass);
18637       if (VT == MVT::f32 && Subtarget.hasStdExtZfinx())
18638         return std::make_pair(0U, &RISCV::GPRF32RegClass);
18639       if (VT == MVT::f64 && Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
18640         return std::make_pair(0U, &RISCV::GPRPF64RegClass);
18641       return std::make_pair(0U, &RISCV::GPRNoX0RegClass);
18642     case 'f':
18643       if (Subtarget.hasStdExtZfhOrZfhmin() && VT == MVT::f16)
18644         return std::make_pair(0U, &RISCV::FPR16RegClass);
18645       if (Subtarget.hasStdExtF() && VT == MVT::f32)
18646         return std::make_pair(0U, &RISCV::FPR32RegClass);
18647       if (Subtarget.hasStdExtD() && VT == MVT::f64)
18648         return std::make_pair(0U, &RISCV::FPR64RegClass);
18649       break;
18650     default:
18651       break;
18652     }
18653   } else if (Constraint == "vr") {
18654     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
18655                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
18656       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
18657         return std::make_pair(0U, RC);
18658     }
18659   } else if (Constraint == "vm") {
18660     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
18661       return std::make_pair(0U, &RISCV::VMV0RegClass);
18662   }
18663 
18664   // Clang will correctly decode the usage of register name aliases into their
18665   // official names. However, other frontends like `rustc` do not. This allows
18666   // users of these frontends to use the ABI names for registers in LLVM-style
18667   // register constraints.
18668   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
18669                                .Case("{zero}", RISCV::X0)
18670                                .Case("{ra}", RISCV::X1)
18671                                .Case("{sp}", RISCV::X2)
18672                                .Case("{gp}", RISCV::X3)
18673                                .Case("{tp}", RISCV::X4)
18674                                .Case("{t0}", RISCV::X5)
18675                                .Case("{t1}", RISCV::X6)
18676                                .Case("{t2}", RISCV::X7)
18677                                .Cases("{s0}", "{fp}", RISCV::X8)
18678                                .Case("{s1}", RISCV::X9)
18679                                .Case("{a0}", RISCV::X10)
18680                                .Case("{a1}", RISCV::X11)
18681                                .Case("{a2}", RISCV::X12)
18682                                .Case("{a3}", RISCV::X13)
18683                                .Case("{a4}", RISCV::X14)
18684                                .Case("{a5}", RISCV::X15)
18685                                .Case("{a6}", RISCV::X16)
18686                                .Case("{a7}", RISCV::X17)
18687                                .Case("{s2}", RISCV::X18)
18688                                .Case("{s3}", RISCV::X19)
18689                                .Case("{s4}", RISCV::X20)
18690                                .Case("{s5}", RISCV::X21)
18691                                .Case("{s6}", RISCV::X22)
18692                                .Case("{s7}", RISCV::X23)
18693                                .Case("{s8}", RISCV::X24)
18694                                .Case("{s9}", RISCV::X25)
18695                                .Case("{s10}", RISCV::X26)
18696                                .Case("{s11}", RISCV::X27)
18697                                .Case("{t3}", RISCV::X28)
18698                                .Case("{t4}", RISCV::X29)
18699                                .Case("{t5}", RISCV::X30)
18700                                .Case("{t6}", RISCV::X31)
18701                                .Default(RISCV::NoRegister);
18702   if (XRegFromAlias != RISCV::NoRegister)
18703     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
18704 
18705   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
18706   // TableGen record rather than the AsmName to choose registers for InlineAsm
18707   // constraints, plus we want to match those names to the widest floating point
18708   // register type available, manually select floating point registers here.
18709   //
18710   // The second case is the ABI name of the register, so that frontends can also
18711   // use the ABI names in register constraint lists.
18712   if (Subtarget.hasStdExtF()) {
18713     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
18714                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
18715                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
18716                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
18717                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
18718                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
18719                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
18720                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
18721                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
18722                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
18723                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
18724                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
18725                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
18726                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
18727                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
18728                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
18729                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
18730                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
18731                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
18732                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
18733                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
18734                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
18735                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
18736                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
18737                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
18738                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
18739                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
18740                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
18741                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
18742                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
18743                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
18744                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
18745                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
18746                         .Default(RISCV::NoRegister);
18747     if (FReg != RISCV::NoRegister) {
18748       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
18749       if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
18750         unsigned RegNo = FReg - RISCV::F0_F;
18751         unsigned DReg = RISCV::F0_D + RegNo;
18752         return std::make_pair(DReg, &RISCV::FPR64RegClass);
18753       }
18754       if (VT == MVT::f32 || VT == MVT::Other)
18755         return std::make_pair(FReg, &RISCV::FPR32RegClass);
18756       if (Subtarget.hasStdExtZfhOrZfhmin() && VT == MVT::f16) {
18757         unsigned RegNo = FReg - RISCV::F0_F;
18758         unsigned HReg = RISCV::F0_H + RegNo;
18759         return std::make_pair(HReg, &RISCV::FPR16RegClass);
18760       }
18761     }
18762   }
18763 
18764   if (Subtarget.hasVInstructions()) {
18765     Register VReg = StringSwitch<Register>(Constraint.lower())
18766                         .Case("{v0}", RISCV::V0)
18767                         .Case("{v1}", RISCV::V1)
18768                         .Case("{v2}", RISCV::V2)
18769                         .Case("{v3}", RISCV::V3)
18770                         .Case("{v4}", RISCV::V4)
18771                         .Case("{v5}", RISCV::V5)
18772                         .Case("{v6}", RISCV::V6)
18773                         .Case("{v7}", RISCV::V7)
18774                         .Case("{v8}", RISCV::V8)
18775                         .Case("{v9}", RISCV::V9)
18776                         .Case("{v10}", RISCV::V10)
18777                         .Case("{v11}", RISCV::V11)
18778                         .Case("{v12}", RISCV::V12)
18779                         .Case("{v13}", RISCV::V13)
18780                         .Case("{v14}", RISCV::V14)
18781                         .Case("{v15}", RISCV::V15)
18782                         .Case("{v16}", RISCV::V16)
18783                         .Case("{v17}", RISCV::V17)
18784                         .Case("{v18}", RISCV::V18)
18785                         .Case("{v19}", RISCV::V19)
18786                         .Case("{v20}", RISCV::V20)
18787                         .Case("{v21}", RISCV::V21)
18788                         .Case("{v22}", RISCV::V22)
18789                         .Case("{v23}", RISCV::V23)
18790                         .Case("{v24}", RISCV::V24)
18791                         .Case("{v25}", RISCV::V25)
18792                         .Case("{v26}", RISCV::V26)
18793                         .Case("{v27}", RISCV::V27)
18794                         .Case("{v28}", RISCV::V28)
18795                         .Case("{v29}", RISCV::V29)
18796                         .Case("{v30}", RISCV::V30)
18797                         .Case("{v31}", RISCV::V31)
18798                         .Default(RISCV::NoRegister);
18799     if (VReg != RISCV::NoRegister) {
18800       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
18801         return std::make_pair(VReg, &RISCV::VMRegClass);
18802       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
18803         return std::make_pair(VReg, &RISCV::VRRegClass);
18804       for (const auto *RC :
18805            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
18806         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
18807           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
18808           return std::make_pair(VReg, RC);
18809         }
18810       }
18811     }
18812   }
18813 
18814   std::pair<Register, const TargetRegisterClass *> Res =
18815       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
18816 
18817   // If we picked one of the Zfinx register classes, remap it to the GPR class.
18818   // FIXME: When Zfinx is supported in CodeGen this will need to take the
18819   // Subtarget into account.
18820   if (Res.second == &RISCV::GPRF16RegClass ||
18821       Res.second == &RISCV::GPRF32RegClass ||
18822       Res.second == &RISCV::GPRPF64RegClass)
18823     return std::make_pair(Res.first, &RISCV::GPRRegClass);
18824 
18825   return Res;
18826 }
18827 
18828 InlineAsm::ConstraintCode
18829 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
18830   // Currently only support length 1 constraints.
18831   if (ConstraintCode.size() == 1) {
18832     switch (ConstraintCode[0]) {
18833     case 'A':
18834       return InlineAsm::ConstraintCode::A;
18835     default:
18836       break;
18837     }
18838   }
18839 
18840   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
18841 }
18842 
18843 void RISCVTargetLowering::LowerAsmOperandForConstraint(
18844     SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
18845     SelectionDAG &DAG) const {
18846   // Currently only support length 1 constraints.
18847   if (Constraint.size() == 1) {
18848     switch (Constraint[0]) {
18849     case 'I':
18850       // Validate & create a 12-bit signed immediate operand.
18851       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
18852         uint64_t CVal = C->getSExtValue();
18853         if (isInt<12>(CVal))
18854           Ops.push_back(
18855               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
18856       }
18857       return;
18858     case 'J':
18859       // Validate & create an integer zero operand.
18860       if (isNullConstant(Op))
18861         Ops.push_back(
18862             DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
18863       return;
18864     case 'K':
18865       // Validate & create a 5-bit unsigned immediate operand.
18866       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
18867         uint64_t CVal = C->getZExtValue();
18868         if (isUInt<5>(CVal))
18869           Ops.push_back(
18870               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
18871       }
18872       return;
18873     case 'S':
18874       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
18875         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
18876                                                  GA->getValueType(0)));
18877       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
18878         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
18879                                                 BA->getValueType(0)));
18880       }
18881       return;
18882     default:
18883       break;
18884     }
18885   }
18886   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
18887 }
18888 
18889 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
18890                                                    Instruction *Inst,
18891                                                    AtomicOrdering Ord) const {
18892   if (Subtarget.hasStdExtZtso()) {
18893     if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
18894       return Builder.CreateFence(Ord);
18895     return nullptr;
18896   }
18897 
18898   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
18899     return Builder.CreateFence(Ord);
18900   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
18901     return Builder.CreateFence(AtomicOrdering::Release);
18902   return nullptr;
18903 }
18904 
18905 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
18906                                                     Instruction *Inst,
18907                                                     AtomicOrdering Ord) const {
18908   if (Subtarget.hasStdExtZtso()) {
18909     if (isa<StoreInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
18910       return Builder.CreateFence(Ord);
18911     return nullptr;
18912   }
18913 
18914   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
18915     return Builder.CreateFence(AtomicOrdering::Acquire);
18916   if (Subtarget.enableSeqCstTrailingFence() && isa<StoreInst>(Inst) &&
18917       Ord == AtomicOrdering::SequentiallyConsistent)
18918     return Builder.CreateFence(AtomicOrdering::SequentiallyConsistent);
18919   return nullptr;
18920 }
18921 
18922 TargetLowering::AtomicExpansionKind
18923 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
18924   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
18925   // point operations can't be used in an lr/sc sequence without breaking the
18926   // forward-progress guarantee.
18927   if (AI->isFloatingPointOperation() ||
18928       AI->getOperation() == AtomicRMWInst::UIncWrap ||
18929       AI->getOperation() == AtomicRMWInst::UDecWrap)
18930     return AtomicExpansionKind::CmpXChg;
18931 
18932   // Don't expand forced atomics, we want to have __sync libcalls instead.
18933   if (Subtarget.hasForcedAtomics())
18934     return AtomicExpansionKind::None;
18935 
18936   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
18937   if (Size == 8 || Size == 16)
18938     return AtomicExpansionKind::MaskedIntrinsic;
18939   return AtomicExpansionKind::None;
18940 }
18941 
18942 static Intrinsic::ID
18943 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
18944   if (XLen == 32) {
18945     switch (BinOp) {
18946     default:
18947       llvm_unreachable("Unexpected AtomicRMW BinOp");
18948     case AtomicRMWInst::Xchg:
18949       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
18950     case AtomicRMWInst::Add:
18951       return Intrinsic::riscv_masked_atomicrmw_add_i32;
18952     case AtomicRMWInst::Sub:
18953       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
18954     case AtomicRMWInst::Nand:
18955       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
18956     case AtomicRMWInst::Max:
18957       return Intrinsic::riscv_masked_atomicrmw_max_i32;
18958     case AtomicRMWInst::Min:
18959       return Intrinsic::riscv_masked_atomicrmw_min_i32;
18960     case AtomicRMWInst::UMax:
18961       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
18962     case AtomicRMWInst::UMin:
18963       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
18964     }
18965   }
18966 
18967   if (XLen == 64) {
18968     switch (BinOp) {
18969     default:
18970       llvm_unreachable("Unexpected AtomicRMW BinOp");
18971     case AtomicRMWInst::Xchg:
18972       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
18973     case AtomicRMWInst::Add:
18974       return Intrinsic::riscv_masked_atomicrmw_add_i64;
18975     case AtomicRMWInst::Sub:
18976       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
18977     case AtomicRMWInst::Nand:
18978       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
18979     case AtomicRMWInst::Max:
18980       return Intrinsic::riscv_masked_atomicrmw_max_i64;
18981     case AtomicRMWInst::Min:
18982       return Intrinsic::riscv_masked_atomicrmw_min_i64;
18983     case AtomicRMWInst::UMax:
18984       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
18985     case AtomicRMWInst::UMin:
18986       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
18987     }
18988   }
18989 
18990   llvm_unreachable("Unexpected XLen\n");
18991 }
18992 
18993 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
18994     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
18995     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
18996   // In the case of an atomicrmw xchg with a constant 0/-1 operand, replace
18997   // the atomic instruction with an AtomicRMWInst::And/Or with appropriate
18998   // mask, as this produces better code than the LR/SC loop emitted by
18999   // int_riscv_masked_atomicrmw_xchg.
19000   if (AI->getOperation() == AtomicRMWInst::Xchg &&
19001       isa<ConstantInt>(AI->getValOperand())) {
19002     ConstantInt *CVal = cast<ConstantInt>(AI->getValOperand());
19003     if (CVal->isZero())
19004       return Builder.CreateAtomicRMW(AtomicRMWInst::And, AlignedAddr,
19005                                      Builder.CreateNot(Mask, "Inv_Mask"),
19006                                      AI->getAlign(), Ord);
19007     if (CVal->isMinusOne())
19008       return Builder.CreateAtomicRMW(AtomicRMWInst::Or, AlignedAddr, Mask,
19009                                      AI->getAlign(), Ord);
19010   }
19011 
19012   unsigned XLen = Subtarget.getXLen();
19013   Value *Ordering =
19014       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
19015   Type *Tys[] = {AlignedAddr->getType()};
19016   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
19017       AI->getModule(),
19018       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
19019 
19020   if (XLen == 64) {
19021     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
19022     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
19023     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
19024   }
19025 
19026   Value *Result;
19027 
19028   // Must pass the shift amount needed to sign extend the loaded value prior
19029   // to performing a signed comparison for min/max. ShiftAmt is the number of
19030   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
19031   // is the number of bits to left+right shift the value in order to
19032   // sign-extend.
19033   if (AI->getOperation() == AtomicRMWInst::Min ||
19034       AI->getOperation() == AtomicRMWInst::Max) {
19035     const DataLayout &DL = AI->getModule()->getDataLayout();
19036     unsigned ValWidth =
19037         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
19038     Value *SextShamt =
19039         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
19040     Result = Builder.CreateCall(LrwOpScwLoop,
19041                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
19042   } else {
19043     Result =
19044         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
19045   }
19046 
19047   if (XLen == 64)
19048     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
19049   return Result;
19050 }
19051 
19052 TargetLowering::AtomicExpansionKind
19053 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
19054     AtomicCmpXchgInst *CI) const {
19055   // Don't expand forced atomics, we want to have __sync libcalls instead.
19056   if (Subtarget.hasForcedAtomics())
19057     return AtomicExpansionKind::None;
19058 
19059   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
19060   if (Size == 8 || Size == 16)
19061     return AtomicExpansionKind::MaskedIntrinsic;
19062   return AtomicExpansionKind::None;
19063 }
19064 
19065 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
19066     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
19067     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
19068   unsigned XLen = Subtarget.getXLen();
19069   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
19070   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
19071   if (XLen == 64) {
19072     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
19073     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
19074     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
19075     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
19076   }
19077   Type *Tys[] = {AlignedAddr->getType()};
19078   Function *MaskedCmpXchg =
19079       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
19080   Value *Result = Builder.CreateCall(
19081       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
19082   if (XLen == 64)
19083     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
19084   return Result;
19085 }
19086 
19087 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(SDValue Extend,
19088                                                         EVT DataVT) const {
19089   // We have indexed loads for all legal index types.  Indices are always
19090   // zero extended
19091   return Extend.getOpcode() == ISD::ZERO_EXTEND &&
19092     isTypeLegal(Extend.getValueType()) &&
19093     isTypeLegal(Extend.getOperand(0).getValueType());
19094 }
19095 
19096 bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
19097                                                EVT VT) const {
19098   if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
19099     return false;
19100 
19101   switch (FPVT.getSimpleVT().SimpleTy) {
19102   case MVT::f16:
19103     return Subtarget.hasStdExtZfhOrZfhmin();
19104   case MVT::f32:
19105     return Subtarget.hasStdExtF();
19106   case MVT::f64:
19107     return Subtarget.hasStdExtD();
19108   default:
19109     return false;
19110   }
19111 }
19112 
19113 unsigned RISCVTargetLowering::getJumpTableEncoding() const {
19114   // If we are using the small code model, we can reduce size of jump table
19115   // entry to 4 bytes.
19116   if (Subtarget.is64Bit() && !isPositionIndependent() &&
19117       getTargetMachine().getCodeModel() == CodeModel::Small) {
19118     return MachineJumpTableInfo::EK_Custom32;
19119   }
19120   return TargetLowering::getJumpTableEncoding();
19121 }
19122 
19123 const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
19124     const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
19125     unsigned uid, MCContext &Ctx) const {
19126   assert(Subtarget.is64Bit() && !isPositionIndependent() &&
19127          getTargetMachine().getCodeModel() == CodeModel::Small);
19128   return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
19129 }
19130 
19131 bool RISCVTargetLowering::isVScaleKnownToBeAPowerOfTwo() const {
19132   // We define vscale to be VLEN/RVVBitsPerBlock.  VLEN is always a power
19133   // of two >= 64, and RVVBitsPerBlock is 64.  Thus, vscale must be
19134   // a power of two as well.
19135   // FIXME: This doesn't work for zve32, but that's already broken
19136   // elsewhere for the same reason.
19137   assert(Subtarget.getRealMinVLen() >= 64 && "zve32* unsupported");
19138   static_assert(RISCV::RVVBitsPerBlock == 64,
19139                 "RVVBitsPerBlock changed, audit needed");
19140   return true;
19141 }
19142 
19143 bool RISCVTargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base,
19144                                                  SDValue &Offset,
19145                                                  ISD::MemIndexedMode &AM,
19146                                                  bool &IsInc,
19147                                                  SelectionDAG &DAG) const {
19148   // Target does not support indexed loads.
19149   if (!Subtarget.hasVendorXTHeadMemIdx())
19150     return false;
19151 
19152   if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB)
19153     return false;
19154 
19155   Base = Op->getOperand(0);
19156   if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
19157     int64_t RHSC = RHS->getSExtValue();
19158     if (Op->getOpcode() == ISD::SUB)
19159       RHSC = -(uint64_t)RHSC;
19160 
19161     // The constants that can be encoded in the THeadMemIdx instructions
19162     // are of the form (sign_extend(imm5) << imm2).
19163     bool isLegalIndexedOffset = false;
19164     for (unsigned i = 0; i < 4; i++)
19165       if (isInt<5>(RHSC >> i) && ((RHSC % (1LL << i)) == 0)) {
19166         isLegalIndexedOffset = true;
19167         break;
19168       }
19169 
19170     if (!isLegalIndexedOffset)
19171       return false;
19172 
19173     IsInc = (Op->getOpcode() == ISD::ADD);
19174     Offset = Op->getOperand(1);
19175     return true;
19176   }
19177 
19178   return false;
19179 }
19180 
19181 bool RISCVTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
19182                                                     SDValue &Offset,
19183                                                     ISD::MemIndexedMode &AM,
19184                                                     SelectionDAG &DAG) const {
19185   EVT VT;
19186   SDValue Ptr;
19187   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
19188     VT = LD->getMemoryVT();
19189     Ptr = LD->getBasePtr();
19190   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
19191     VT = ST->getMemoryVT();
19192     Ptr = ST->getBasePtr();
19193   } else
19194     return false;
19195 
19196   bool IsInc;
19197   if (!getIndexedAddressParts(Ptr.getNode(), Base, Offset, AM, IsInc, DAG))
19198     return false;
19199 
19200   AM = IsInc ? ISD::PRE_INC : ISD::PRE_DEC;
19201   return true;
19202 }
19203 
19204 bool RISCVTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
19205                                                      SDValue &Base,
19206                                                      SDValue &Offset,
19207                                                      ISD::MemIndexedMode &AM,
19208                                                      SelectionDAG &DAG) const {
19209   EVT VT;
19210   SDValue Ptr;
19211   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
19212     VT = LD->getMemoryVT();
19213     Ptr = LD->getBasePtr();
19214   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
19215     VT = ST->getMemoryVT();
19216     Ptr = ST->getBasePtr();
19217   } else
19218     return false;
19219 
19220   bool IsInc;
19221   if (!getIndexedAddressParts(Op, Base, Offset, AM, IsInc, DAG))
19222     return false;
19223   // Post-indexing updates the base, so it's not a valid transform
19224   // if that's not the same as the load's pointer.
19225   if (Ptr != Base)
19226     return false;
19227 
19228   AM = IsInc ? ISD::POST_INC : ISD::POST_DEC;
19229   return true;
19230 }
19231 
19232 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
19233                                                      EVT VT) const {
19234   EVT SVT = VT.getScalarType();
19235 
19236   if (!SVT.isSimple())
19237     return false;
19238 
19239   switch (SVT.getSimpleVT().SimpleTy) {
19240   case MVT::f16:
19241     return VT.isVector() ? Subtarget.hasVInstructionsF16()
19242                          : Subtarget.hasStdExtZfhOrZhinx();
19243   case MVT::f32:
19244     return Subtarget.hasStdExtFOrZfinx();
19245   case MVT::f64:
19246     return Subtarget.hasStdExtDOrZdinx();
19247   default:
19248     break;
19249   }
19250 
19251   return false;
19252 }
19253 
19254 Register RISCVTargetLowering::getExceptionPointerRegister(
19255     const Constant *PersonalityFn) const {
19256   return RISCV::X10;
19257 }
19258 
19259 Register RISCVTargetLowering::getExceptionSelectorRegister(
19260     const Constant *PersonalityFn) const {
19261   return RISCV::X11;
19262 }
19263 
19264 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
19265   // Return false to suppress the unnecessary extensions if the LibCall
19266   // arguments or return value is a float narrower than XLEN on a soft FP ABI.
19267   if (Subtarget.isSoftFPABI() && (Type.isFloatingPoint() && !Type.isVector() &&
19268                                   Type.getSizeInBits() < Subtarget.getXLen()))
19269     return false;
19270 
19271   return true;
19272 }
19273 
19274 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
19275   if (Subtarget.is64Bit() && Type == MVT::i32)
19276     return true;
19277 
19278   return IsSigned;
19279 }
19280 
19281 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
19282                                                  SDValue C) const {
19283   // Check integral scalar types.
19284   const bool HasExtMOrZmmul =
19285       Subtarget.hasStdExtM() || Subtarget.hasStdExtZmmul();
19286   if (!VT.isScalarInteger())
19287     return false;
19288 
19289   // Omit the optimization if the sub target has the M extension and the data
19290   // size exceeds XLen.
19291   if (HasExtMOrZmmul && VT.getSizeInBits() > Subtarget.getXLen())
19292     return false;
19293 
19294   if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
19295     // Break the MUL to a SLLI and an ADD/SUB.
19296     const APInt &Imm = ConstNode->getAPIntValue();
19297     if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
19298         (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
19299       return true;
19300 
19301     // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
19302     if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
19303         ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
19304          (Imm - 8).isPowerOf2()))
19305       return true;
19306 
19307     // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
19308     // a pair of LUI/ADDI.
19309     if (!Imm.isSignedIntN(12) && Imm.countr_zero() < 12 &&
19310         ConstNode->hasOneUse()) {
19311       APInt ImmS = Imm.ashr(Imm.countr_zero());
19312       if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
19313           (1 - ImmS).isPowerOf2())
19314         return true;
19315     }
19316   }
19317 
19318   return false;
19319 }
19320 
19321 bool RISCVTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
19322                                                       SDValue ConstNode) const {
19323   // Let the DAGCombiner decide for vectors.
19324   EVT VT = AddNode.getValueType();
19325   if (VT.isVector())
19326     return true;
19327 
19328   // Let the DAGCombiner decide for larger types.
19329   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
19330     return true;
19331 
19332   // It is worse if c1 is simm12 while c1*c2 is not.
19333   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
19334   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
19335   const APInt &C1 = C1Node->getAPIntValue();
19336   const APInt &C2 = C2Node->getAPIntValue();
19337   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
19338     return false;
19339 
19340   // Default to true and let the DAGCombiner decide.
19341   return true;
19342 }
19343 
19344 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
19345     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
19346     unsigned *Fast) const {
19347   if (!VT.isVector()) {
19348     if (Fast)
19349       *Fast = Subtarget.hasFastUnalignedAccess();
19350     return Subtarget.hasFastUnalignedAccess();
19351   }
19352 
19353   // All vector implementations must support element alignment
19354   EVT ElemVT = VT.getVectorElementType();
19355   if (Alignment >= ElemVT.getStoreSize()) {
19356     if (Fast)
19357       *Fast = 1;
19358     return true;
19359   }
19360 
19361   // Note: We lower an unmasked unaligned vector access to an equally sized
19362   // e8 element type access.  Given this, we effectively support all unmasked
19363   // misaligned accesses.  TODO: Work through the codegen implications of
19364   // allowing such accesses to be formed, and considered fast.
19365   if (Fast)
19366     *Fast = Subtarget.hasFastUnalignedAccess();
19367   return Subtarget.hasFastUnalignedAccess();
19368 }
19369 
19370 
19371 EVT RISCVTargetLowering::getOptimalMemOpType(const MemOp &Op,
19372                                              const AttributeList &FuncAttributes) const {
19373   if (!Subtarget.hasVInstructions())
19374     return MVT::Other;
19375 
19376   if (FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat))
19377     return MVT::Other;
19378 
19379   // We use LMUL1 memory operations here for a non-obvious reason.  Our caller
19380   // has an expansion threshold, and we want the number of hardware memory
19381   // operations to correspond roughly to that threshold.  LMUL>1 operations
19382   // are typically expanded linearly internally, and thus correspond to more
19383   // than one actual memory operation.  Note that store merging and load
19384   // combining will typically form larger LMUL operations from the LMUL1
19385   // operations emitted here, and that's okay because combining isn't
19386   // introducing new memory operations; it's just merging existing ones.
19387   const unsigned MinVLenInBytes = Subtarget.getRealMinVLen()/8;
19388   if (Op.size() < MinVLenInBytes)
19389     // TODO: Figure out short memops.  For the moment, do the default thing
19390     // which ends up using scalar sequences.
19391     return MVT::Other;
19392 
19393   // Prefer i8 for non-zero memset as it allows us to avoid materializing
19394   // a large scalar constant and instead use vmv.v.x/i to do the
19395   // broadcast.  For everything else, prefer ELenVT to minimize VL and thus
19396   // maximize the chance we can encode the size in the vsetvli.
19397   MVT ELenVT = MVT::getIntegerVT(Subtarget.getELen());
19398   MVT PreferredVT = (Op.isMemset() && !Op.isZeroMemset()) ? MVT::i8 : ELenVT;
19399 
19400   // Do we have sufficient alignment for our preferred VT?  If not, revert
19401   // to largest size allowed by our alignment criteria.
19402   if (PreferredVT != MVT::i8 && !Subtarget.hasFastUnalignedAccess()) {
19403     Align RequiredAlign(PreferredVT.getStoreSize());
19404     if (Op.isFixedDstAlign())
19405       RequiredAlign = std::min(RequiredAlign, Op.getDstAlign());
19406     if (Op.isMemcpy())
19407       RequiredAlign = std::min(RequiredAlign, Op.getSrcAlign());
19408     PreferredVT = MVT::getIntegerVT(RequiredAlign.value() * 8);
19409   }
19410   return MVT::getVectorVT(PreferredVT, MinVLenInBytes/PreferredVT.getStoreSize());
19411 }
19412 
19413 bool RISCVTargetLowering::splitValueIntoRegisterParts(
19414     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
19415     unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
19416   bool IsABIRegCopy = CC.has_value();
19417   EVT ValueVT = Val.getValueType();
19418   if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
19419       PartVT == MVT::f32) {
19420     // Cast the [b]f16 to i16, extend to i32, pad with ones to make a float
19421     // nan, and cast to f32.
19422     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
19423     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
19424     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
19425                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
19426     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
19427     Parts[0] = Val;
19428     return true;
19429   }
19430 
19431   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
19432     LLVMContext &Context = *DAG.getContext();
19433     EVT ValueEltVT = ValueVT.getVectorElementType();
19434     EVT PartEltVT = PartVT.getVectorElementType();
19435     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinValue();
19436     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinValue();
19437     if (PartVTBitSize % ValueVTBitSize == 0) {
19438       assert(PartVTBitSize >= ValueVTBitSize);
19439       // If the element types are different, bitcast to the same element type of
19440       // PartVT first.
19441       // Give an example here, we want copy a <vscale x 1 x i8> value to
19442       // <vscale x 4 x i16>.
19443       // We need to convert <vscale x 1 x i8> to <vscale x 8 x i8> by insert
19444       // subvector, then we can bitcast to <vscale x 4 x i16>.
19445       if (ValueEltVT != PartEltVT) {
19446         if (PartVTBitSize > ValueVTBitSize) {
19447           unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
19448           assert(Count != 0 && "The number of element should not be zero.");
19449           EVT SameEltTypeVT =
19450               EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
19451           Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
19452                             DAG.getUNDEF(SameEltTypeVT), Val,
19453                             DAG.getVectorIdxConstant(0, DL));
19454         }
19455         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
19456       } else {
19457         Val =
19458             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
19459                         Val, DAG.getVectorIdxConstant(0, DL));
19460       }
19461       Parts[0] = Val;
19462       return true;
19463     }
19464   }
19465   return false;
19466 }
19467 
19468 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
19469     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
19470     MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
19471   bool IsABIRegCopy = CC.has_value();
19472   if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
19473       PartVT == MVT::f32) {
19474     SDValue Val = Parts[0];
19475 
19476     // Cast the f32 to i32, truncate to i16, and cast back to [b]f16.
19477     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
19478     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
19479     Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
19480     return Val;
19481   }
19482 
19483   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
19484     LLVMContext &Context = *DAG.getContext();
19485     SDValue Val = Parts[0];
19486     EVT ValueEltVT = ValueVT.getVectorElementType();
19487     EVT PartEltVT = PartVT.getVectorElementType();
19488     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinValue();
19489     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinValue();
19490     if (PartVTBitSize % ValueVTBitSize == 0) {
19491       assert(PartVTBitSize >= ValueVTBitSize);
19492       EVT SameEltTypeVT = ValueVT;
19493       // If the element types are different, convert it to the same element type
19494       // of PartVT.
19495       // Give an example here, we want copy a <vscale x 1 x i8> value from
19496       // <vscale x 4 x i16>.
19497       // We need to convert <vscale x 4 x i16> to <vscale x 8 x i8> first,
19498       // then we can extract <vscale x 1 x i8>.
19499       if (ValueEltVT != PartEltVT) {
19500         unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
19501         assert(Count != 0 && "The number of element should not be zero.");
19502         SameEltTypeVT =
19503             EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
19504         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
19505       }
19506       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
19507                         DAG.getVectorIdxConstant(0, DL));
19508       return Val;
19509     }
19510   }
19511   return SDValue();
19512 }
19513 
19514 bool RISCVTargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
19515   // When aggressively optimizing for code size, we prefer to use a div
19516   // instruction, as it is usually smaller than the alternative sequence.
19517   // TODO: Add vector division?
19518   bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
19519   return OptSize && !VT.isVector();
19520 }
19521 
19522 bool RISCVTargetLowering::preferScalarizeSplat(SDNode *N) const {
19523   // Scalarize zero_ext and sign_ext might stop match to widening instruction in
19524   // some situation.
19525   unsigned Opc = N->getOpcode();
19526   if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND)
19527     return false;
19528   return true;
19529 }
19530 
19531 static Value *useTpOffset(IRBuilderBase &IRB, unsigned Offset) {
19532   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
19533   Function *ThreadPointerFunc =
19534       Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
19535   return IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
19536                                 IRB.CreateCall(ThreadPointerFunc), Offset);
19537 }
19538 
19539 Value *RISCVTargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
19540   // Fuchsia provides a fixed TLS slot for the stack cookie.
19541   // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
19542   if (Subtarget.isTargetFuchsia())
19543     return useTpOffset(IRB, -0x10);
19544 
19545   return TargetLowering::getIRStackGuard(IRB);
19546 }
19547 
19548 bool RISCVTargetLowering::isLegalInterleavedAccessType(
19549     VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace,
19550     const DataLayout &DL) const {
19551   EVT VT = getValueType(DL, VTy);
19552   // Don't lower vlseg/vsseg for vector types that can't be split.
19553   if (!isTypeLegal(VT))
19554     return false;
19555 
19556   if (!isLegalElementTypeForRVV(VT.getScalarType()) ||
19557       !allowsMemoryAccessForAlignment(VTy->getContext(), DL, VT, AddrSpace,
19558                                       Alignment))
19559     return false;
19560 
19561   MVT ContainerVT = VT.getSimpleVT();
19562 
19563   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
19564     if (!Subtarget.useRVVForFixedLengthVectors())
19565       return false;
19566     // Sometimes the interleaved access pass picks up splats as interleaves of
19567     // one element. Don't lower these.
19568     if (FVTy->getNumElements() < 2)
19569       return false;
19570 
19571     ContainerVT = getContainerForFixedLengthVector(VT.getSimpleVT());
19572   }
19573 
19574   // Need to make sure that EMUL * NFIELDS ≤ 8
19575   auto [LMUL, Fractional] = RISCVVType::decodeVLMUL(getLMUL(ContainerVT));
19576   if (Fractional)
19577     return true;
19578   return Factor * LMUL <= 8;
19579 }
19580 
19581 bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType,
19582                                                   Align Alignment) const {
19583   if (!Subtarget.hasVInstructions())
19584     return false;
19585 
19586   // Only support fixed vectors if we know the minimum vector size.
19587   if (DataType.isFixedLengthVector() && !Subtarget.useRVVForFixedLengthVectors())
19588     return false;
19589 
19590   EVT ScalarType = DataType.getScalarType();
19591   if (!isLegalElementTypeForRVV(ScalarType))
19592     return false;
19593 
19594   if (!Subtarget.hasFastUnalignedAccess() &&
19595       Alignment < ScalarType.getStoreSize())
19596     return false;
19597 
19598   return true;
19599 }
19600 
19601 static const Intrinsic::ID FixedVlsegIntrIds[] = {
19602     Intrinsic::riscv_seg2_load, Intrinsic::riscv_seg3_load,
19603     Intrinsic::riscv_seg4_load, Intrinsic::riscv_seg5_load,
19604     Intrinsic::riscv_seg6_load, Intrinsic::riscv_seg7_load,
19605     Intrinsic::riscv_seg8_load};
19606 
19607 /// Lower an interleaved load into a vlsegN intrinsic.
19608 ///
19609 /// E.g. Lower an interleaved load (Factor = 2):
19610 /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr
19611 /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
19612 /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
19613 ///
19614 /// Into:
19615 /// %ld2 = { <4 x i32>, <4 x i32> } call llvm.riscv.seg2.load.v4i32.p0.i64(
19616 ///                                        %ptr, i64 4)
19617 /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 0
19618 /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 1
19619 bool RISCVTargetLowering::lowerInterleavedLoad(
19620     LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
19621     ArrayRef<unsigned> Indices, unsigned Factor) const {
19622   IRBuilder<> Builder(LI);
19623 
19624   auto *VTy = cast<FixedVectorType>(Shuffles[0]->getType());
19625   if (!isLegalInterleavedAccessType(VTy, Factor, LI->getAlign(),
19626                                     LI->getPointerAddressSpace(),
19627                                     LI->getModule()->getDataLayout()))
19628     return false;
19629 
19630   auto *XLenTy = Type::getIntNTy(LI->getContext(), Subtarget.getXLen());
19631 
19632   Function *VlsegNFunc =
19633       Intrinsic::getDeclaration(LI->getModule(), FixedVlsegIntrIds[Factor - 2],
19634                                 {VTy, LI->getPointerOperandType(), XLenTy});
19635 
19636   Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
19637 
19638   CallInst *VlsegN =
19639       Builder.CreateCall(VlsegNFunc, {LI->getPointerOperand(), VL});
19640 
19641   for (unsigned i = 0; i < Shuffles.size(); i++) {
19642     Value *SubVec = Builder.CreateExtractValue(VlsegN, Indices[i]);
19643     Shuffles[i]->replaceAllUsesWith(SubVec);
19644   }
19645 
19646   return true;
19647 }
19648 
19649 static const Intrinsic::ID FixedVssegIntrIds[] = {
19650     Intrinsic::riscv_seg2_store, Intrinsic::riscv_seg3_store,
19651     Intrinsic::riscv_seg4_store, Intrinsic::riscv_seg5_store,
19652     Intrinsic::riscv_seg6_store, Intrinsic::riscv_seg7_store,
19653     Intrinsic::riscv_seg8_store};
19654 
19655 /// Lower an interleaved store into a vssegN intrinsic.
19656 ///
19657 /// E.g. Lower an interleaved store (Factor = 3):
19658 /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
19659 ///                  <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
19660 /// store <12 x i32> %i.vec, <12 x i32>* %ptr
19661 ///
19662 /// Into:
19663 /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
19664 /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
19665 /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
19666 /// call void llvm.riscv.seg3.store.v4i32.p0.i64(%sub.v0, %sub.v1, %sub.v2,
19667 ///                                              %ptr, i32 4)
19668 ///
19669 /// Note that the new shufflevectors will be removed and we'll only generate one
19670 /// vsseg3 instruction in CodeGen.
19671 bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
19672                                                 ShuffleVectorInst *SVI,
19673                                                 unsigned Factor) const {
19674   IRBuilder<> Builder(SI);
19675   auto *ShuffleVTy = cast<FixedVectorType>(SVI->getType());
19676   // Given SVI : <n*factor x ty>, then VTy : <n x ty>
19677   auto *VTy = FixedVectorType::get(ShuffleVTy->getElementType(),
19678                                    ShuffleVTy->getNumElements() / Factor);
19679   if (!isLegalInterleavedAccessType(VTy, Factor, SI->getAlign(),
19680                                     SI->getPointerAddressSpace(),
19681                                     SI->getModule()->getDataLayout()))
19682     return false;
19683 
19684   auto *XLenTy = Type::getIntNTy(SI->getContext(), Subtarget.getXLen());
19685 
19686   Function *VssegNFunc =
19687       Intrinsic::getDeclaration(SI->getModule(), FixedVssegIntrIds[Factor - 2],
19688                                 {VTy, SI->getPointerOperandType(), XLenTy});
19689 
19690   auto Mask = SVI->getShuffleMask();
19691   SmallVector<Value *, 10> Ops;
19692 
19693   for (unsigned i = 0; i < Factor; i++) {
19694     Value *Shuffle = Builder.CreateShuffleVector(
19695         SVI->getOperand(0), SVI->getOperand(1),
19696         createSequentialMask(Mask[i], VTy->getNumElements(), 0));
19697     Ops.push_back(Shuffle);
19698   }
19699   // This VL should be OK (should be executable in one vsseg instruction,
19700   // potentially under larger LMULs) because we checked that the fixed vector
19701   // type fits in isLegalInterleavedAccessType
19702   Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
19703   Ops.append({SI->getPointerOperand(), VL});
19704 
19705   Builder.CreateCall(VssegNFunc, Ops);
19706 
19707   return true;
19708 }
19709 
19710 bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
19711                                                            LoadInst *LI) const {
19712   assert(LI->isSimple());
19713   IRBuilder<> Builder(LI);
19714 
19715   // Only deinterleave2 supported at present.
19716   if (DI->getIntrinsicID() != Intrinsic::experimental_vector_deinterleave2)
19717     return false;
19718 
19719   unsigned Factor = 2;
19720 
19721   VectorType *VTy = cast<VectorType>(DI->getOperand(0)->getType());
19722   VectorType *ResVTy = cast<VectorType>(DI->getType()->getContainedType(0));
19723 
19724   if (!isLegalInterleavedAccessType(ResVTy, Factor, LI->getAlign(),
19725                                     LI->getPointerAddressSpace(),
19726                                     LI->getModule()->getDataLayout()))
19727     return false;
19728 
19729   Function *VlsegNFunc;
19730   Value *VL;
19731   Type *XLenTy = Type::getIntNTy(LI->getContext(), Subtarget.getXLen());
19732   SmallVector<Value *, 10> Ops;
19733 
19734   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
19735     VlsegNFunc = Intrinsic::getDeclaration(
19736         LI->getModule(), FixedVlsegIntrIds[Factor - 2],
19737         {ResVTy, LI->getPointerOperandType(), XLenTy});
19738     VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
19739   } else {
19740     static const Intrinsic::ID IntrIds[] = {
19741         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
19742         Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
19743         Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
19744         Intrinsic::riscv_vlseg8};
19745 
19746     VlsegNFunc = Intrinsic::getDeclaration(LI->getModule(), IntrIds[Factor - 2],
19747                                            {ResVTy, XLenTy});
19748     VL = Constant::getAllOnesValue(XLenTy);
19749     Ops.append(Factor, PoisonValue::get(ResVTy));
19750   }
19751 
19752   Ops.append({LI->getPointerOperand(), VL});
19753 
19754   Value *Vlseg = Builder.CreateCall(VlsegNFunc, Ops);
19755   DI->replaceAllUsesWith(Vlseg);
19756 
19757   return true;
19758 }
19759 
19760 bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
19761                                                           StoreInst *SI) const {
19762   assert(SI->isSimple());
19763   IRBuilder<> Builder(SI);
19764 
19765   // Only interleave2 supported at present.
19766   if (II->getIntrinsicID() != Intrinsic::experimental_vector_interleave2)
19767     return false;
19768 
19769   unsigned Factor = 2;
19770 
19771   VectorType *VTy = cast<VectorType>(II->getType());
19772   VectorType *InVTy = cast<VectorType>(II->getOperand(0)->getType());
19773 
19774   if (!isLegalInterleavedAccessType(InVTy, Factor, SI->getAlign(),
19775                                     SI->getPointerAddressSpace(),
19776                                     SI->getModule()->getDataLayout()))
19777     return false;
19778 
19779   Function *VssegNFunc;
19780   Value *VL;
19781   Type *XLenTy = Type::getIntNTy(SI->getContext(), Subtarget.getXLen());
19782 
19783   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
19784     VssegNFunc = Intrinsic::getDeclaration(
19785         SI->getModule(), FixedVssegIntrIds[Factor - 2],
19786         {InVTy, SI->getPointerOperandType(), XLenTy});
19787     VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
19788   } else {
19789     static const Intrinsic::ID IntrIds[] = {
19790         Intrinsic::riscv_vsseg2, Intrinsic::riscv_vsseg3,
19791         Intrinsic::riscv_vsseg4, Intrinsic::riscv_vsseg5,
19792         Intrinsic::riscv_vsseg6, Intrinsic::riscv_vsseg7,
19793         Intrinsic::riscv_vsseg8};
19794 
19795     VssegNFunc = Intrinsic::getDeclaration(SI->getModule(), IntrIds[Factor - 2],
19796                                            {InVTy, XLenTy});
19797     VL = Constant::getAllOnesValue(XLenTy);
19798   }
19799 
19800   Builder.CreateCall(VssegNFunc, {II->getOperand(0), II->getOperand(1),
19801                                   SI->getPointerOperand(), VL});
19802 
19803   return true;
19804 }
19805 
19806 MachineInstr *
19807 RISCVTargetLowering::EmitKCFICheck(MachineBasicBlock &MBB,
19808                                    MachineBasicBlock::instr_iterator &MBBI,
19809                                    const TargetInstrInfo *TII) const {
19810   assert(MBBI->isCall() && MBBI->getCFIType() &&
19811          "Invalid call instruction for a KCFI check");
19812   assert(is_contained({RISCV::PseudoCALLIndirect, RISCV::PseudoTAILIndirect},
19813                       MBBI->getOpcode()));
19814 
19815   MachineOperand &Target = MBBI->getOperand(0);
19816   Target.setIsRenamable(false);
19817 
19818   return BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(RISCV::KCFI_CHECK))
19819       .addReg(Target.getReg())
19820       .addImm(MBBI->getCFIType())
19821       .getInstr();
19822 }
19823 
19824 #define GET_REGISTER_MATCHER
19825 #include "RISCVGenAsmMatcher.inc"
19826 
19827 Register
19828 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
19829                                        const MachineFunction &MF) const {
19830   Register Reg = MatchRegisterAltName(RegName);
19831   if (Reg == RISCV::NoRegister)
19832     Reg = MatchRegisterName(RegName);
19833   if (Reg == RISCV::NoRegister)
19834     report_fatal_error(
19835         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
19836   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
19837   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
19838     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
19839                              StringRef(RegName) + "\"."));
19840   return Reg;
19841 }
19842 
19843 MachineMemOperand::Flags
19844 RISCVTargetLowering::getTargetMMOFlags(const Instruction &I) const {
19845   const MDNode *NontemporalInfo = I.getMetadata(LLVMContext::MD_nontemporal);
19846 
19847   if (NontemporalInfo == nullptr)
19848     return MachineMemOperand::MONone;
19849 
19850   // 1 for default value work as __RISCV_NTLH_ALL
19851   // 2 -> __RISCV_NTLH_INNERMOST_PRIVATE
19852   // 3 -> __RISCV_NTLH_ALL_PRIVATE
19853   // 4 -> __RISCV_NTLH_INNERMOST_SHARED
19854   // 5 -> __RISCV_NTLH_ALL
19855   int NontemporalLevel = 5;
19856   const MDNode *RISCVNontemporalInfo =
19857       I.getMetadata("riscv-nontemporal-domain");
19858   if (RISCVNontemporalInfo != nullptr)
19859     NontemporalLevel =
19860         cast<ConstantInt>(
19861             cast<ConstantAsMetadata>(RISCVNontemporalInfo->getOperand(0))
19862                 ->getValue())
19863             ->getZExtValue();
19864 
19865   assert((1 <= NontemporalLevel && NontemporalLevel <= 5) &&
19866          "RISC-V target doesn't support this non-temporal domain.");
19867 
19868   NontemporalLevel -= 2;
19869   MachineMemOperand::Flags Flags = MachineMemOperand::MONone;
19870   if (NontemporalLevel & 0b1)
19871     Flags |= MONontemporalBit0;
19872   if (NontemporalLevel & 0b10)
19873     Flags |= MONontemporalBit1;
19874 
19875   return Flags;
19876 }
19877 
19878 MachineMemOperand::Flags
19879 RISCVTargetLowering::getTargetMMOFlags(const MemSDNode &Node) const {
19880 
19881   MachineMemOperand::Flags NodeFlags = Node.getMemOperand()->getFlags();
19882   MachineMemOperand::Flags TargetFlags = MachineMemOperand::MONone;
19883   TargetFlags |= (NodeFlags & MONontemporalBit0);
19884   TargetFlags |= (NodeFlags & MONontemporalBit1);
19885 
19886   return TargetFlags;
19887 }
19888 
19889 bool RISCVTargetLowering::areTwoSDNodeTargetMMOFlagsMergeable(
19890     const MemSDNode &NodeX, const MemSDNode &NodeY) const {
19891   return getTargetMMOFlags(NodeX) == getTargetMMOFlags(NodeY);
19892 }
19893 
19894 bool RISCVTargetLowering::isCtpopFast(EVT VT) const {
19895   if (VT.isScalableVector())
19896     return isTypeLegal(VT) && Subtarget.hasStdExtZvbb();
19897   if (VT.isFixedLengthVector() && Subtarget.hasStdExtZvbb())
19898     return true;
19899   return Subtarget.hasStdExtZbb() &&
19900          (VT == MVT::i32 || VT == MVT::i64 || VT.isFixedLengthVector());
19901 }
19902 
19903 unsigned RISCVTargetLowering::getCustomCtpopCost(EVT VT,
19904                                                  ISD::CondCode Cond) const {
19905   return isCtpopFast(VT) ? 0 : 1;
19906 }
19907 
19908 bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
19909   // At the moment, the only scalable instruction GISel knows how to lower is
19910   // ret with scalable argument.
19911 
19912   if (Inst.getType()->isScalableTy())
19913     return true;
19914 
19915   for (unsigned i = 0; i < Inst.getNumOperands(); ++i)
19916     if (Inst.getOperand(i)->getType()->isScalableTy() &&
19917         !isa<ReturnInst>(&Inst))
19918       return true;
19919 
19920   if (const AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
19921     if (AI->getAllocatedType()->isScalableTy())
19922       return true;
19923   }
19924 
19925   return false;
19926 }
19927 
19928 SDValue
19929 RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
19930                                    SelectionDAG &DAG,
19931                                    SmallVectorImpl<SDNode *> &Created) const {
19932   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
19933   if (isIntDivCheap(N->getValueType(0), Attr))
19934     return SDValue(N, 0); // Lower SDIV as SDIV
19935 
19936   // Only perform this transform if short forward branch opt is supported.
19937   if (!Subtarget.hasShortForwardBranchOpt())
19938     return SDValue();
19939   EVT VT = N->getValueType(0);
19940   if (!(VT == MVT::i32 || (VT == MVT::i64 && Subtarget.is64Bit())))
19941     return SDValue();
19942 
19943   // Ensure 2**k-1 < 2048 so that we can just emit a single addi/addiw.
19944   if (Divisor.sgt(2048) || Divisor.slt(-2048))
19945     return SDValue();
19946   return TargetLowering::buildSDIVPow2WithCMov(N, Divisor, DAG, Created);
19947 }
19948 
19949 bool RISCVTargetLowering::shouldFoldSelectWithSingleBitTest(
19950     EVT VT, const APInt &AndMask) const {
19951   if (Subtarget.hasStdExtZicond() || Subtarget.hasVendorXVentanaCondOps())
19952     return !Subtarget.hasStdExtZbs() && AndMask.ugt(1024);
19953   return TargetLowering::shouldFoldSelectWithSingleBitTest(VT, AndMask);
19954 }
19955 
19956 unsigned RISCVTargetLowering::getMinimumJumpTableEntries() const {
19957   return Subtarget.getMinimumJumpTableEntries();
19958 }
19959 
19960 namespace llvm::RISCVVIntrinsicsTable {
19961 
19962 #define GET_RISCVVIntrinsicsTable_IMPL
19963 #include "RISCVGenSearchableTables.inc"
19964 
19965 } // namespace llvm::RISCVVIntrinsicsTable
19966